repo_id: stringclasses (208 values)
file_path: stringlengths (31-190)
content: stringlengths (1-2.65M)
__index_level_0__: int64 (0-0)
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/chardet/euckrfreq.py
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # Sampling from about 20M text materials include literature and computer technology # 128 --> 0.79 # 256 --> 0.92 # 512 --> 0.986 # 1024 --> 0.99944 # 2048 --> 0.99999 # # Idea Distribution Ratio = 0.98653 / (1-0.98653) = 73.24 # Random Distribution Ration = 512 / (2350-512) = 0.279. # # Typical Distribution Ratio EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0 EUCKR_TABLE_SIZE = 2352 # Char to FreqOrder table , EUCKR_CHAR_TO_FREQ_ORDER = ( 13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87, 1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398, 1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734, 945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739, 116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622, 708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750, 1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856, 344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205, 709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779, 1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19, 1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567, 1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797, 1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802, 1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899, 885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818, 1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409, 1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697, 1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770, 1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723, 544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416, 1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300, 119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083, 893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857, 1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871, 282, 96, 574,1872, 
502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420, 1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885, 127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889, 0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893, 1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317, 1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841, 1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910, 1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610, 269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375, 1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939, 887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870, 217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934, 1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888, 1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950, 1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065, 1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002, 1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965, 1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467, 50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285, 639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7, 103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979, 1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985, 818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994, 1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250, 423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824, 532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003, 2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745, 619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61, 191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023, 2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032, 2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912, 2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224, 719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012, 819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050, 2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681, 499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414, 1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068, 2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075, 1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850, 2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606, 2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449, 1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452, 949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112, 2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121, 2122,2123,2124, 720, 701,1297, 992,1457, 
927,1004,2125,2126,2127,2128,2129,2130, 22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274, 962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139, 2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721, 1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298, 2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463, 2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747, 2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285, 2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187, 2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10, 2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350, 1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201, 2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972, 2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219, 2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233, 2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242, 2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247, 1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178, 1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255, 2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259, 1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262, 2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702, 1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273, 295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541, 2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117, 432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187, 2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800, 808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312, 2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229, 2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315, 501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484, 2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170, 1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335, 425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601, 1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395, 2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354, 1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476, 2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035, 416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498, 2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310, 1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389, 2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504, 1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505, 2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145, 1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624, 593, 
921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700, 2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221, 2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377, 644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448, 915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485, 1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705, 1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465, 291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471, 2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997, 2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486, 797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494, 434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771, 585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323, 2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491, 95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510, 161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519, 2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532, 2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199, 704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544, 2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247, 1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441, 249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562, 2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362, 2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583, 2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465, 3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431, 202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151, 974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596, 2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406, 2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611, 2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619, 1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628, 2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042, 670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256 )
0
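The sampling statistics in the header above motivate how this table is used: characters whose frequency order falls inside the 512 most common count as "frequent", and the frequent-to-rare ratio, normalised by the typical distribution ratio, becomes a confidence score. Below is a minimal standalone sketch of that calculation, following the same idea as chardet's CharDistributionAnalysis (the function name and the `orders` input are hypothetical; in the real code the orders come from a table like EUCKR_CHAR_TO_FREQ_ORDER above).

# Minimal sketch of distribution-ratio confidence scoring. `orders` is assumed
# to hold one frequency-order value per decoded character.
FREQUENT_CUTOFF = 512   # orders below this are "frequent" (~0.986 coverage per the stats above)
TYPICAL_RATIO = 6.0     # EUCKR_TYPICAL_DISTRIBUTION_RATIO

def distribution_confidence(orders):
    total = len(orders)
    freq = sum(1 for order in orders if 0 <= order < FREQUENT_CUTOFF)
    if total == 0 or total == freq:
        return 0.99 if freq else 0.0
    # ratio of frequent to rare characters, normalised by the typical ratio
    r = freq / ((total - freq) * TYPICAL_RATIO)
    return min(r, 0.99)

print(distribution_confidence([3, 17, 88, 412, 9, 31]))       # 0.99: all frequent
print(distribution_confidence([3, 1500, 17, 2100, 88, 950]))  # ~0.17: half the chars are rare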
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/chardet/big5prober.py
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################

from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import BIG5_SM_MODEL


class Big5Prober(MultiByteCharSetProber):
    def __init__(self):
        super(Big5Prober, self).__init__()
        self.coding_sm = CodingStateMachine(BIG5_SM_MODEL)
        self.distribution_analyzer = Big5DistributionAnalysis()
        self.reset()

    @property
    def charset_name(self):
        return "Big5"

    @property
    def language(self):
        return "Chinese"
0
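A hypothetical usage sketch for the prober above, assuming the standalone `chardet` package is installed (the vendored copy under pip._vendor behaves identically; only the import prefix differs):

from chardet.big5prober import Big5Prober

prober = Big5Prober()
prober.feed("繁體中文的編碼測試".encode("big5"))
print(prober.charset_name)      # Big5
print(prober.language)          # Chinese
print(prober.get_confidence())  # rises toward 0.99 as more Big5 text is fed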
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/chardet/hebrewprober.py
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Shy Shalom # Portions created by the Initial Developer are Copyright (C) 2005 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .charsetprober import CharSetProber from .enums import ProbingState # This prober doesn't actually recognize a language or a charset. # It is a helper prober for the use of the Hebrew model probers ### General ideas of the Hebrew charset recognition ### # # Four main charsets exist in Hebrew: # "ISO-8859-8" - Visual Hebrew # "windows-1255" - Logical Hebrew # "ISO-8859-8-I" - Logical Hebrew # "x-mac-hebrew" - ?? Logical Hebrew ?? # # Both "ISO" charsets use a completely identical set of code points, whereas # "windows-1255" and "x-mac-hebrew" are two different proper supersets of # these code points. windows-1255 defines additional characters in the range # 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific # diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6. # x-mac-hebrew defines similar additional code points but with a different # mapping. # # As far as an average Hebrew text with no diacritics is concerned, all four # charsets are identical with respect to code points. Meaning that for the # main Hebrew alphabet, all four map the same values to all 27 Hebrew letters # (including final letters). # # The dominant difference between these charsets is their directionality. # "Visual" directionality means that the text is ordered as if the renderer is # not aware of a BIDI rendering algorithm. The renderer sees the text and # draws it from left to right. The text itself when ordered naturally is read # backwards. A buffer of Visual Hebrew generally looks like so: # "[last word of first line spelled backwards] [whole line ordered backwards # and spelled backwards] [first word of first line spelled backwards] # [end of line] [last word of second line] ... etc' " # adding punctuation marks, numbers and English text to visual text is # naturally also "visual" and from left to right. # # "Logical" directionality means the text is ordered "naturally" according to # the order it is read. It is the responsibility of the renderer to display # the text from right to left. A BIDI algorithm is used to place general # punctuation marks, numbers and English text in the text. # # Texts in x-mac-hebrew are almost impossible to find on the Internet. From # what little evidence I could find, it seems that its general directionality # is Logical. 
# # To sum up all of the above, the Hebrew probing mechanism knows about two # charsets: # Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are # backwards while line order is natural. For charset recognition purposes # the line order is unimportant (In fact, for this implementation, even # word order is unimportant). # Logical Hebrew - "windows-1255" - normal, naturally ordered text. # # "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be # specifically identified. # "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew # that contain special punctuation marks or diacritics is displayed with # some unconverted characters showing as question marks. This problem might # be corrected using another model prober for x-mac-hebrew. Due to the fact # that x-mac-hebrew texts are so rare, writing another model prober isn't # worth the effort and performance hit. # #### The Prober #### # # The prober is divided between two SBCharSetProbers and a HebrewProber, # all of which are managed, created, fed data, inquired and deleted by the # SBCSGroupProber. The two SBCharSetProbers identify that the text is in # fact some kind of Hebrew, Logical or Visual. The final decision about which # one is it is made by the HebrewProber by combining final-letter scores # with the scores of the two SBCharSetProbers to produce a final answer. # # The SBCSGroupProber is responsible for stripping the original text of HTML # tags, English characters, numbers, low-ASCII punctuation characters, spaces # and new lines. It reduces any sequence of such characters to a single space. # The buffer fed to each prober in the SBCS group prober is pure text in # high-ASCII. # The two SBCharSetProbers (model probers) share the same language model: # Win1255Model. # The first SBCharSetProber uses the model normally as any other # SBCharSetProber does, to recognize windows-1255, upon which this model was # built. The second SBCharSetProber is told to make the pair-of-letter # lookup in the language model backwards. This in practice exactly simulates # a visual Hebrew model using the windows-1255 logical Hebrew model. # # The HebrewProber is not using any language model. All it does is look for # final-letter evidence suggesting the text is either logical Hebrew or visual # Hebrew. Disjointed from the model probers, the results of the HebrewProber # alone are meaningless. HebrewProber always returns 0.00 as confidence # since it never identifies a charset by itself. Instead, the pointer to the # HebrewProber is passed to the model probers as a helper "Name Prober". # When the Group prober receives a positive identification from any prober, # it asks for the name of the charset identified. If the prober queried is a # Hebrew model prober, the model prober forwards the call to the # HebrewProber to make the final decision. In the HebrewProber, the # decision is made according to the final-letters scores maintained and Both # model probers scores. The answer is returned in the form of the name of the # charset identified, either "windows-1255" or "ISO-8859-8". class HebrewProber(CharSetProber): # windows-1255 / ISO-8859-8 code points of interest FINAL_KAF = 0xea NORMAL_KAF = 0xeb FINAL_MEM = 0xed NORMAL_MEM = 0xee FINAL_NUN = 0xef NORMAL_NUN = 0xf0 FINAL_PE = 0xf3 NORMAL_PE = 0xf4 FINAL_TSADI = 0xf5 NORMAL_TSADI = 0xf6 # Minimum Visual vs Logical final letter score difference. # If the difference is below this, don't rely solely on the final letter score # distance. 
MIN_FINAL_CHAR_DISTANCE = 5 # Minimum Visual vs Logical model score difference. # If the difference is below this, don't rely at all on the model score # distance. MIN_MODEL_DISTANCE = 0.01 VISUAL_HEBREW_NAME = "ISO-8859-8" LOGICAL_HEBREW_NAME = "windows-1255" def __init__(self): super(HebrewProber, self).__init__() self._final_char_logical_score = None self._final_char_visual_score = None self._prev = None self._before_prev = None self._logical_prober = None self._visual_prober = None self.reset() def reset(self): self._final_char_logical_score = 0 self._final_char_visual_score = 0 # The two last characters seen in the previous buffer, # mPrev and mBeforePrev are initialized to space in order to simulate # a word delimiter at the beginning of the data self._prev = ' ' self._before_prev = ' ' # These probers are owned by the group prober. def set_model_probers(self, logicalProber, visualProber): self._logical_prober = logicalProber self._visual_prober = visualProber def is_final(self, c): return c in [self.FINAL_KAF, self.FINAL_MEM, self.FINAL_NUN, self.FINAL_PE, self.FINAL_TSADI] def is_non_final(self, c): # The normal Tsadi is not a good Non-Final letter due to words like # 'lechotet' (to chat) containing an apostrophe after the tsadi. This # apostrophe is converted to a space in FilterWithoutEnglishLetters # causing the Non-Final tsadi to appear at an end of a word even # though this is not the case in the original text. # The letters Pe and Kaf rarely display a related behavior of not being # a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak' # for example legally end with a Non-Final Pe or Kaf. However, the # benefit of these letters as Non-Final letters outweighs the damage # since these words are quite rare. return c in [self.NORMAL_KAF, self.NORMAL_MEM, self.NORMAL_NUN, self.NORMAL_PE] def feed(self, byte_str): # Final letter analysis for logical-visual decision. # Look for evidence that the received buffer is either logical Hebrew # or visual Hebrew. # The following cases are checked: # 1) A word longer than 1 letter, ending with a final letter. This is # an indication that the text is laid out "naturally" since the # final letter really appears at the end. +1 for logical score. # 2) A word longer than 1 letter, ending with a Non-Final letter. In # normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi, # should not end with the Non-Final form of that letter. Exceptions # to this rule are mentioned above in isNonFinal(). This is an # indication that the text is laid out backwards. +1 for visual # score # 3) A word longer than 1 letter, starting with a final letter. Final # letters should not appear at the beginning of a word. This is an # indication that the text is laid out backwards. +1 for visual # score. # # The visual score and logical score are accumulated throughout the # text and are finally checked against each other in GetCharSetName(). # No checking for final letters in the middle of words is done since # that case is not an indication for either Logical or Visual text. # # We automatically filter out all 7-bit characters (replace them with # spaces) so the word boundary detection works properly. [MAP] if self.state == ProbingState.NOT_ME: # Both model probers say it's not them. No reason to continue. 
return ProbingState.NOT_ME byte_str = self.filter_high_byte_only(byte_str) for cur in byte_str: if cur == ' ': # We stand on a space - a word just ended if self._before_prev != ' ': # next-to-last char was not a space so self._prev is not a # 1 letter word if self.is_final(self._prev): # case (1) [-2:not space][-1:final letter][cur:space] self._final_char_logical_score += 1 elif self.is_non_final(self._prev): # case (2) [-2:not space][-1:Non-Final letter][ # cur:space] self._final_char_visual_score += 1 else: # Not standing on a space if ((self._before_prev == ' ') and (self.is_final(self._prev)) and (cur != ' ')): # case (3) [-2:space][-1:final letter][cur:not space] self._final_char_visual_score += 1 self._before_prev = self._prev self._prev = cur # Forever detecting, till the end or until both model probers return # ProbingState.NOT_ME (handled above) return ProbingState.DETECTING @property def charset_name(self): # Make the decision: is it Logical or Visual? # If the final letter score distance is dominant enough, rely on it. finalsub = self._final_char_logical_score - self._final_char_visual_score if finalsub >= self.MIN_FINAL_CHAR_DISTANCE: return self.LOGICAL_HEBREW_NAME if finalsub <= -self.MIN_FINAL_CHAR_DISTANCE: return self.VISUAL_HEBREW_NAME # It's not dominant enough, try to rely on the model scores instead. modelsub = (self._logical_prober.get_confidence() - self._visual_prober.get_confidence()) if modelsub > self.MIN_MODEL_DISTANCE: return self.LOGICAL_HEBREW_NAME if modelsub < -self.MIN_MODEL_DISTANCE: return self.VISUAL_HEBREW_NAME # Still no good, back to final letter distance, maybe it'll save the # day. if finalsub < 0.0: return self.VISUAL_HEBREW_NAME # (finalsub > 0 - Logical) or (don't know what to do) default to # Logical. return self.LOGICAL_HEBREW_NAME @property def language(self): return 'Hebrew' @property def state(self): # Remain active as long as any of the model probers are active. if (self._logical_prober.state == ProbingState.NOT_ME) and \ (self._visual_prober.state == ProbingState.NOT_ME): return ProbingState.NOT_ME return ProbingState.DETECTING
0
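The three feed() cases described in the comments above can be exercised standalone. Below is a compact, illustrative re-implementation of just the final-letter scoring (the windows-1255 code points are hard-coded; the vendored prober additionally filters the buffer down to high-byte characters first, and this sketch compares integer byte values as Python 3 yields them):

# Final forms of kaf, mem, nun, pe, tsadi, and the normal forms scored as
# "non-final" (normal tsadi excluded, per the comment in is_non_final above).
FINAL_LETTERS = {0xEA, 0xED, 0xEF, 0xF3, 0xF5}
NON_FINAL = {0xEB, 0xEE, 0xF0, 0xF4}

def score(buf):
    logical = visual = 0
    prev, before_prev = 0x20, 0x20   # simulate a word delimiter at the start
    for cur in buf:
        if cur == 0x20:                      # a word just ended...
            if before_prev != 0x20:          # ...and it was longer than one letter
                if prev in FINAL_LETTERS:
                    logical += 1             # case (1): ends with a final letter
                elif prev in NON_FINAL:
                    visual += 1              # case (2): ends with a non-final form
        elif before_prev == 0x20 and prev in FINAL_LETTERS and cur != 0x20:
            visual += 1                      # case (3): word *starts* with a final letter
        before_prev, prev = prev, cur
    return logical, visual

logical_text = "שלום עולם ".encode("windows-1255")  # naturally ordered Hebrew
visual_text = bytes(reversed(logical_text))          # same text stored "visually"
print(score(logical_text))  # (2, 0): final letters land at word ends
print(score(visual_text))   # (0, 2): final letters land at word starts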
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/chardet/charsetgroupprober.py
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################

from .enums import ProbingState
from .charsetprober import CharSetProber


class CharSetGroupProber(CharSetProber):
    def __init__(self, lang_filter=None):
        super(CharSetGroupProber, self).__init__(lang_filter=lang_filter)
        self._active_num = 0
        self.probers = []
        self._best_guess_prober = None

    def reset(self):
        super(CharSetGroupProber, self).reset()
        self._active_num = 0
        for prober in self.probers:
            if prober:
                prober.reset()
                prober.active = True
                self._active_num += 1
        self._best_guess_prober = None

    @property
    def charset_name(self):
        if not self._best_guess_prober:
            self.get_confidence()
            if not self._best_guess_prober:
                return None
        return self._best_guess_prober.charset_name

    @property
    def language(self):
        if not self._best_guess_prober:
            self.get_confidence()
            if not self._best_guess_prober:
                return None
        return self._best_guess_prober.language

    def feed(self, byte_str):
        for prober in self.probers:
            if not prober:
                continue
            if not prober.active:
                continue
            state = prober.feed(byte_str)
            if not state:
                continue
            if state == ProbingState.FOUND_IT:
                self._best_guess_prober = prober
                return self.state
            elif state == ProbingState.NOT_ME:
                prober.active = False
                self._active_num -= 1
                if self._active_num <= 0:
                    self._state = ProbingState.NOT_ME
                    return self.state
        return self.state

    def get_confidence(self):
        state = self.state
        if state == ProbingState.FOUND_IT:
            return 0.99
        elif state == ProbingState.NOT_ME:
            return 0.01
        best_conf = 0.0
        self._best_guess_prober = None
        for prober in self.probers:
            if not prober:
                continue
            if not prober.active:
                self.logger.debug('%s not active', prober.charset_name)
                continue
            conf = prober.get_confidence()
            self.logger.debug('%s %s confidence = %s',
                              prober.charset_name, prober.language, conf)
            if best_conf < conf:
                best_conf = conf
                self._best_guess_prober = prober
        if not self._best_guess_prober:
            return 0.0
        return best_conf
0
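The group-prober pattern above (fan the buffer out to every child, retire children that answer NOT_ME, report the best survivor) is easy to see in miniature. A toy illustration with hypothetical classes, not the vendored API:

DETECTING, FOUND_IT, NOT_ME = range(3)

class ToyProber:
    def __init__(self, name, alphabet):
        self.name = name
        self.alphabet = alphabet   # bytes this prober "recognises"
        self.active = True
        self.hits = 0
        self.total = 0

    def feed(self, data):
        for b in data:
            self.total += 1
            self.hits += (b in self.alphabet)
        # nothing recognised at all: give up on this prober
        return NOT_ME if (self.total and self.hits == 0) else DETECTING

    def confidence(self):
        return self.hits / self.total if self.total else 0.0

class ToyGroup:
    def __init__(self, probers):
        self.probers = probers

    def feed(self, data):
        for p in self.probers:
            if p.active and p.feed(data) == NOT_ME:
                p.active = False   # never ask this child again

    def best(self):
        live = [p for p in self.probers if p.active]
        return max(live, key=lambda p: p.confidence(), default=None)

group = ToyGroup([ToyProber("vowels", b"aeiou"), ToyProber("digits", b"0123456789")])
group.feed(b"hello world")
print(group.best().name)   # "vowels": the digits prober was retired as NOT_ME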
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/chardet/sbcsgroupprober.py
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#   Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################

from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
                                Latin5CyrillicModel, MacCyrillicModel,
                                Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
# from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber
from .langturkishmodel import Latin5TurkishModel


class SBCSGroupProber(CharSetGroupProber):
    def __init__(self):
        super(SBCSGroupProber, self).__init__()
        self.probers = [
            SingleByteCharSetProber(Win1251CyrillicModel),
            SingleByteCharSetProber(Koi8rModel),
            SingleByteCharSetProber(Latin5CyrillicModel),
            SingleByteCharSetProber(MacCyrillicModel),
            SingleByteCharSetProber(Ibm866Model),
            SingleByteCharSetProber(Ibm855Model),
            SingleByteCharSetProber(Latin7GreekModel),
            SingleByteCharSetProber(Win1253GreekModel),
            SingleByteCharSetProber(Latin5BulgarianModel),
            SingleByteCharSetProber(Win1251BulgarianModel),
            # TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250)
            #       after we retrain model.
            # SingleByteCharSetProber(Latin2HungarianModel),
            # SingleByteCharSetProber(Win1250HungarianModel),
            SingleByteCharSetProber(TIS620ThaiModel),
            SingleByteCharSetProber(Latin5TurkishModel),
        ]
        hebrew_prober = HebrewProber()
        logical_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel,
                                                        False, hebrew_prober)
        visual_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel,
                                                       True, hebrew_prober)
        hebrew_prober.set_model_probers(logical_hebrew_prober,
                                        visual_hebrew_prober)
        self.probers.extend([hebrew_prober, logical_hebrew_prober,
                             visual_hebrew_prober])
        self.reset()
0
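A hypothetical usage sketch, assuming the standalone `chardet` package: every model in the list above sees the same buffer, and the group reports the best-scoring survivor.

from chardet.sbcsgroupprober import SBCSGroupProber

prober = SBCSGroupProber()
prober.feed("Привет, мир! Это пример кириллического текста.".encode("windows-1251"))
print(prober.charset_name, prober.get_confidence())
# e.g. "windows-1251 0.9..." once enough Cyrillic has been seen; short
# buffers may still score a competing Cyrillic model higher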
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/chardet/jpcntx.py
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # This is hiragana 2-char sequence table, the number in each cell represents its frequency category jp2CharContext = ( (0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1), (2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4), (0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2), (0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4), (1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4), (0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3), (0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3), 
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3), (0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4), (0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3), (2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4), (0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3), (0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5), (0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3), (2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5), (0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4), (1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4), (0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3), (0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3), (0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3), (0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5), (0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4), (0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5), (0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3), (0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4), (0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4), (0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4), (0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1), 
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0), (1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3), (0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0), (0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3), (0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3), (0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5), (0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4), (2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5), (0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3), (0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3), (0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3), (0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3), (0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4), (0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4), (0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2), (0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3), (0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3), (0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3), (0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3), (0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4), (0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3), 
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4), (0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3), (0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3), (0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4), (0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4), (0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3), (2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4), (0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4), (0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3), (0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4), (0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4), (1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4), (0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3), (0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2), (0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2), (0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3), (0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3), (0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5), (0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3), (0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4), (1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4), 
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1), (0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2), (0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3), (0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1), ) class JapaneseContextAnalysis(object): NUM_OF_CATEGORY = 6 DONT_KNOW = -1 ENOUGH_REL_THRESHOLD = 100 MAX_REL_THRESHOLD = 1000 MINIMUM_DATA_THRESHOLD = 4 def __init__(self): self._total_rel = None self._rel_sample = None self._need_to_skip_char_num = None self._last_char_order = None self._done = None self.reset() def reset(self): self._total_rel = 0 # total sequence received # category counters, each integer counts sequence in its category self._rel_sample = [0] * self.NUM_OF_CATEGORY # if last byte in current buffer is not the last byte of a character, # we need to know how many bytes to skip in next buffer self._need_to_skip_char_num = 0 self._last_char_order = -1 # The order of previous char # If this flag is set to True, detection is done and conclusion has # been made self._done = False def feed(self, byte_str, num_bytes): if self._done: return # The buffer we got is byte oriented, and a character may span in more than one # buffers. In case the last one or two byte in last buffer is not # complete, we record how many byte needed to complete that character # and skip these bytes here. We can choose to record those bytes as # well and analyse the character once it is complete, but since a # character will not make much difference, by simply skipping # this character will simply our logic and improve performance. i = self._need_to_skip_char_num while i < num_bytes: order, char_len = self.get_order(byte_str[i:i + 2]) i += char_len if i > num_bytes: self._need_to_skip_char_num = i - num_bytes self._last_char_order = -1 else: if (order != -1) and (self._last_char_order != -1): self._total_rel += 1 if self._total_rel > self.MAX_REL_THRESHOLD: self._done = True break self._rel_sample[jp2CharContext[self._last_char_order][order]] += 1 self._last_char_order = order def got_enough_data(self): return self._total_rel > self.ENOUGH_REL_THRESHOLD def get_confidence(self): # This is just one way to calculate confidence. It works well for me. 
        if self._total_rel > self.MINIMUM_DATA_THRESHOLD:
            return (self._total_rel - self._rel_sample[0]) / self._total_rel
        else:
            return self.DONT_KNOW

    def get_order(self, byte_str):
        return -1, 1


class SJISContextAnalysis(JapaneseContextAnalysis):
    def __init__(self):
        super(SJISContextAnalysis, self).__init__()
        self._charset_name = "SHIFT_JIS"

    @property
    def charset_name(self):
        return self._charset_name

    def get_order(self, byte_str):
        if not byte_str:
            return -1, 1
        # find out current char's byte length
        first_char = byte_str[0]
        if (0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC):
            char_len = 2
            if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
                self._charset_name = "CP932"
        else:
            char_len = 1

        # return its order if it is hiragana
        if len(byte_str) > 1:
            second_char = byte_str[1]
            if (first_char == 202) and (0x9F <= second_char <= 0xF1):
                return second_char - 0x9F, char_len

        return -1, char_len


class EUCJPContextAnalysis(JapaneseContextAnalysis):
    def get_order(self, byte_str):
        if not byte_str:
            return -1, 1
        # find out current char's byte length
        first_char = byte_str[0]
        if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE):
            char_len = 2
        elif first_char == 0x8F:
            char_len = 3
        else:
            char_len = 1

        # return its order if it is hiragana
        if len(byte_str) > 1:
            second_char = byte_str[1]
            if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3):
                return second_char - 0xA1, char_len

        return -1, char_len
0
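A quick standalone check of the EUC-JP byte logic above (hypothetical helper name): hiragana occupy the 0xA4 lead-byte row in EUC-JP, so the trail byte minus 0xA1 yields the order used to index jp2CharContext.

def eucjp_order(buf):
    first = buf[0]
    if first == 0x8E or 0xA1 <= first <= 0xFE:
        char_len = 2
    elif first == 0x8F:
        char_len = 3
    else:
        char_len = 1
    # hiragana live under lead byte 0xA4; anything else has no context order
    if len(buf) > 1 and first == 0xA4 and 0xA1 <= buf[1] <= 0xF3:
        return buf[1] - 0xA1, char_len
    return -1, char_len

for ch in "あいか漢":
    print(ch, eucjp_order(ch.encode("euc_jp")))
# あ -> (1, 2), い -> (3, 2), か -> (10, 2); 漢 is kanji -> (-1, 2)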
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/chardet
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/chardet/cli/__init__.py
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/chardet
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/chardet/cli/chardetect.py
#!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings

Example::

    % chardetect somefile someotherfile
    somefile: windows-1252 with confidence 0.5
    someotherfile: ascii with confidence 1.0

If no paths are provided, it takes its input from stdin.

"""

from __future__ import absolute_import, print_function, unicode_literals

import argparse
import sys

from pip._vendor.chardet import __version__
from pip._vendor.chardet.compat import PY2
from pip._vendor.chardet.universaldetector import UniversalDetector


def description_of(lines, name='stdin'):
    """
    Return a string describing the probable encoding of a file or
    list of strings.

    :param lines: The lines to get the encoding of.
    :type lines: Iterable of bytes
    :param name: Name of file or collection of lines
    :type name: str
    """
    u = UniversalDetector()
    for line in lines:
        line = bytearray(line)
        u.feed(line)
        # shortcut out of the loop to save reading further - particularly useful if we read a BOM.
        if u.done:
            break
    u.close()
    result = u.result
    if PY2:
        name = name.decode(sys.getfilesystemencoding(), 'ignore')
    if result['encoding']:
        return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
                                                     result['confidence'])
    else:
        return '{0}: no result'.format(name)


def main(argv=None):
    """
    Handles command line arguments and gets things started.

    :param argv: List of arguments, as if specified on the command-line.
                 If None, ``sys.argv[1:]`` is used instead.
    :type argv: list of str
    """
    # Get command line arguments
    parser = argparse.ArgumentParser(
        description="Takes one or more file paths and reports their detected "
                    "encodings")
    parser.add_argument('input',
                        help='File whose encoding we would like to determine. '
                             '(default: stdin)',
                        type=argparse.FileType('rb'), nargs='*',
                        default=[sys.stdin if PY2 else sys.stdin.buffer])
    parser.add_argument('--version', action='version',
                        version='%(prog)s {0}'.format(__version__))
    args = parser.parse_args(argv)

    for f in args.input:
        if f.isatty():
            print("You are running chardetect interactively. Press " +
                  "CTRL-D twice at the start of a blank line to signal the " +
                  "end of your input. If you want help, run chardetect " +
                  "--help\n", file=sys.stderr)
        print(description_of(f, f.name))


if __name__ == '__main__':
    main()
0
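Hypothetical programmatic use of description_of() above, assuming the standalone `chardet` package (the vendored copy only differs in the pip._vendor import prefix):

from chardet.cli.chardetect import description_of

lines = ["日本語のテキストです。".encode("utf-8")]
print(description_of(lines, "demo"))
# e.g. "demo: utf-8 with confidence 0.99"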
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/webencodings/labels.py
""" webencodings.labels ~~~~~~~~~~~~~~~~~~~ Map encoding labels to their name. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ # XXX Do not edit! # This file is automatically generated by mklabels.py LABELS = { 'unicode-1-1-utf-8': 'utf-8', 'utf-8': 'utf-8', 'utf8': 'utf-8', '866': 'ibm866', 'cp866': 'ibm866', 'csibm866': 'ibm866', 'ibm866': 'ibm866', 'csisolatin2': 'iso-8859-2', 'iso-8859-2': 'iso-8859-2', 'iso-ir-101': 'iso-8859-2', 'iso8859-2': 'iso-8859-2', 'iso88592': 'iso-8859-2', 'iso_8859-2': 'iso-8859-2', 'iso_8859-2:1987': 'iso-8859-2', 'l2': 'iso-8859-2', 'latin2': 'iso-8859-2', 'csisolatin3': 'iso-8859-3', 'iso-8859-3': 'iso-8859-3', 'iso-ir-109': 'iso-8859-3', 'iso8859-3': 'iso-8859-3', 'iso88593': 'iso-8859-3', 'iso_8859-3': 'iso-8859-3', 'iso_8859-3:1988': 'iso-8859-3', 'l3': 'iso-8859-3', 'latin3': 'iso-8859-3', 'csisolatin4': 'iso-8859-4', 'iso-8859-4': 'iso-8859-4', 'iso-ir-110': 'iso-8859-4', 'iso8859-4': 'iso-8859-4', 'iso88594': 'iso-8859-4', 'iso_8859-4': 'iso-8859-4', 'iso_8859-4:1988': 'iso-8859-4', 'l4': 'iso-8859-4', 'latin4': 'iso-8859-4', 'csisolatincyrillic': 'iso-8859-5', 'cyrillic': 'iso-8859-5', 'iso-8859-5': 'iso-8859-5', 'iso-ir-144': 'iso-8859-5', 'iso8859-5': 'iso-8859-5', 'iso88595': 'iso-8859-5', 'iso_8859-5': 'iso-8859-5', 'iso_8859-5:1988': 'iso-8859-5', 'arabic': 'iso-8859-6', 'asmo-708': 'iso-8859-6', 'csiso88596e': 'iso-8859-6', 'csiso88596i': 'iso-8859-6', 'csisolatinarabic': 'iso-8859-6', 'ecma-114': 'iso-8859-6', 'iso-8859-6': 'iso-8859-6', 'iso-8859-6-e': 'iso-8859-6', 'iso-8859-6-i': 'iso-8859-6', 'iso-ir-127': 'iso-8859-6', 'iso8859-6': 'iso-8859-6', 'iso88596': 'iso-8859-6', 'iso_8859-6': 'iso-8859-6', 'iso_8859-6:1987': 'iso-8859-6', 'csisolatingreek': 'iso-8859-7', 'ecma-118': 'iso-8859-7', 'elot_928': 'iso-8859-7', 'greek': 'iso-8859-7', 'greek8': 'iso-8859-7', 'iso-8859-7': 'iso-8859-7', 'iso-ir-126': 'iso-8859-7', 'iso8859-7': 'iso-8859-7', 'iso88597': 'iso-8859-7', 'iso_8859-7': 'iso-8859-7', 'iso_8859-7:1987': 'iso-8859-7', 'sun_eu_greek': 'iso-8859-7', 'csiso88598e': 'iso-8859-8', 'csisolatinhebrew': 'iso-8859-8', 'hebrew': 'iso-8859-8', 'iso-8859-8': 'iso-8859-8', 'iso-8859-8-e': 'iso-8859-8', 'iso-ir-138': 'iso-8859-8', 'iso8859-8': 'iso-8859-8', 'iso88598': 'iso-8859-8', 'iso_8859-8': 'iso-8859-8', 'iso_8859-8:1988': 'iso-8859-8', 'visual': 'iso-8859-8', 'csiso88598i': 'iso-8859-8-i', 'iso-8859-8-i': 'iso-8859-8-i', 'logical': 'iso-8859-8-i', 'csisolatin6': 'iso-8859-10', 'iso-8859-10': 'iso-8859-10', 'iso-ir-157': 'iso-8859-10', 'iso8859-10': 'iso-8859-10', 'iso885910': 'iso-8859-10', 'l6': 'iso-8859-10', 'latin6': 'iso-8859-10', 'iso-8859-13': 'iso-8859-13', 'iso8859-13': 'iso-8859-13', 'iso885913': 'iso-8859-13', 'iso-8859-14': 'iso-8859-14', 'iso8859-14': 'iso-8859-14', 'iso885914': 'iso-8859-14', 'csisolatin9': 'iso-8859-15', 'iso-8859-15': 'iso-8859-15', 'iso8859-15': 'iso-8859-15', 'iso885915': 'iso-8859-15', 'iso_8859-15': 'iso-8859-15', 'l9': 'iso-8859-15', 'iso-8859-16': 'iso-8859-16', 'cskoi8r': 'koi8-r', 'koi': 'koi8-r', 'koi8': 'koi8-r', 'koi8-r': 'koi8-r', 'koi8_r': 'koi8-r', 'koi8-u': 'koi8-u', 'csmacintosh': 'macintosh', 'mac': 'macintosh', 'macintosh': 'macintosh', 'x-mac-roman': 'macintosh', 'dos-874': 'windows-874', 'iso-8859-11': 'windows-874', 'iso8859-11': 'windows-874', 'iso885911': 'windows-874', 'tis-620': 'windows-874', 'windows-874': 'windows-874', 'cp1250': 'windows-1250', 'windows-1250': 'windows-1250', 'x-cp1250': 'windows-1250', 'cp1251': 'windows-1251', 
'windows-1251': 'windows-1251', 'x-cp1251': 'windows-1251', 'ansi_x3.4-1968': 'windows-1252', 'ascii': 'windows-1252', 'cp1252': 'windows-1252', 'cp819': 'windows-1252', 'csisolatin1': 'windows-1252', 'ibm819': 'windows-1252', 'iso-8859-1': 'windows-1252', 'iso-ir-100': 'windows-1252', 'iso8859-1': 'windows-1252', 'iso88591': 'windows-1252', 'iso_8859-1': 'windows-1252', 'iso_8859-1:1987': 'windows-1252', 'l1': 'windows-1252', 'latin1': 'windows-1252', 'us-ascii': 'windows-1252', 'windows-1252': 'windows-1252', 'x-cp1252': 'windows-1252', 'cp1253': 'windows-1253', 'windows-1253': 'windows-1253', 'x-cp1253': 'windows-1253', 'cp1254': 'windows-1254', 'csisolatin5': 'windows-1254', 'iso-8859-9': 'windows-1254', 'iso-ir-148': 'windows-1254', 'iso8859-9': 'windows-1254', 'iso88599': 'windows-1254', 'iso_8859-9': 'windows-1254', 'iso_8859-9:1989': 'windows-1254', 'l5': 'windows-1254', 'latin5': 'windows-1254', 'windows-1254': 'windows-1254', 'x-cp1254': 'windows-1254', 'cp1255': 'windows-1255', 'windows-1255': 'windows-1255', 'x-cp1255': 'windows-1255', 'cp1256': 'windows-1256', 'windows-1256': 'windows-1256', 'x-cp1256': 'windows-1256', 'cp1257': 'windows-1257', 'windows-1257': 'windows-1257', 'x-cp1257': 'windows-1257', 'cp1258': 'windows-1258', 'windows-1258': 'windows-1258', 'x-cp1258': 'windows-1258', 'x-mac-cyrillic': 'x-mac-cyrillic', 'x-mac-ukrainian': 'x-mac-cyrillic', 'chinese': 'gbk', 'csgb2312': 'gbk', 'csiso58gb231280': 'gbk', 'gb2312': 'gbk', 'gb_2312': 'gbk', 'gb_2312-80': 'gbk', 'gbk': 'gbk', 'iso-ir-58': 'gbk', 'x-gbk': 'gbk', 'gb18030': 'gb18030', 'hz-gb-2312': 'hz-gb-2312', 'big5': 'big5', 'big5-hkscs': 'big5', 'cn-big5': 'big5', 'csbig5': 'big5', 'x-x-big5': 'big5', 'cseucpkdfmtjapanese': 'euc-jp', 'euc-jp': 'euc-jp', 'x-euc-jp': 'euc-jp', 'csiso2022jp': 'iso-2022-jp', 'iso-2022-jp': 'iso-2022-jp', 'csshiftjis': 'shift_jis', 'ms_kanji': 'shift_jis', 'shift-jis': 'shift_jis', 'shift_jis': 'shift_jis', 'sjis': 'shift_jis', 'windows-31j': 'shift_jis', 'x-sjis': 'shift_jis', 'cseuckr': 'euc-kr', 'csksc56011987': 'euc-kr', 'euc-kr': 'euc-kr', 'iso-ir-149': 'euc-kr', 'korean': 'euc-kr', 'ks_c_5601-1987': 'euc-kr', 'ks_c_5601-1989': 'euc-kr', 'ksc5601': 'euc-kr', 'ksc_5601': 'euc-kr', 'windows-949': 'euc-kr', 'csiso2022kr': 'iso-2022-kr', 'iso-2022-kr': 'iso-2022-kr', 'utf-16be': 'utf-16be', 'utf-16': 'utf-16le', 'utf-16le': 'utf-16le', 'x-user-defined': 'x-user-defined', }
0
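The generated LABELS table above collapses dozens of legacy spellings onto the canonical encoding names from the WHATWG spec. A minimal sketch of what that mapping looks like in practice (the import path assumes the vendored copy inside pip shown in this listing):

# Hypothetical usage sketch; assumes the vendored module is importable.
from pip._vendor.webencodings.labels import LABELS

# Many historical labels collapse onto a single canonical name; per the
# WHATWG spec, even 'ascii' and 'latin1' are aliases of windows-1252.
assert LABELS['latin1'] == 'windows-1252'
assert LABELS['ascii'] == 'windows-1252'
assert LABELS['utf8'] == 'utf-8'

# Keys are already lower-case ASCII; callers normally go through
# webencodings.lookup(), which lower-cases and strips whitespace first.
print('%d labels -> %d encodings' % (len(LABELS), len(set(LABELS.values()))))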
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/webencodings/mklabels.py
""" webencodings.mklabels ~~~~~~~~~~~~~~~~~~~~~ Regenarate the webencodings.labels module. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ import json try: from urllib import urlopen except ImportError: from urllib.request import urlopen def assert_lower(string): assert string == string.lower() return string def generate(url): parts = ['''\ """ webencodings.labels ~~~~~~~~~~~~~~~~~~~ Map encoding labels to their name. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ # XXX Do not edit! # This file is automatically generated by mklabels.py LABELS = { '''] labels = [ (repr(assert_lower(label)).lstrip('u'), repr(encoding['name']).lstrip('u')) for category in json.loads(urlopen(url).read().decode('ascii')) for encoding in category['encodings'] for label in encoding['labels']] max_len = max(len(label) for label, name in labels) parts.extend( ' %s:%s %s,\n' % (label, ' ' * (max_len - len(label)), name) for label, name in labels) parts.append('}') return ''.join(parts) if __name__ == '__main__': print(generate('http://encoding.spec.whatwg.org/encodings.json'))
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/webencodings/x_user_defined.py
# coding: utf-8 """ webencodings.x_user_defined ~~~~~~~~~~~~~~~~~~~~~~~~~~~ An implementation of the x-user-defined encoding. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ from __future__ import unicode_literals import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self, input, errors='strict'): return codecs.charmap_encode(input, errors, encoding_table) def decode(self, input, errors='strict'): return codecs.charmap_decode(input, errors, decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input, self.errors, encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input, self.errors, decoding_table)[0] class StreamWriter(Codec, codecs.StreamWriter): pass class StreamReader(Codec, codecs.StreamReader): pass ### encodings module API codec_info = codecs.CodecInfo( name='x-user-defined', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table # Python 3: # for c in range(256): print(' %r' % chr(c if c < 128 else c + 0xF700)) decoding_table = ( '\x00' '\x01' '\x02' '\x03' '\x04' '\x05' '\x06' '\x07' '\x08' '\t' '\n' '\x0b' '\x0c' '\r' '\x0e' '\x0f' '\x10' '\x11' '\x12' '\x13' '\x14' '\x15' '\x16' '\x17' '\x18' '\x19' '\x1a' '\x1b' '\x1c' '\x1d' '\x1e' '\x1f' ' ' '!' '"' '#' '$' '%' '&' "'" '(' ')' '*' '+' ',' '-' '.' '/' '0' '1' '2' '3' '4' '5' '6' '7' '8' '9' ':' ';' '<' '=' '>' '?' '@' 'A' 'B' 'C' 'D' 'E' 'F' 'G' 'H' 'I' 'J' 'K' 'L' 'M' 'N' 'O' 'P' 'Q' 'R' 'S' 'T' 'U' 'V' 'W' 'X' 'Y' 'Z' '[' '\\' ']' '^' '_' '`' 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j' 'k' 'l' 'm' 'n' 'o' 'p' 'q' 'r' 's' 't' 'u' 'v' 'w' 'x' 'y' 'z' '{' '|' '}' '~' '\x7f' '\uf780' '\uf781' '\uf782' '\uf783' '\uf784' '\uf785' '\uf786' '\uf787' '\uf788' '\uf789' '\uf78a' '\uf78b' '\uf78c' '\uf78d' '\uf78e' '\uf78f' '\uf790' '\uf791' '\uf792' '\uf793' '\uf794' '\uf795' '\uf796' '\uf797' '\uf798' '\uf799' '\uf79a' '\uf79b' '\uf79c' '\uf79d' '\uf79e' '\uf79f' '\uf7a0' '\uf7a1' '\uf7a2' '\uf7a3' '\uf7a4' '\uf7a5' '\uf7a6' '\uf7a7' '\uf7a8' '\uf7a9' '\uf7aa' '\uf7ab' '\uf7ac' '\uf7ad' '\uf7ae' '\uf7af' '\uf7b0' '\uf7b1' '\uf7b2' '\uf7b3' '\uf7b4' '\uf7b5' '\uf7b6' '\uf7b7' '\uf7b8' '\uf7b9' '\uf7ba' '\uf7bb' '\uf7bc' '\uf7bd' '\uf7be' '\uf7bf' '\uf7c0' '\uf7c1' '\uf7c2' '\uf7c3' '\uf7c4' '\uf7c5' '\uf7c6' '\uf7c7' '\uf7c8' '\uf7c9' '\uf7ca' '\uf7cb' '\uf7cc' '\uf7cd' '\uf7ce' '\uf7cf' '\uf7d0' '\uf7d1' '\uf7d2' '\uf7d3' '\uf7d4' '\uf7d5' '\uf7d6' '\uf7d7' '\uf7d8' '\uf7d9' '\uf7da' '\uf7db' '\uf7dc' '\uf7dd' '\uf7de' '\uf7df' '\uf7e0' '\uf7e1' '\uf7e2' '\uf7e3' '\uf7e4' '\uf7e5' '\uf7e6' '\uf7e7' '\uf7e8' '\uf7e9' '\uf7ea' '\uf7eb' '\uf7ec' '\uf7ed' '\uf7ee' '\uf7ef' '\uf7f0' '\uf7f1' '\uf7f2' '\uf7f3' '\uf7f4' '\uf7f5' '\uf7f6' '\uf7f7' '\uf7f8' '\uf7f9' '\uf7fa' '\uf7fb' '\uf7fc' '\uf7fd' '\uf7fe' '\uf7ff' ) ### Encoding table encoding_table = codecs.charmap_build(decoding_table)
0
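The decoding table above maps bytes 0x00-0x7F to themselves and 0x80-0xFF into the private-use range U+F780-U+F7FF, which makes x-user-defined a lossless byte round-trip. A short sketch of that round-trip through the codec_info object (the import path is assumed from this venv layout):

# Hypothetical usage sketch for the codec defined above.
from pip._vendor.webencodings.x_user_defined import codec_info

text, _length = codec_info.decode(b'a\x80\xff')   # charmap decode
assert text == 'a\uf780\uf7ff'                    # high bytes land in U+F7xx

data, _length = codec_info.encode(text)           # charmap encode reverses it
assert data == b'a\x80\xff'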
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/webencodings/__init__.py
# coding: utf-8 """ webencodings ~~~~~~~~~~~~ This is a Python implementation of the `WHATWG Encoding standard <http://encoding.spec.whatwg.org/>`. See README for details. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ from __future__ import unicode_literals import codecs from .labels import LABELS VERSION = '0.5.1' # Some names in Encoding are not valid Python aliases. Remap these. PYTHON_NAMES = { 'iso-8859-8-i': 'iso-8859-8', 'x-mac-cyrillic': 'mac-cyrillic', 'macintosh': 'mac-roman', 'windows-874': 'cp874'} CACHE = {} def ascii_lower(string): r"""Transform (only) ASCII letters to lower case: A-Z is mapped to a-z. :param string: An Unicode string. :returns: A new Unicode string. This is used for `ASCII case-insensitive <http://encoding.spec.whatwg.org/#ascii-case-insensitive>`_ matching of encoding labels. The same matching is also used, among other things, for `CSS keywords <http://dev.w3.org/csswg/css-values/#keywords>`_. This is different from the :meth:`~py:str.lower` method of Unicode strings which also affect non-ASCII characters, sometimes mapping them into the ASCII range: >>> keyword = u'Bac\N{KELVIN SIGN}ground' >>> assert keyword.lower() == u'background' >>> assert ascii_lower(keyword) != keyword.lower() >>> assert ascii_lower(keyword) == u'bac\N{KELVIN SIGN}ground' """ # This turns out to be faster than unicode.translate() return string.encode('utf8').lower().decode('utf8') def lookup(label): """ Look for an encoding by its label. This is the spec’s `get an encoding <http://encoding.spec.whatwg.org/#concept-encoding-get>`_ algorithm. Supported labels are listed there. :param label: A string. :returns: An :class:`Encoding` object, or :obj:`None` for an unknown label. """ # Only strip ASCII whitespace: U+0009, U+000A, U+000C, U+000D, and U+0020. label = ascii_lower(label.strip('\t\n\f\r ')) name = LABELS.get(label) if name is None: return None encoding = CACHE.get(name) if encoding is None: if name == 'x-user-defined': from .x_user_defined import codec_info else: python_name = PYTHON_NAMES.get(name, name) # Any python_name value that gets to here should be valid. codec_info = codecs.lookup(python_name) encoding = Encoding(name, codec_info) CACHE[name] = encoding return encoding def _get_encoding(encoding_or_label): """ Accept either an encoding object or label. :param encoding: An :class:`Encoding` object or a label string. :returns: An :class:`Encoding` object. :raises: :exc:`~exceptions.LookupError` for an unknown label. """ if hasattr(encoding_or_label, 'codec_info'): return encoding_or_label encoding = lookup(encoding_or_label) if encoding is None: raise LookupError('Unknown encoding label: %r' % encoding_or_label) return encoding class Encoding(object): """Reresents a character encoding such as UTF-8, that can be used for decoding or encoding. .. attribute:: name Canonical name of the encoding .. attribute:: codec_info The actual implementation of the encoding, a stdlib :class:`~codecs.CodecInfo` object. See :func:`codecs.register`. """ def __init__(self, name, codec_info): self.name = name self.codec_info = codec_info def __repr__(self): return '<Encoding %s>' % self.name #: The UTF-8 encoding. Should be used for new content and formats. UTF8 = lookup('utf-8') _UTF16LE = lookup('utf-16le') _UTF16BE = lookup('utf-16be') def decode(input, fallback_encoding, errors='replace'): """ Decode a single string. :param input: A byte string :param fallback_encoding: An :class:`Encoding` object or a label string. 
The encoding to use if :obj:`input` does not have a BOM. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. :return: A ``(output, encoding)`` tuple of a Unicode string and an :obj:`Encoding`. """ # Fail early if `encoding` is an invalid label. fallback_encoding = _get_encoding(fallback_encoding) bom_encoding, input = _detect_bom(input) encoding = bom_encoding or fallback_encoding return encoding.codec_info.decode(input, errors)[0], encoding def _detect_bom(input): """Return (bom_encoding, input), with any BOM removed from the input.""" if input.startswith(b'\xFF\xFE'): return _UTF16LE, input[2:] if input.startswith(b'\xFE\xFF'): return _UTF16BE, input[2:] if input.startswith(b'\xEF\xBB\xBF'): return UTF8, input[3:] return None, input def encode(input, encoding=UTF8, errors='strict'): """ Encode a single string. :param input: A Unicode string. :param encoding: An :class:`Encoding` object or a label string. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. :return: A byte string. """ return _get_encoding(encoding).codec_info.encode(input, errors)[0] def iter_decode(input, fallback_encoding, errors='replace'): """ "Pull"-based decoder. :param input: An iterable of byte strings. The input is first consumed just enough to determine the encoding based on the presence of a BOM, then consumed on demand when the return value is iterated. :param fallback_encoding: An :class:`Encoding` object or a label string. The encoding to use if :obj:`input` does not have a BOM. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. :returns: An ``(output, encoding)`` tuple. :obj:`output` is an iterable of Unicode strings, :obj:`encoding` is the :obj:`Encoding` that is being used. """ decoder = IncrementalDecoder(fallback_encoding, errors) generator = _iter_decode_generator(input, decoder) encoding = next(generator) return generator, encoding def _iter_decode_generator(input, decoder): """Return a generator that first yields the :obj:`Encoding`, then yields output chunks as Unicode strings. """ decode = decoder.decode input = iter(input) for chunk in input: output = decode(chunk) if output: assert decoder.encoding is not None yield decoder.encoding yield output break else: # Input exhausted without determining the encoding output = decode(b'', final=True) assert decoder.encoding is not None yield decoder.encoding if output: yield output return for chunk in input: output = decode(chunk) if output: yield output output = decode(b'', final=True) if output: yield output def iter_encode(input, encoding=UTF8, errors='strict'): """ “Pull”-based encoder. :param input: An iterable of Unicode strings. :param encoding: An :class:`Encoding` object or a label string. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. :returns: An iterable of byte strings. """ # Fail early if `encoding` is an invalid label. encode = IncrementalEncoder(encoding, errors).encode return _iter_encode_generator(input, encode) def _iter_encode_generator(input, encode): for chunk in input: output = encode(chunk) if output: yield output output = encode('', final=True) if output: yield output class IncrementalDecoder(object): """ “Push”-based decoder. 
:param fallback_encoding: An :class:`Encoding` object or a label string. The encoding to use if :obj:`input` does not have a BOM. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. """ def __init__(self, fallback_encoding, errors='replace'): # Fail early if `encoding` is an invalid label. self._fallback_encoding = _get_encoding(fallback_encoding) self._errors = errors self._buffer = b'' self._decoder = None #: The actual :class:`Encoding` that is being used, #: or :obj:`None` if that is not determined yet. #: (i.e. if there is not enough input yet to determine #: if there is a BOM.) self.encoding = None # Not known yet. def decode(self, input, final=False): """Decode one chunk of the input. :param input: A byte string. :param final: Indicate that no more input is available. Must be :obj:`True` if this is the last call. :returns: A Unicode string. """ decoder = self._decoder if decoder is not None: return decoder(input, final) input = self._buffer + input encoding, input = _detect_bom(input) if encoding is None: if len(input) < 3 and not final: # Not enough data yet. self._buffer = input return '' else: # No BOM encoding = self._fallback_encoding decoder = encoding.codec_info.incrementaldecoder(self._errors).decode self._decoder = decoder self.encoding = encoding return decoder(input, final) class IncrementalEncoder(object): """ “Push”-based encoder. :param encoding: An :class:`Encoding` object or a label string. :param errors: Type of error handling. See :func:`codecs.register`. :raises: :exc:`~exceptions.LookupError` for an unknown encoding label. .. method:: encode(input, final=False) :param input: A Unicode string. :param final: Indicate that no more input is available. Must be :obj:`True` if this is the last call. :returns: A byte string. """ def __init__(self, encoding=UTF8, errors='strict'): encoding = _get_encoding(encoding) self.encode = encoding.codec_info.incrementalencoder(errors).encode
0
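The decode() and lookup() functions above implement the WHATWG rules: a BOM always wins over the fallback label, and lookup() is ASCII case-insensitive. A minimal sketch, assuming the vendored import path used elsewhere in this listing:

# Hypothetical usage sketch of the public API defined above.
from pip._vendor.webencodings import decode, encode, lookup

# With no BOM, the fallback label decides; 'latin1' is really windows-1252.
assert decode(b'caf\xe9', 'latin1') == ('café', lookup('latin1'))

# A UTF-8 BOM overrides the fallback entirely.
assert decode(b'\xef\xbb\xbfcaf\xc3\xa9', 'latin1') == ('café', lookup('utf-8'))

# Encoding defaults to UTF-8, per the spec's recommendation for new content.
assert encode('café') == b'caf\xc3\xa9'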
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/webencodings/tests.py
# coding: utf-8 """ webencodings.tests ~~~~~~~~~~~~~~~~~~ A basic test suite for Encoding. :copyright: Copyright 2012 by Simon Sapin :license: BSD, see LICENSE for details. """ from __future__ import unicode_literals from . import (lookup, LABELS, decode, encode, iter_decode, iter_encode, IncrementalDecoder, IncrementalEncoder, UTF8) def assert_raises(exception, function, *args, **kwargs): try: function(*args, **kwargs) except exception: return else: # pragma: no cover raise AssertionError('Did not raise %s.' % exception) def test_labels(): assert lookup('utf-8').name == 'utf-8' assert lookup('Utf-8').name == 'utf-8' assert lookup('UTF-8').name == 'utf-8' assert lookup('utf8').name == 'utf-8' assert lookup('utf8').name == 'utf-8' assert lookup('utf8 ').name == 'utf-8' assert lookup(' \r\nutf8\t').name == 'utf-8' assert lookup('u8') is None # Python label. assert lookup('utf-8 ') is None # Non-ASCII white space. assert lookup('US-ASCII').name == 'windows-1252' assert lookup('iso-8859-1').name == 'windows-1252' assert lookup('latin1').name == 'windows-1252' assert lookup('LATIN1').name == 'windows-1252' assert lookup('latin-1') is None assert lookup('LATİN1') is None # ASCII-only case insensitivity. def test_all_labels(): for label in LABELS: assert decode(b'', label) == ('', lookup(label)) assert encode('', label) == b'' for repeat in [0, 1, 12]: output, _ = iter_decode([b''] * repeat, label) assert list(output) == [] assert list(iter_encode([''] * repeat, label)) == [] decoder = IncrementalDecoder(label) assert decoder.decode(b'') == '' assert decoder.decode(b'', final=True) == '' encoder = IncrementalEncoder(label) assert encoder.encode('') == b'' assert encoder.encode('', final=True) == b'' # All encoding names are valid labels too: for name in set(LABELS.values()): assert lookup(name).name == name def test_invalid_label(): assert_raises(LookupError, decode, b'\xEF\xBB\xBF\xc3\xa9', 'invalid') assert_raises(LookupError, encode, 'é', 'invalid') assert_raises(LookupError, iter_decode, [], 'invalid') assert_raises(LookupError, iter_encode, [], 'invalid') assert_raises(LookupError, IncrementalDecoder, 'invalid') assert_raises(LookupError, IncrementalEncoder, 'invalid') def test_decode(): assert decode(b'\x80', 'latin1') == ('€', lookup('latin1')) assert decode(b'\x80', lookup('latin1')) == ('€', lookup('latin1')) assert decode(b'\xc3\xa9', 'utf8') == ('é', lookup('utf8')) assert decode(b'\xc3\xa9', UTF8) == ('é', lookup('utf8')) assert decode(b'\xc3\xa9', 'ascii') == ('é', lookup('ascii')) assert decode(b'\xEF\xBB\xBF\xc3\xa9', 'ascii') == ('é', lookup('utf8')) # UTF-8 with BOM assert decode(b'\xFE\xFF\x00\xe9', 'ascii') == ('é', lookup('utf-16be')) # UTF-16-BE with BOM assert decode(b'\xFF\xFE\xe9\x00', 'ascii') == ('é', lookup('utf-16le')) # UTF-16-LE with BOM assert decode(b'\xFE\xFF\xe9\x00', 'ascii') == ('\ue900', lookup('utf-16be')) assert decode(b'\xFF\xFE\x00\xe9', 'ascii') == ('\ue900', lookup('utf-16le')) assert decode(b'\x00\xe9', 'UTF-16BE') == ('é', lookup('utf-16be')) assert decode(b'\xe9\x00', 'UTF-16LE') == ('é', lookup('utf-16le')) assert decode(b'\xe9\x00', 'UTF-16') == ('é', lookup('utf-16le')) assert decode(b'\xe9\x00', 'UTF-16BE') == ('\ue900', lookup('utf-16be')) assert decode(b'\x00\xe9', 'UTF-16LE') == ('\ue900', lookup('utf-16le')) assert decode(b'\x00\xe9', 'UTF-16') == ('\ue900', lookup('utf-16le')) def test_encode(): assert encode('é', 'latin1') == b'\xe9' assert encode('é', 'utf8') == b'\xc3\xa9' assert encode('é', 'utf8') == b'\xc3\xa9' assert encode('é', 
'utf-16') == b'\xe9\x00' assert encode('é', 'utf-16le') == b'\xe9\x00' assert encode('é', 'utf-16be') == b'\x00\xe9' def test_iter_decode(): def iter_decode_to_string(input, fallback_encoding): output, _encoding = iter_decode(input, fallback_encoding) return ''.join(output) assert iter_decode_to_string([], 'latin1') == '' assert iter_decode_to_string([b''], 'latin1') == '' assert iter_decode_to_string([b'\xe9'], 'latin1') == 'é' assert iter_decode_to_string([b'hello'], 'latin1') == 'hello' assert iter_decode_to_string([b'he', b'llo'], 'latin1') == 'hello' assert iter_decode_to_string([b'hell', b'o'], 'latin1') == 'hello' assert iter_decode_to_string([b'\xc3\xa9'], 'latin1') == 'é' assert iter_decode_to_string([b'\xEF\xBB\xBF\xc3\xa9'], 'latin1') == 'é' assert iter_decode_to_string([ b'\xEF\xBB\xBF', b'\xc3', b'\xa9'], 'latin1') == 'é' assert iter_decode_to_string([ b'\xEF\xBB\xBF', b'a', b'\xc3'], 'latin1') == 'a\uFFFD' assert iter_decode_to_string([ b'', b'\xEF', b'', b'', b'\xBB\xBF\xc3', b'\xa9'], 'latin1') == 'é' assert iter_decode_to_string([b'\xEF\xBB\xBF'], 'latin1') == '' assert iter_decode_to_string([b'\xEF\xBB'], 'latin1') == 'ï»' assert iter_decode_to_string([b'\xFE\xFF\x00\xe9'], 'latin1') == 'é' assert iter_decode_to_string([b'\xFF\xFE\xe9\x00'], 'latin1') == 'é' assert iter_decode_to_string([ b'', b'\xFF', b'', b'', b'\xFE\xe9', b'\x00'], 'latin1') == 'é' assert iter_decode_to_string([ b'', b'h\xe9', b'llo'], 'x-user-defined') == 'h\uF7E9llo' def test_iter_encode(): assert b''.join(iter_encode([], 'latin1')) == b'' assert b''.join(iter_encode([''], 'latin1')) == b'' assert b''.join(iter_encode(['é'], 'latin1')) == b'\xe9' assert b''.join(iter_encode(['', 'é', '', ''], 'latin1')) == b'\xe9' assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16')) == b'\xe9\x00' assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16le')) == b'\xe9\x00' assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16be')) == b'\x00\xe9' assert b''.join(iter_encode([ '', 'h\uF7E9', '', 'llo'], 'x-user-defined')) == b'h\xe9llo' def test_x_user_defined(): encoded = b'2,\x0c\x0b\x1aO\xd9#\xcb\x0f\xc9\xbbt\xcf\xa8\xca' decoded = '2,\x0c\x0b\x1aO\uf7d9#\uf7cb\x0f\uf7c9\uf7bbt\uf7cf\uf7a8\uf7ca' assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined')) assert encode(decoded, 'x-user-defined') == encoded encoded = b'aa' decoded = 'aa' assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined')) assert encode(decoded, 'x-user-defined') == encoded
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/progress/__init__.py
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from __future__ import division, print_function from collections import deque from datetime import timedelta from math import ceil from sys import stderr try: from time import monotonic except ImportError: from time import time as monotonic __version__ = '1.5' HIDE_CURSOR = '\x1b[?25l' SHOW_CURSOR = '\x1b[?25h' class Infinite(object): file = stderr sma_window = 10 # Simple Moving Average window check_tty = True hide_cursor = True def __init__(self, message='', **kwargs): self.index = 0 self.start_ts = monotonic() self.avg = 0 self._avg_update_ts = self.start_ts self._ts = self.start_ts self._xput = deque(maxlen=self.sma_window) for key, val in kwargs.items(): setattr(self, key, val) self._width = 0 self.message = message if self.file and self.is_tty(): if self.hide_cursor: print(HIDE_CURSOR, end='', file=self.file) print(self.message, end='', file=self.file) self.file.flush() def __getitem__(self, key): if key.startswith('_'): return None return getattr(self, key, None) @property def elapsed(self): return int(monotonic() - self.start_ts) @property def elapsed_td(self): return timedelta(seconds=self.elapsed) def update_avg(self, n, dt): if n > 0: xput_len = len(self._xput) self._xput.append(dt / n) now = monotonic() # update when we're still filling _xput, then after every second if (xput_len < self.sma_window or now - self._avg_update_ts > 1): self.avg = sum(self._xput) / len(self._xput) self._avg_update_ts = now def update(self): pass def start(self): pass def clearln(self): if self.file and self.is_tty(): print('\r\x1b[K', end='', file=self.file) def write(self, s): if self.file and self.is_tty(): line = self.message + s.ljust(self._width) print('\r' + line, end='', file=self.file) self._width = max(self._width, len(s)) self.file.flush() def writeln(self, line): if self.file and self.is_tty(): self.clearln() print(line, end='', file=self.file) self.file.flush() def finish(self): if self.file and self.is_tty(): print(file=self.file) if self.hide_cursor: print(SHOW_CURSOR, end='', file=self.file) def is_tty(self): return self.file.isatty() if self.check_tty else True def next(self, n=1): now = monotonic() dt = now - self._ts self.update_avg(n, dt) self._ts = now self.index = self.index + n self.update() def iter(self, it): with self: for x in it: yield x self.next() def __enter__(self): self.start() return self def __exit__(self, exc_type, exc_val, exc_tb): self.finish() class Progress(Infinite): def __init__(self, *args, **kwargs): super(Progress, self).__init__(*args, **kwargs) self.max = kwargs.get('max', 100) @property def eta(self): return int(ceil(self.avg * self.remaining)) @property def eta_td(self): return timedelta(seconds=self.eta) @property def percent(self): return self.progress * 100 @property def progress(self): 
return min(1, self.index / self.max) @property def remaining(self): return max(self.max - self.index, 0) def start(self): self.update() def goto(self, index): incr = index - self.index self.next(incr) def iter(self, it): try: self.max = len(it) except TypeError: pass with self: for x in it: yield x self.next()
0
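Infinite tracks throughput with a simple moving average, while Progress adds max/percent/eta on top; both double as context managers that call start() and finish(). A minimal usage sketch (output only appears on a TTY, and the import path assumes this vendored copy):

# Hypothetical usage sketch for the Progress class defined above.
import time
from pip._vendor.progress import Progress

with Progress('Working ', max=20) as bar:   # __enter__ calls start()
    for _ in range(20):
        time.sleep(0.01)
        bar.next()                          # advances index, updates the average

# iter() wraps any iterable the same way, guessing max from len() when it can:
for _ in Progress('Items ').iter(range(20)):
    pass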
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/progress/bar.py
# -*- coding: utf-8 -*- # Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from __future__ import unicode_literals import sys from . import Progress class Bar(Progress): width = 32 suffix = '%(index)d/%(max)d' bar_prefix = ' |' bar_suffix = '| ' empty_fill = ' ' fill = '#' def update(self): filled_length = int(self.width * self.progress) empty_length = self.width - filled_length message = self.message % self bar = self.fill * filled_length empty = self.empty_fill * empty_length suffix = self.suffix % self line = ''.join([message, self.bar_prefix, bar, empty, self.bar_suffix, suffix]) self.writeln(line) class ChargingBar(Bar): suffix = '%(percent)d%%' bar_prefix = ' ' bar_suffix = ' ' empty_fill = '∙' fill = '█' class FillingSquaresBar(ChargingBar): empty_fill = '▢' fill = '▣' class FillingCirclesBar(ChargingBar): empty_fill = '◯' fill = '◉' class IncrementalBar(Bar): if sys.platform.startswith('win'): phases = (u' ', u'▌', u'█') else: phases = (' ', '▏', '▎', '▍', '▌', '▋', '▊', '▉', '█') def update(self): nphases = len(self.phases) filled_len = self.width * self.progress nfull = int(filled_len) # Number of full chars phase = int((filled_len - nfull) * nphases) # Phase of last char nempty = self.width - nfull # Number of empty chars message = self.message % self bar = self.phases[-1] * nfull current = self.phases[phase] if phase > 0 else '' empty = self.empty_fill * max(0, nempty - len(current)) suffix = self.suffix % self line = ''.join([message, self.bar_prefix, bar, current, empty, self.bar_suffix, suffix]) self.writeln(line) class PixelBar(IncrementalBar): phases = ('⡀', '⡄', '⡆', '⡇', '⣇', '⣧', '⣷', '⣿') class ShadyBar(IncrementalBar): phases = (' ', '░', '▒', '▓', '█')
0
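Because Infinite.__getitem__ exposes attributes to %-formatting, the suffix string can reference any attribute or property of the bar (index, max, percent, eta, ...). A small sketch of a customized bar built on the classes above (the subclass name is hypothetical):

# Hypothetical subclass; suffix fields resolve through the bar's __getitem__.
from pip._vendor.progress.bar import IncrementalBar

class EtaBar(IncrementalBar):
    suffix = '%(percent).1f%% (eta: %(eta)ds)'

bar = EtaBar('Copying', max=50)
for _ in range(50):
    bar.next()
bar.finish()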
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/progress/spinner.py
# -*- coding: utf-8 -*- # Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from __future__ import unicode_literals from . import Infinite class Spinner(Infinite): phases = ('-', '\\', '|', '/') hide_cursor = True def update(self): i = self.index % len(self.phases) self.write(self.phases[i]) class PieSpinner(Spinner): phases = ['◷', '◶', '◵', '◴'] class MoonSpinner(Spinner): phases = ['◑', '◒', '◐', '◓'] class LineSpinner(Spinner): phases = ['⎺', '⎻', '⎼', '⎽', '⎼', '⎻'] class PixelSpinner(Spinner): phases = ['⣾', '⣷', '⣯', '⣟', '⡿', '⢿', '⣻', '⣽']
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/progress/counter.py
# -*- coding: utf-8 -*- # Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com> # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from __future__ import unicode_literals from . import Infinite, Progress class Counter(Infinite): def update(self): self.write(str(self.index)) class Countdown(Progress): def update(self): self.write(str(self.remaining)) class Stack(Progress): phases = (' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█') def update(self): nphases = len(self.phases) i = min(nphases - 1, int(self.progress * nphases)) self.write(self.phases[i]) class Pie(Stack): phases = ('○', '◔', '◑', '◕', '●')
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/pep517/build.py
"""Build a project using PEP 517 hooks. """ import argparse import logging import os from pip._vendor import toml import shutil from .envbuild import BuildEnvironment from .wrappers import Pep517HookCaller from .dirtools import tempdir, mkdir_p from .compat import FileNotFoundError log = logging.getLogger(__name__) def validate_system(system): """ Ensure build system has the requisite fields. """ required = {'requires', 'build-backend'} if not (required <= set(system)): message = "Missing required fields: {missing}".format( missing=required-set(system), ) raise ValueError(message) def load_system(source_dir): """ Load the build system from a source dir (pyproject.toml). """ pyproject = os.path.join(source_dir, 'pyproject.toml') with open(pyproject) as f: pyproject_data = toml.load(f) return pyproject_data['build-system'] def compat_system(source_dir): """ Given a source dir, attempt to get a build system backend and requirements from pyproject.toml. Fallback to setuptools but only if the file was not found or a build system was not indicated. """ try: system = load_system(source_dir) except (FileNotFoundError, KeyError): system = {} system.setdefault( 'build-backend', 'setuptools.build_meta:__legacy__', ) system.setdefault('requires', ['setuptools', 'wheel']) return system def _do_build(hooks, env, dist, dest): get_requires_name = 'get_requires_for_build_{dist}'.format(**locals()) get_requires = getattr(hooks, get_requires_name) reqs = get_requires({}) log.info('Got build requires: %s', reqs) env.pip_install(reqs) log.info('Installed dynamic build dependencies') with tempdir() as td: log.info('Trying to build %s in %s', dist, td) build_name = 'build_{dist}'.format(**locals()) build = getattr(hooks, build_name) filename = build(td, {}) source = os.path.join(td, filename) shutil.move(source, os.path.join(dest, os.path.basename(filename))) def build(source_dir, dist, dest=None, system=None): system = system or load_system(source_dir) dest = os.path.join(source_dir, dest or 'dist') mkdir_p(dest) validate_system(system) hooks = Pep517HookCaller( source_dir, system['build-backend'], system.get('backend-path') ) with BuildEnvironment() as env: env.pip_install(system['requires']) _do_build(hooks, env, dist, dest) parser = argparse.ArgumentParser() parser.add_argument( 'source_dir', help="A directory containing pyproject.toml", ) parser.add_argument( '--binary', '-b', action='store_true', default=False, ) parser.add_argument( '--source', '-s', action='store_true', default=False, ) parser.add_argument( '--out-dir', '-o', help="Destination in which to save the builds relative to source dir", ) def main(args): # determine which dists to build dists = list(filter(None, ( 'sdist' if args.source or not args.binary else None, 'wheel' if args.binary or not args.source else None, ))) for dist in dists: build(args.source_dir, dist, args.out_dir) if __name__ == '__main__': main(parser.parse_args())
0
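The build() function above is also usable programmatically: load or synthesize the build-system table, then build each distribution. A hedged sketch (the project path is a placeholder for a directory containing pyproject.toml):

# Hypothetical driver; '/path/to/project' is a placeholder.
from pip._vendor.pep517.build import build, compat_system

source_dir = '/path/to/project'
system = compat_system(source_dir)   # falls back to setuptools if unspecified
for dist in ('sdist', 'wheel'):
    build(source_dir, dist, dest='dist', system=system)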
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/pep517/_in_process.py
"""This is invoked in a subprocess to call the build backend hooks. It expects: - Command line args: hook_name, control_dir - Environment variables: PEP517_BUILD_BACKEND=entry.point:spec PEP517_BACKEND_PATH=paths (separated with os.pathsep) - control_dir/input.json: - {"kwargs": {...}} Results: - control_dir/output.json - {"return_val": ...} """ from glob import glob from importlib import import_module import json import os import os.path from os.path import join as pjoin import re import shutil import sys import traceback # This file is run as a script, and `import compat` is not zip-safe, so we # include write_json() and read_json() from compat.py. # # Handle reading and writing JSON in UTF-8, on Python 3 and 2. if sys.version_info[0] >= 3: # Python 3 def write_json(obj, path, **kwargs): with open(path, 'w', encoding='utf-8') as f: json.dump(obj, f, **kwargs) def read_json(path): with open(path, 'r', encoding='utf-8') as f: return json.load(f) else: # Python 2 def write_json(obj, path, **kwargs): with open(path, 'wb') as f: json.dump(obj, f, encoding='utf-8', **kwargs) def read_json(path): with open(path, 'rb') as f: return json.load(f) class BackendUnavailable(Exception): """Raised if we cannot import the backend""" def __init__(self, traceback): self.traceback = traceback class BackendInvalid(Exception): """Raised if the backend is invalid""" def __init__(self, message): self.message = message class HookMissing(Exception): """Raised if a hook is missing and we are not executing the fallback""" def contained_in(filename, directory): """Test if a file is located within the given directory.""" filename = os.path.normcase(os.path.abspath(filename)) directory = os.path.normcase(os.path.abspath(directory)) return os.path.commonprefix([filename, directory]) == directory def _build_backend(): """Find and load the build backend""" # Add in-tree backend directories to the front of sys.path. backend_path = os.environ.get('PEP517_BACKEND_PATH') if backend_path: extra_pathitems = backend_path.split(os.pathsep) sys.path[:0] = extra_pathitems ep = os.environ['PEP517_BUILD_BACKEND'] mod_path, _, obj_path = ep.partition(':') try: obj = import_module(mod_path) except ImportError: raise BackendUnavailable(traceback.format_exc()) if backend_path: if not any( contained_in(obj.__file__, path) for path in extra_pathitems ): raise BackendInvalid("Backend was not loaded from backend-path") if obj_path: for path_part in obj_path.split('.'): obj = getattr(obj, path_part) return obj def get_requires_for_build_wheel(config_settings): """Invoke the optional get_requires_for_build_wheel hook Returns [] if the hook is not defined. """ backend = _build_backend() try: hook = backend.get_requires_for_build_wheel except AttributeError: return [] else: return hook(config_settings) def prepare_metadata_for_build_wheel( metadata_directory, config_settings, _allow_fallback): """Invoke optional prepare_metadata_for_build_wheel Implements a fallback by building a wheel if the hook isn't defined, unless _allow_fallback is False in which case HookMissing is raised. 
""" backend = _build_backend() try: hook = backend.prepare_metadata_for_build_wheel except AttributeError: if not _allow_fallback: raise HookMissing() return _get_wheel_metadata_from_wheel(backend, metadata_directory, config_settings) else: return hook(metadata_directory, config_settings) WHEEL_BUILT_MARKER = 'PEP517_ALREADY_BUILT_WHEEL' def _dist_info_files(whl_zip): """Identify the .dist-info folder inside a wheel ZipFile.""" res = [] for path in whl_zip.namelist(): m = re.match(r'[^/\\]+-[^/\\]+\.dist-info/', path) if m: res.append(path) if res: return res raise Exception("No .dist-info folder found in wheel") def _get_wheel_metadata_from_wheel( backend, metadata_directory, config_settings): """Build a wheel and extract the metadata from it. Fallback for when the build backend does not define the 'get_wheel_metadata' hook. """ from zipfile import ZipFile whl_basename = backend.build_wheel(metadata_directory, config_settings) with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'): pass # Touch marker file whl_file = os.path.join(metadata_directory, whl_basename) with ZipFile(whl_file) as zipf: dist_info = _dist_info_files(zipf) zipf.extractall(path=metadata_directory, members=dist_info) return dist_info[0].split('/')[0] def _find_already_built_wheel(metadata_directory): """Check for a wheel already built during the get_wheel_metadata hook. """ if not metadata_directory: return None metadata_parent = os.path.dirname(metadata_directory) if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)): return None whl_files = glob(os.path.join(metadata_parent, '*.whl')) if not whl_files: print('Found wheel built marker, but no .whl files') return None if len(whl_files) > 1: print('Found multiple .whl files; unspecified behaviour. ' 'Will call build_wheel.') return None # Exactly one .whl file return whl_files[0] def build_wheel(wheel_directory, config_settings, metadata_directory=None): """Invoke the mandatory build_wheel hook. If a wheel was already built in the prepare_metadata_for_build_wheel fallback, this will copy it rather than rebuilding the wheel. """ prebuilt_whl = _find_already_built_wheel(metadata_directory) if prebuilt_whl: shutil.copy2(prebuilt_whl, wheel_directory) return os.path.basename(prebuilt_whl) return _build_backend().build_wheel(wheel_directory, config_settings, metadata_directory) def get_requires_for_build_sdist(config_settings): """Invoke the optional get_requires_for_build_wheel hook Returns [] if the hook is not defined. 
""" backend = _build_backend() try: hook = backend.get_requires_for_build_sdist except AttributeError: return [] else: return hook(config_settings) class _DummyException(Exception): """Nothing should ever raise this exception""" class GotUnsupportedOperation(Exception): """For internal use when backend raises UnsupportedOperation""" def __init__(self, traceback): self.traceback = traceback def build_sdist(sdist_directory, config_settings): """Invoke the mandatory build_sdist hook.""" backend = _build_backend() try: return backend.build_sdist(sdist_directory, config_settings) except getattr(backend, 'UnsupportedOperation', _DummyException): raise GotUnsupportedOperation(traceback.format_exc()) HOOK_NAMES = { 'get_requires_for_build_wheel', 'prepare_metadata_for_build_wheel', 'build_wheel', 'get_requires_for_build_sdist', 'build_sdist', } def main(): if len(sys.argv) < 3: sys.exit("Needs args: hook_name, control_dir") hook_name = sys.argv[1] control_dir = sys.argv[2] if hook_name not in HOOK_NAMES: sys.exit("Unknown hook: %s" % hook_name) hook = globals()[hook_name] hook_input = read_json(pjoin(control_dir, 'input.json')) json_out = {'unsupported': False, 'return_val': None} try: json_out['return_val'] = hook(**hook_input['kwargs']) except BackendUnavailable as e: json_out['no_backend'] = True json_out['traceback'] = e.traceback except BackendInvalid as e: json_out['backend_invalid'] = True json_out['backend_error'] = e.message except GotUnsupportedOperation as e: json_out['unsupported'] = True json_out['traceback'] = e.traceback except HookMissing: json_out['hook_missing'] = True write_json(json_out, pjoin(control_dir, 'output.json'), indent=2) if __name__ == '__main__': main()
0
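The control-directory protocol described in the module docstring is easy to exercise by hand. A rough sketch of what the parent process does (the path to _in_process.py and the setuptools backend are assumptions for illustration; the real caller is Pep517HookCaller, shown later in this listing):

# Hypothetical driver for the subprocess protocol above.
import json, os, subprocess, sys, tempfile

control_dir = tempfile.mkdtemp()
with open(os.path.join(control_dir, 'input.json'), 'w') as f:
    json.dump({'kwargs': {'config_settings': {}}}, f)

env = dict(os.environ, PEP517_BUILD_BACKEND='setuptools.build_meta')
subprocess.check_call(
    [sys.executable, '_in_process.py',           # assumed to be in the cwd
     'get_requires_for_build_wheel', control_dir],
    env=env)

with open(os.path.join(control_dir, 'output.json')) as f:
    print(json.load(f)['return_val'])            # e.g. ['wheel']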
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/pep517/compat.py
"""Python 2/3 compatibility""" import json import sys # Handle reading and writing JSON in UTF-8, on Python 3 and 2. if sys.version_info[0] >= 3: # Python 3 def write_json(obj, path, **kwargs): with open(path, 'w', encoding='utf-8') as f: json.dump(obj, f, **kwargs) def read_json(path): with open(path, 'r', encoding='utf-8') as f: return json.load(f) else: # Python 2 def write_json(obj, path, **kwargs): with open(path, 'wb') as f: json.dump(obj, f, encoding='utf-8', **kwargs) def read_json(path): with open(path, 'rb') as f: return json.load(f) # FileNotFoundError try: FileNotFoundError = FileNotFoundError except NameError: FileNotFoundError = IOError
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/pep517/dirtools.py
import os import io import contextlib import tempfile import shutil import errno import zipfile @contextlib.contextmanager def tempdir(): """Create a temporary directory in a context manager.""" td = tempfile.mkdtemp() try: yield td finally: shutil.rmtree(td) def mkdir_p(*args, **kwargs): """Like `mkdir`, but does not raise an exception if the directory already exists. """ try: return os.mkdir(*args, **kwargs) except OSError as exc: if exc.errno != errno.EEXIST: raise def dir_to_zipfile(root): """Construct an in-memory zip file for a directory.""" buffer = io.BytesIO() zip_file = zipfile.ZipFile(buffer, 'w') for dirpath, dirs, files in os.walk(root): for path in dirs: fs_path = os.path.join(dirpath, path) rel_path = os.path.relpath(fs_path, root) zip_file.writestr(rel_path + '/', '') for path in files: fs_path = os.path.join(dirpath, path) rel_path = os.path.relpath(fs_path, root) zip_file.write(fs_path, rel_path) return zip_file
0
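A tiny usage sketch for the helpers above: tempdir() cleans up on exit, and dir_to_zipfile() returns an open in-memory ZipFile mirroring the directory tree:

# Hypothetical usage sketch.
import os
from pip._vendor.pep517.dirtools import tempdir, dir_to_zipfile

with tempdir() as td:                      # removed automatically on exit
    with open(os.path.join(td, 'hello.txt'), 'w') as f:
        f.write('hi')
    zf = dir_to_zipfile(td)
    print(zf.namelist())                   # ['hello.txt']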
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/pep517/check.py
"""Check a project and backend by attempting to build using PEP 517 hooks. """ import argparse import logging import os from os.path import isfile, join as pjoin from pip._vendor.toml import TomlDecodeError, load as toml_load import shutil from subprocess import CalledProcessError import sys import tarfile from tempfile import mkdtemp import zipfile from .colorlog import enable_colourful_output from .envbuild import BuildEnvironment from .wrappers import Pep517HookCaller log = logging.getLogger(__name__) def check_build_sdist(hooks, build_sys_requires): with BuildEnvironment() as env: try: env.pip_install(build_sys_requires) log.info('Installed static build dependencies') except CalledProcessError: log.error('Failed to install static build dependencies') return False try: reqs = hooks.get_requires_for_build_sdist({}) log.info('Got build requires: %s', reqs) except Exception: log.error('Failure in get_requires_for_build_sdist', exc_info=True) return False try: env.pip_install(reqs) log.info('Installed dynamic build dependencies') except CalledProcessError: log.error('Failed to install dynamic build dependencies') return False td = mkdtemp() log.info('Trying to build sdist in %s', td) try: try: filename = hooks.build_sdist(td, {}) log.info('build_sdist returned %r', filename) except Exception: log.info('Failure in build_sdist', exc_info=True) return False if not filename.endswith('.tar.gz'): log.error( "Filename %s doesn't have .tar.gz extension", filename) return False path = pjoin(td, filename) if isfile(path): log.info("Output file %s exists", path) else: log.error("Output file %s does not exist", path) return False if tarfile.is_tarfile(path): log.info("Output file is a tar file") else: log.error("Output file is not a tar file") return False finally: shutil.rmtree(td) return True def check_build_wheel(hooks, build_sys_requires): with BuildEnvironment() as env: try: env.pip_install(build_sys_requires) log.info('Installed static build dependencies') except CalledProcessError: log.error('Failed to install static build dependencies') return False try: reqs = hooks.get_requires_for_build_wheel({}) log.info('Got build requires: %s', reqs) except Exception: log.error('Failure in get_requires_for_build_sdist', exc_info=True) return False try: env.pip_install(reqs) log.info('Installed dynamic build dependencies') except CalledProcessError: log.error('Failed to install dynamic build dependencies') return False td = mkdtemp() log.info('Trying to build wheel in %s', td) try: try: filename = hooks.build_wheel(td, {}) log.info('build_wheel returned %r', filename) except Exception: log.info('Failure in build_wheel', exc_info=True) return False if not filename.endswith('.whl'): log.error("Filename %s doesn't have .whl extension", filename) return False path = pjoin(td, filename) if isfile(path): log.info("Output file %s exists", path) else: log.error("Output file %s does not exist", path) return False if zipfile.is_zipfile(path): log.info("Output file is a zip file") else: log.error("Output file is not a zip file") return False finally: shutil.rmtree(td) return True def check(source_dir): pyproject = pjoin(source_dir, 'pyproject.toml') if isfile(pyproject): log.info('Found pyproject.toml') else: log.error('Missing pyproject.toml') return False try: with open(pyproject) as f: pyproject_data = toml_load(f) # Ensure the mandatory data can be loaded buildsys = pyproject_data['build-system'] requires = buildsys['requires'] backend = buildsys['build-backend'] backend_path = buildsys.get('backend-path') 
log.info('Loaded pyproject.toml') except (TomlDecodeError, KeyError): log.error("Invalid pyproject.toml", exc_info=True) return False hooks = Pep517HookCaller(source_dir, backend, backend_path) sdist_ok = check_build_sdist(hooks, requires) wheel_ok = check_build_wheel(hooks, requires) if not sdist_ok: log.warning('Sdist checks failed; scroll up to see') if not wheel_ok: log.warning('Wheel checks failed') return sdist_ok and wheel_ok def main(argv=None): ap = argparse.ArgumentParser() ap.add_argument( 'source_dir', help="A directory containing pyproject.toml") args = ap.parse_args(argv) enable_colourful_output() ok = check(args.source_dir) if ok: print(ansi('Checks passed', 'green')) else: print(ansi('Checks failed', 'red')) sys.exit(1) ansi_codes = { 'reset': '\x1b[0m', 'bold': '\x1b[1m', 'red': '\x1b[31m', 'green': '\x1b[32m', } def ansi(s, attr): if os.name != 'nt' and sys.stdout.isatty(): return ansi_codes[attr] + str(s) + ansi_codes['reset'] else: return str(s) if __name__ == '__main__': main()
0
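check() can be driven programmatically as well as through the argparse entry point. A short sketch, assuming a placeholder project path; enable_colourful_output() wires up logging so the per-step results are visible:

# Hypothetical usage sketch; the CLI equivalent is
#   python -m pip._vendor.pep517.check /path/to/project
from pip._vendor.pep517.check import check
from pip._vendor.pep517.colorlog import enable_colourful_output

enable_colourful_output()
ok = check('/path/to/project')   # placeholder path with a pyproject.toml
print('checks passed' if ok else 'checks failed')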
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/pep517/__init__.py
"""Wrappers to build Python packages using PEP 517 hooks """ __version__ = '0.8.2'
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/pep517/envbuild.py
"""Build wheels/sdists by installing build deps to a temporary environment. """ import os import logging from pip._vendor import toml import shutil from subprocess import check_call import sys from sysconfig import get_paths from tempfile import mkdtemp from .wrappers import Pep517HookCaller, LoggerWrapper log = logging.getLogger(__name__) def _load_pyproject(source_dir): with open(os.path.join(source_dir, 'pyproject.toml')) as f: pyproject_data = toml.load(f) buildsys = pyproject_data['build-system'] return ( buildsys['requires'], buildsys['build-backend'], buildsys.get('backend-path'), ) class BuildEnvironment(object): """Context manager to install build deps in a simple temporary environment Based on code I wrote for pip, which is MIT licensed. """ # Copyright (c) 2008-2016 The pip developers (see AUTHORS.txt file) # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
path = None def __init__(self, cleanup=True): self._cleanup = cleanup def __enter__(self): self.path = mkdtemp(prefix='pep517-build-env-') log.info('Temporary build environment: %s', self.path) self.save_path = os.environ.get('PATH', None) self.save_pythonpath = os.environ.get('PYTHONPATH', None) install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix' install_dirs = get_paths(install_scheme, vars={ 'base': self.path, 'platbase': self.path, }) scripts = install_dirs['scripts'] if self.save_path: os.environ['PATH'] = scripts + os.pathsep + self.save_path else: os.environ['PATH'] = scripts + os.pathsep + os.defpath if install_dirs['purelib'] == install_dirs['platlib']: lib_dirs = install_dirs['purelib'] else: lib_dirs = install_dirs['purelib'] + os.pathsep + \ install_dirs['platlib'] if self.save_pythonpath: os.environ['PYTHONPATH'] = lib_dirs + os.pathsep + \ self.save_pythonpath else: os.environ['PYTHONPATH'] = lib_dirs return self def pip_install(self, reqs): """Install dependencies into this env by calling pip in a subprocess""" if not reqs: return log.info('Calling pip to install %s', reqs) cmd = [ sys.executable, '-m', 'pip', 'install', '--ignore-installed', '--prefix', self.path] + list(reqs) check_call( cmd, stdout=LoggerWrapper(log, logging.INFO), stderr=LoggerWrapper(log, logging.ERROR), ) def __exit__(self, exc_type, exc_val, exc_tb): needs_cleanup = ( self._cleanup and self.path is not None and os.path.isdir(self.path) ) if needs_cleanup: shutil.rmtree(self.path) if self.save_path is None: os.environ.pop('PATH', None) else: os.environ['PATH'] = self.save_path if self.save_pythonpath is None: os.environ.pop('PYTHONPATH', None) else: os.environ['PYTHONPATH'] = self.save_pythonpath def build_wheel(source_dir, wheel_dir, config_settings=None): """Build a wheel from a source directory using PEP 517 hooks. :param str source_dir: Source directory containing pyproject.toml :param str wheel_dir: Target directory to create wheel in :param dict config_settings: Options to pass to build backend This is a blocking function which will run pip in a subprocess to install build requirements. """ if config_settings is None: config_settings = {} requires, backend, backend_path = _load_pyproject(source_dir) hooks = Pep517HookCaller(source_dir, backend, backend_path) with BuildEnvironment() as env: env.pip_install(requires) reqs = hooks.get_requires_for_build_wheel(config_settings) env.pip_install(reqs) return hooks.build_wheel(wheel_dir, config_settings) def build_sdist(source_dir, sdist_dir, config_settings=None): """Build an sdist from a source directory using PEP 517 hooks. :param str source_dir: Source directory containing pyproject.toml :param str sdist_dir: Target directory to place sdist in :param dict config_settings: Options to pass to build backend This is a blocking function which will run pip in a subprocess to install build requirements. """ if config_settings is None: config_settings = {} requires, backend, backend_path = _load_pyproject(source_dir) hooks = Pep517HookCaller(source_dir, backend, backend_path) with BuildEnvironment() as env: env.pip_install(requires) reqs = hooks.get_requires_for_build_sdist(config_settings) env.pip_install(reqs) return hooks.build_sdist(sdist_dir, config_settings)
0
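build_wheel() and build_sdist() above wrap the whole flow: read pyproject.toml, create a throwaway environment, install static then dynamic requirements, and invoke the hook. A minimal sketch with a placeholder project path:

# Hypothetical usage sketch.
from pip._vendor.pep517.envbuild import build_wheel, build_sdist

source_dir = '/path/to/project'              # must contain pyproject.toml
whl_name = build_wheel(source_dir, wheel_dir='dist')
sdist_name = build_sdist(source_dir, sdist_dir='dist')
print(whl_name, sdist_name)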
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/pep517/wrappers.py
import threading
from contextlib import contextmanager
import os
from os.path import dirname, abspath, join as pjoin
import shutil
from subprocess import check_call, check_output, STDOUT
import sys
from tempfile import mkdtemp

from . import compat

try:
    import importlib.resources as resources

    def _in_proc_script_path():
        return resources.path(__package__, '_in_process.py')
except ImportError:
    @contextmanager
    def _in_proc_script_path():
        yield pjoin(dirname(abspath(__file__)), '_in_process.py')


@contextmanager
def tempdir():
    td = mkdtemp()
    try:
        yield td
    finally:
        shutil.rmtree(td)


class BackendUnavailable(Exception):
    """Will be raised if the backend cannot be imported in the hook process."""
    def __init__(self, traceback):
        self.traceback = traceback


class BackendInvalid(Exception):
    """Will be raised if the backend is invalid."""
    def __init__(self, backend_name, backend_path, message):
        self.backend_name = backend_name
        self.backend_path = backend_path
        self.message = message


class HookMissing(Exception):
    """Will be raised on missing hooks."""
    def __init__(self, hook_name):
        super(HookMissing, self).__init__(hook_name)
        self.hook_name = hook_name


class UnsupportedOperation(Exception):
    """May be raised by build_sdist if the backend indicates that it can't."""
    def __init__(self, traceback):
        self.traceback = traceback


def default_subprocess_runner(cmd, cwd=None, extra_environ=None):
    """The default method of calling the wrapper subprocess."""
    env = os.environ.copy()
    if extra_environ:
        env.update(extra_environ)

    check_call(cmd, cwd=cwd, env=env)


def quiet_subprocess_runner(cmd, cwd=None, extra_environ=None):
    """A method of calling the wrapper subprocess while suppressing output."""
    env = os.environ.copy()
    if extra_environ:
        env.update(extra_environ)

    check_output(cmd, cwd=cwd, env=env, stderr=STDOUT)


def norm_and_check(source_tree, requested):
    """Normalise and check a backend path.

    Ensure that the requested backend path is specified as a relative path,
    and resolves to a location under the given source tree.

    Return an absolute version of the requested path.
    """
    if os.path.isabs(requested):
        raise ValueError("paths must be relative")

    abs_source = os.path.abspath(source_tree)
    abs_requested = os.path.normpath(os.path.join(abs_source, requested))
    # We have to use commonprefix for Python 2.7 compatibility. So we
    # normalise case to avoid problems because commonprefix is a character
    # based comparison :-(
    norm_source = os.path.normcase(abs_source)
    norm_requested = os.path.normcase(abs_requested)
    if os.path.commonprefix([norm_source, norm_requested]) != norm_source:
        raise ValueError("paths must be inside source tree")

    return abs_requested


class Pep517HookCaller(object):
    """A wrapper around a source directory to be built with a PEP 517 backend.

    source_dir : The path to the source directory, containing pyproject.toml.
    build_backend : The build backend spec, as per PEP 517, from pyproject.toml.
    backend_path : The backend path, as per PEP 517, from pyproject.toml.
    runner : A callable that invokes the wrapper subprocess.

    The 'runner', if provided, must expect the following:
        cmd : a list of strings representing the command and arguments to
            execute, as would be passed to e.g. 'subprocess.check_call'.
        cwd : a string representing the working directory that must be
            used for the subprocess. Corresponds to the provided source_dir.
        extra_environ : a dict mapping environment variable names to values
            which must be set for the subprocess execution.
    """
    def __init__(
            self,
            source_dir,
            build_backend,
            backend_path=None,
            runner=None,
    ):
        if runner is None:
            runner = default_subprocess_runner

        self.source_dir = abspath(source_dir)
        self.build_backend = build_backend
        if backend_path:
            backend_path = [
                norm_and_check(self.source_dir, p) for p in backend_path
            ]
        self.backend_path = backend_path
        self._subprocess_runner = runner

    @contextmanager
    def subprocess_runner(self, runner):
        """A context manager for temporarily overriding the default subprocess
        runner.
        """
        prev = self._subprocess_runner
        self._subprocess_runner = runner
        try:
            yield
        finally:
            self._subprocess_runner = prev

    def get_requires_for_build_wheel(self, config_settings=None):
        """Identify packages required for building a wheel

        Returns a list of dependency specifications, e.g.:
            ["wheel >= 0.25", "setuptools"]

        This does not include requirements specified in pyproject.toml.
        It returns the result of calling the equivalently named hook in a
        subprocess.
        """
        return self._call_hook('get_requires_for_build_wheel', {
            'config_settings': config_settings
        })

    def prepare_metadata_for_build_wheel(
            self, metadata_directory, config_settings=None,
            _allow_fallback=True):
        """Prepare a *.dist-info folder with metadata for this project.

        Returns the name of the newly created folder.

        If the build backend defines a hook with this name, it will be called
        in a subprocess. If not, the backend will be asked to build a wheel,
        and the dist-info extracted from that (unless _allow_fallback is
        False).
        """
        return self._call_hook('prepare_metadata_for_build_wheel', {
            'metadata_directory': abspath(metadata_directory),
            'config_settings': config_settings,
            '_allow_fallback': _allow_fallback,
        })

    def build_wheel(
            self, wheel_directory, config_settings=None,
            metadata_directory=None):
        """Build a wheel from this project.

        Returns the name of the newly created file.

        In general, this will call the 'build_wheel' hook in the backend.
        However, if that was previously called by
        'prepare_metadata_for_build_wheel', and the same metadata_directory
        is used, the previously built wheel will be copied to
        wheel_directory.
        """
        if metadata_directory is not None:
            metadata_directory = abspath(metadata_directory)
        return self._call_hook('build_wheel', {
            'wheel_directory': abspath(wheel_directory),
            'config_settings': config_settings,
            'metadata_directory': metadata_directory,
        })

    def get_requires_for_build_sdist(self, config_settings=None):
        """Identify packages required for building an sdist

        Returns a list of dependency specifications, e.g.:
            ["setuptools >= 26"]

        This does not include requirements specified in pyproject.toml.
        It returns the result of calling the equivalently named hook in a
        subprocess.
        """
        return self._call_hook('get_requires_for_build_sdist', {
            'config_settings': config_settings
        })

    def build_sdist(self, sdist_directory, config_settings=None):
        """Build an sdist from this project.

        Returns the name of the newly created file.

        This calls the 'build_sdist' backend hook in a subprocess.
        """
        return self._call_hook('build_sdist', {
            'sdist_directory': abspath(sdist_directory),
            'config_settings': config_settings,
        })

    def _call_hook(self, hook_name, kwargs):
        # On Python 2, pytoml returns Unicode values (which is correct) but
        # the environment passed to check_call needs to contain string
        # values. We convert here by encoding using ASCII (the backend can
        # only contain letters, digits and _, . and : characters, and will
        # be used as a Python identifier, so non-ASCII content is wrong on
        # Python 2 in any case).
        # For backend_path, we use sys.getfilesystemencoding.
        if sys.version_info[0] == 2:
            build_backend = self.build_backend.encode('ASCII')
        else:
            build_backend = self.build_backend
        extra_environ = {'PEP517_BUILD_BACKEND': build_backend}

        if self.backend_path:
            backend_path = os.pathsep.join(self.backend_path)
            if sys.version_info[0] == 2:
                backend_path = backend_path.encode(sys.getfilesystemencoding())
            extra_environ['PEP517_BACKEND_PATH'] = backend_path

        with tempdir() as td:
            hook_input = {'kwargs': kwargs}
            compat.write_json(hook_input, pjoin(td, 'input.json'), indent=2)

            # Run the hook in a subprocess
            with _in_proc_script_path() as script:
                self._subprocess_runner(
                    [sys.executable, str(script), hook_name, td],
                    cwd=self.source_dir,
                    extra_environ=extra_environ
                )

            data = compat.read_json(pjoin(td, 'output.json'))
            if data.get('unsupported'):
                raise UnsupportedOperation(data.get('traceback', ''))
            if data.get('no_backend'):
                raise BackendUnavailable(data.get('traceback', ''))
            if data.get('backend_invalid'):
                raise BackendInvalid(
                    backend_name=self.build_backend,
                    backend_path=self.backend_path,
                    message=data.get('backend_error', '')
                )
            if data.get('hook_missing'):
                raise HookMissing(hook_name)
            return data['return_val']


class LoggerWrapper(threading.Thread):
    """
    Read messages from a pipe and redirect them
    to a logger (see python's logging module).
    """

    def __init__(self, logger, level):
        threading.Thread.__init__(self)
        self.daemon = True

        self.logger = logger
        self.level = level

        # create the pipe and reader
        self.fd_read, self.fd_write = os.pipe()
        self.reader = os.fdopen(self.fd_read)

        self.start()

    def fileno(self):
        return self.fd_write

    @staticmethod
    def remove_newline(msg):
        return msg[:-1] if msg.endswith(os.linesep) else msg

    def run(self):
        for line in self.reader:
            self._write(self.remove_newline(line))

    def _write(self, message):
        self.logger.log(self.level, message)
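[Editor's note: a minimal sketch of driving the wrapper above. It assumes the standalone pep517 package layout (pip vendors the same code as pip._vendor.pep517); 'my_project' and the setuptools backend spec are placeholder assumptions, not values from this repository, and the backend's build requirements must already be importable since no isolated environment is created here.]

# Sketch: call a PEP 517 backend's hooks through Pep517HookCaller.
from pep517.wrappers import Pep517HookCaller, quiet_subprocess_runner

# 'my_project' is a hypothetical directory containing pyproject.toml with
# build-backend = "setuptools.build_meta"; real callers read the spec from
# that file rather than hard-coding it.
hooks = Pep517HookCaller('my_project', 'setuptools.build_meta')
print(hooks.get_requires_for_build_wheel())  # e.g. ['wheel']

# Temporarily silence the hook subprocess while building the wheel itself.
with hooks.subprocess_runner(quiet_subprocess_runner):
    wheel_name = hooks.build_wheel('my_project/dist')
print('built', wheel_name)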
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/pep517/colorlog.py
"""Nicer log formatting with colours. Code copied from Tornado, Apache licensed. """ # Copyright 2012 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import sys try: import curses except ImportError: curses = None def _stderr_supports_color(): color = False if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty(): try: curses.setupterm() if curses.tigetnum("colors") > 0: color = True except Exception: pass return color class LogFormatter(logging.Formatter): """Log formatter with colour support """ DEFAULT_COLORS = { logging.INFO: 2, # Green logging.WARNING: 3, # Yellow logging.ERROR: 1, # Red logging.CRITICAL: 1, } def __init__(self, color=True, datefmt=None): r""" :arg bool color: Enables color support. :arg string fmt: Log message format. It will be applied to the attributes dict of log records. The text between ``%(color)s`` and ``%(end_color)s`` will be colored depending on the level if color support is on. :arg dict colors: color mappings from logging level to terminal color code :arg string datefmt: Datetime format. Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``. .. versionchanged:: 3.2 Added ``fmt`` and ``datefmt`` arguments. """ logging.Formatter.__init__(self, datefmt=datefmt) self._colors = {} if color and _stderr_supports_color(): # The curses module has some str/bytes confusion in # python3. Until version 3.2.3, most methods return # bytes, but only accept strings. In addition, we want to # output these strings with the logging module, which # works with unicode strings. The explicit calls to # unicode() below are harmless in python2 but will do the # right conversion in python 3. fg_color = (curses.tigetstr("setaf") or curses.tigetstr("setf") or "") if (3, 0) < sys.version_info < (3, 2, 3): fg_color = str(fg_color, "ascii") for levelno, code in self.DEFAULT_COLORS.items(): self._colors[levelno] = str( curses.tparm(fg_color, code), "ascii") self._normal = str(curses.tigetstr("sgr0"), "ascii") scr = curses.initscr() self.termwidth = scr.getmaxyx()[1] curses.endwin() else: self._normal = '' # Default width is usually 80, but too wide is # worse than too narrow self.termwidth = 70 def formatMessage(self, record): mlen = len(record.message) right_text = '{initial}-{name}'.format(initial=record.levelname[0], name=record.name) if mlen + len(right_text) < self.termwidth: space = ' ' * (self.termwidth - (mlen + len(right_text))) else: space = ' ' if record.levelno in self._colors: start_color = self._colors[record.levelno] end_color = self._normal else: start_color = end_color = '' return record.message + space + start_color + right_text + end_color def enable_colourful_output(level=logging.INFO): handler = logging.StreamHandler() handler.setFormatter(LogFormatter()) logging.root.addHandler(handler) logging.root.setLevel(level)
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/pep517/meta.py
"""Build metadata for a project using PEP 517 hooks. """ import argparse import logging import os import shutil import functools try: import importlib.metadata as imp_meta except ImportError: import importlib_metadata as imp_meta try: from zipfile import Path except ImportError: from zipp import Path from .envbuild import BuildEnvironment from .wrappers import Pep517HookCaller, quiet_subprocess_runner from .dirtools import tempdir, mkdir_p, dir_to_zipfile from .build import validate_system, load_system, compat_system log = logging.getLogger(__name__) def _prep_meta(hooks, env, dest): reqs = hooks.get_requires_for_build_wheel({}) log.info('Got build requires: %s', reqs) env.pip_install(reqs) log.info('Installed dynamic build dependencies') with tempdir() as td: log.info('Trying to build metadata in %s', td) filename = hooks.prepare_metadata_for_build_wheel(td, {}) source = os.path.join(td, filename) shutil.move(source, os.path.join(dest, os.path.basename(filename))) def build(source_dir='.', dest=None, system=None): system = system or load_system(source_dir) dest = os.path.join(source_dir, dest or 'dist') mkdir_p(dest) validate_system(system) hooks = Pep517HookCaller( source_dir, system['build-backend'], system.get('backend-path') ) with hooks.subprocess_runner(quiet_subprocess_runner): with BuildEnvironment() as env: env.pip_install(system['requires']) _prep_meta(hooks, env, dest) def build_as_zip(builder=build): with tempdir() as out_dir: builder(dest=out_dir) return dir_to_zipfile(out_dir) def load(root): """ Given a source directory (root) of a package, return an importlib.metadata.Distribution object with metadata build from that package. """ root = os.path.expanduser(root) system = compat_system(root) builder = functools.partial(build, source_dir=root, system=system) path = Path(build_as_zip(builder)) return imp_meta.PathDistribution(path) parser = argparse.ArgumentParser() parser.add_argument( 'source_dir', help="A directory containing pyproject.toml", ) parser.add_argument( '--out-dir', '-o', help="Destination in which to save the builds relative to source dir", ) def main(): args = parser.parse_args() build(args.source_dir, args.out_dir) if __name__ == '__main__': main()
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/locators.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
    import threading
except ImportError:  # pragma: no cover
    import dummy_threading as threading
import zlib

from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
                     queue, quote, unescape, string_types, build_opener,
                     HTTPRedirectHandler as BaseRedirectHandler, text_type,
                     Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata, MetadataInvalidError
from .util import (cached_property, parse_credentials, ensure_slash,
                   split_filename, get_project_data, parse_requirement,
                   parse_name_and_version, ServerProxy, normalize_name)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible

logger = logging.getLogger(__name__)

HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'https://pypi.org/pypi'


def get_all_distribution_names(url=None):
    """
    Return all distribution names known by an index.
    :param url: The URL of the index.
    :return: A list of all known distribution names.
    """
    if url is None:
        url = DEFAULT_INDEX
    client = ServerProxy(url, timeout=3.0)
    try:
        return client.list_packages()
    finally:
        client('close')()


class RedirectHandler(BaseRedirectHandler):
    """
    A class to work around a bug in some Python 3.2.x releases.
    """
    # There's a bug in the base version for some 3.2.x
    # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
    # returns e.g. /abc, it bails because it says the scheme ''
    # is bogus, when actually it should use the request's
    # URL for the scheme. See Python issue #13696.
    def http_error_302(self, req, fp, code, msg, headers):
        # Some servers (incorrectly) return multiple Location headers
        # (so probably same goes for URI). Use first header.
        newurl = None
        for key in ('location', 'uri'):
            if key in headers:
                newurl = headers[key]
                break
        if newurl is None:  # pragma: no cover
            return
        urlparts = urlparse(newurl)
        if urlparts.scheme == '':
            newurl = urljoin(req.get_full_url(), newurl)
            if hasattr(headers, 'replace_header'):
                headers.replace_header(key, newurl)
            else:
                headers[key] = newurl
        return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
                                                  headers)

    http_error_301 = http_error_303 = http_error_307 = http_error_302


class Locator(object):
    """
    A base class for locators - things that locate distributions.
    """
    source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
    binary_extensions = ('.egg', '.exe', '.whl')
    excluded_extensions = ('.pdf',)

    # A list of tags indicating which wheels you want to match. The default
    # value of None matches against the tags compatible with the running
    # Python. If you want to match other values, set wheel_tags on a locator
    # instance to a list of tuples (pyver, abi, arch) which you want to match.
    wheel_tags = None

    downloadable_extensions = source_extensions + ('.whl',)

    def __init__(self, scheme='default'):
        """
        Initialise an instance.
        :param scheme: Because locators look for most recent versions, they
                       need to know the version scheme to use. This specifies
                       the current PEP-recommended scheme - use ``'legacy'``
                       if you need to support existing distributions on PyPI.
        """
        self._cache = {}
        self.scheme = scheme
        # Because of bugs in some of the handlers on some of the platforms,
        # we use our own opener rather than just using urlopen.
        self.opener = build_opener(RedirectHandler())
        # If get_project() is called from locate(), the matcher instance
        # is set from the requirement passed to locate(). See issue #18 for
        # why this can be useful to know.
        self.matcher = None
        self.errors = queue.Queue()

    def get_errors(self):
        """
        Return any errors which have occurred.
        """
        result = []
        while not self.errors.empty():  # pragma: no cover
            try:
                e = self.errors.get(False)
                result.append(e)
            except self.errors.Empty:
                continue
            self.errors.task_done()
        return result

    def clear_errors(self):
        """
        Clear any errors which may have been logged.
        """
        # Just get the errors and throw them away
        self.get_errors()

    def clear_cache(self):
        self._cache.clear()

    def _get_scheme(self):
        return self._scheme

    def _set_scheme(self, value):
        self._scheme = value

    scheme = property(_get_scheme, _set_scheme)

    def _get_project(self, name):
        """
        For a given project, get a dictionary mapping available versions to
        Distribution instances.

        This should be implemented in subclasses.

        If called via locate(), self.matcher will be set to a
        matcher for the requirement to satisfy, otherwise it will be None.
        """
        raise NotImplementedError('Please implement in the subclass')

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Please implement in the subclass')

    def get_project(self, name):
        """
        For a given project, get a dictionary mapping available versions to
        Distribution instances.

        This calls _get_project to do all the work, and just implements a
        caching layer on top.
        """
        if self._cache is None:  # pragma: no cover
            result = self._get_project(name)
        elif name in self._cache:
            result = self._cache[name]
        else:
            self.clear_errors()
            result = self._get_project(name)
            self._cache[name] = result
        return result

    def score_url(self, url):
        """
        Give a url a score which can be used to choose preferred URLs
        for a given project release.
        """
        t = urlparse(url)
        basename = posixpath.basename(t.path)
        compatible = True
        is_wheel = basename.endswith('.whl')
        is_downloadable = basename.endswith(self.downloadable_extensions)
        if is_wheel:
            compatible = is_compatible(Wheel(basename), self.wheel_tags)
        return (t.scheme == 'https', 'pypi.org' in t.netloc,
                is_downloadable, is_wheel, compatible, basename)

    def prefer_url(self, url1, url2):
        """
        Choose one of two URLs where both are candidates for distribution
        archives for the same version of a distribution (for example,
        .tar.gz vs. zip).

        The current implementation favours https:// URLs over http://,
        archives from PyPI over those from other locations, wheel
        compatibility (if a wheel) and then the archive name.
        """
        result = url2
        if url1:
            s1 = self.score_url(url1)
            s2 = self.score_url(url2)
            if s1 > s2:
                result = url1
            if result != url2:
                logger.debug('Not replacing %r with %r', url1, url2)
            else:
                logger.debug('Replacing %r with %r', url1, url2)
        return result

    def split_filename(self, filename, project_name):
        """
        Attempt to split a filename into project name, version and Python
        version.
        """
        return split_filename(filename, project_name)

    def convert_url_to_download_info(self, url, project_name):
        """
        See if a URL is a candidate for a download URL for a project (the URL
        has typically been scraped from an HTML page).

        If it is, a dictionary is returned with keys "name", "version",
        "filename" and "url"; otherwise, None is returned.
        """
        def same_project(name1, name2):
            return normalize_name(name1) == normalize_name(name2)

        result = None
        scheme, netloc, path, params, query, frag = urlparse(url)
        if frag.lower().startswith('egg='):  # pragma: no cover
            logger.debug('%s: version hint in fragment: %r',
                         project_name, frag)
        m = HASHER_HASH.match(frag)
        if m:
            algo, digest = m.groups()
        else:
            algo, digest = None, None
        origpath = path
        if path and path[-1] == '/':  # pragma: no cover
            path = path[:-1]
        if path.endswith('.whl'):
            try:
                wheel = Wheel(path)
                if not is_compatible(wheel, self.wheel_tags):
                    logger.debug('Wheel not compatible: %s', path)
                else:
                    if project_name is None:
                        include = True
                    else:
                        include = same_project(wheel.name, project_name)
                    if include:
                        result = {
                            'name': wheel.name,
                            'version': wheel.version,
                            'filename': wheel.filename,
                            'url': urlunparse((scheme, netloc, origpath,
                                               params, query, '')),
                            'python-version': ', '.join(
                                ['.'.join(list(v[2:])) for v in wheel.pyver]),
                        }
            except Exception:  # pragma: no cover
                logger.warning('invalid path for wheel: %s', path)
        elif not path.endswith(self.downloadable_extensions):  # pragma: no cover
            logger.debug('Not downloadable: %s', path)
        else:  # downloadable extension
            path = filename = posixpath.basename(path)
            for ext in self.downloadable_extensions:
                if path.endswith(ext):
                    path = path[:-len(ext)]
                    t = self.split_filename(path, project_name)
                    if not t:  # pragma: no cover
                        logger.debug('No match for project/version: %s', path)
                    else:
                        name, version, pyver = t
                        if not project_name or same_project(project_name, name):
                            result = {
                                'name': name,
                                'version': version,
                                'filename': filename,
                                'url': urlunparse((scheme, netloc, origpath,
                                                   params, query, '')),
                                # 'packagetype': 'sdist',
                            }
                            if pyver:  # pragma: no cover
                                result['python-version'] = pyver
                    break
        if result and algo:
            result['%s_digest' % algo] = digest
        return result

    def _get_digest(self, info):
        """
        Get a digest from a dictionary by looking at a "digests" dictionary
        or keys of the form 'algo_digest'.

        Returns a 2-tuple (algo, digest) if found, else None. Currently
        looks only for SHA256, then MD5.
        """
        result = None
        if 'digests' in info:
            digests = info['digests']
            for algo in ('sha256', 'md5'):
                if algo in digests:
                    result = (algo, digests[algo])
                    break
        if not result:
            for algo in ('sha256', 'md5'):
                key = '%s_digest' % algo
                if key in info:
                    result = (algo, info[key])
                    break
        return result

    def _update_version_data(self, result, info):
        """
        Update a result dictionary (the final result from _get_project) with a
        dictionary for a specific version, which typically holds information
        gleaned from a filename or URL for an archive for the distribution.
        """
        name = info.pop('name')
        version = info.pop('version')
        if version in result:
            dist = result[version]
            md = dist.metadata
        else:
            dist = make_dist(name, version, scheme=self.scheme)
            md = dist.metadata
        dist.digest = digest = self._get_digest(info)
        url = info['url']
        result['digests'][url] = digest
        if md.source_url != info['url']:
            md.source_url = self.prefer_url(md.source_url, url)
        result['urls'].setdefault(version, set()).add(url)
        dist.locator = self
        result[version] = dist

    def locate(self, requirement, prereleases=False):
        """
        Find the most recent distribution which matches the given
        requirement.

        :param requirement: A requirement of the form 'foo (1.0)' or perhaps
                            'foo (>= 1.0, < 2.0, != 1.3)'
        :param prereleases: If ``True``, allow pre-release versions to be
                            located. Otherwise, pre-release versions are not
                            returned.
        :return: A :class:`Distribution` instance, or ``None`` if no such
                 distribution could be located.
        """
        result = None
        r = parse_requirement(requirement)
        if r is None:  # pragma: no cover
            raise DistlibException('Not a valid requirement: %r' % requirement)
        scheme = get_scheme(self.scheme)
        self.matcher = matcher = scheme.matcher(r.requirement)
        logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
        versions = self.get_project(r.name)
        if len(versions) > 2:   # urls and digests keys are present
            # sometimes, versions are invalid
            slist = []
            vcls = matcher.version_class
            for k in versions:
                if k in ('urls', 'digests'):
                    continue
                try:
                    if not matcher.match(k):
                        logger.debug('%s did not match %r', matcher, k)
                    else:
                        if prereleases or not vcls(k).is_prerelease:
                            slist.append(k)
                        else:
                            logger.debug('skipping pre-release '
                                         'version %s of %s', k, matcher.name)
                except Exception:  # pragma: no cover
                    logger.warning('error matching %s with %r', matcher, k)
                    pass  # slist.append(k)
            if len(slist) > 1:
                slist = sorted(slist, key=scheme.key)
            if slist:
                logger.debug('sorted list: %s', slist)
                version = slist[-1]
                result = versions[version]
        if result:
            if r.extras:
                result.extras = r.extras
            result.download_urls = versions.get('urls', {}).get(version, set())
            d = {}
            sd = versions.get('digests', {})
            for url in result.download_urls:
                if url in sd:  # pragma: no cover
                    d[url] = sd[url]
            result.digests = d
        self.matcher = None
        return result


class PyPIRPCLocator(Locator):
    """
    This locator uses XML-RPC to locate distributions. It therefore
    cannot be used with simple mirrors (that only mirror file content).
    """
    def __init__(self, url, **kwargs):
        """
        Initialise an instance.

        :param url: The URL to use for XML-RPC.
        :param kwargs: Passed to the superclass constructor.
        """
        super(PyPIRPCLocator, self).__init__(**kwargs)
        self.base_url = url
        self.client = ServerProxy(url, timeout=3.0)

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        return set(self.client.list_packages())

    def _get_project(self, name):
        result = {'urls': {}, 'digests': {}}
        versions = self.client.package_releases(name, True)
        for v in versions:
            urls = self.client.release_urls(name, v)
            data = self.client.release_data(name, v)
            metadata = Metadata(scheme=self.scheme)
            metadata.name = data['name']
            metadata.version = data['version']
            metadata.license = data.get('license')
            metadata.keywords = data.get('keywords', [])
            metadata.summary = data.get('summary')
            dist = Distribution(metadata)
            if urls:
                info = urls[0]
                metadata.source_url = info['url']
                dist.digest = self._get_digest(info)
            dist.locator = self
            result[v] = dist
            for info in urls:
                url = info['url']
                digest = self._get_digest(info)
                result['urls'].setdefault(v, set()).add(url)
                result['digests'][url] = digest
        return result


class PyPIJSONLocator(Locator):
    """
    This locator uses PyPI's JSON interface. It's very limited in
    functionality and probably not worth using.
    """
    def __init__(self, url, **kwargs):
        super(PyPIJSONLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')

    def _get_project(self, name):
        result = {'urls': {}, 'digests': {}}
        url = urljoin(self.base_url, '%s/json' % quote(name))
        try:
            resp = self.opener.open(url)
            data = resp.read().decode()  # for now
            d = json.loads(data)
            md = Metadata(scheme=self.scheme)
            data = d['info']
            md.name = data['name']
            md.version = data['version']
            md.license = data.get('license')
            md.keywords = data.get('keywords', [])
            md.summary = data.get('summary')
            dist = Distribution(md)
            dist.locator = self
            urls = d['urls']
            result[md.version] = dist
            for info in d['urls']:
                url = info['url']
                dist.download_urls.add(url)
                dist.digests[url] = self._get_digest(info)
                result['urls'].setdefault(md.version, set()).add(url)
                result['digests'][url] = self._get_digest(info)
            # Now get other releases
            for version, infos in d['releases'].items():
                if version == md.version:
                    continue    # already done
                omd = Metadata(scheme=self.scheme)
                omd.name = md.name
                omd.version = version
                odist = Distribution(omd)
                odist.locator = self
                result[version] = odist
                for info in infos:
                    url = info['url']
                    odist.download_urls.add(url)
                    odist.digests[url] = self._get_digest(info)
                    result['urls'].setdefault(version, set()).add(url)
                    result['digests'][url] = self._get_digest(info)
#            for info in urls:
#                md.source_url = info['url']
#                dist.digest = self._get_digest(info)
#                dist.locator = self
#                for info in urls:
#                    url = info['url']
#                    result['urls'].setdefault(md.version, set()).add(url)
#                    result['digests'][url] = self._get_digest(info)
        except Exception as e:
            self.errors.put(text_type(e))
            logger.exception('JSON fetch failed: %s', e)
        return result


class Page(object):
    """
    This class represents a scraped HTML page.
    """
    # The following slightly hairy-looking regex just looks for the contents of
    # an anchor link, which has an attribute "href" either immediately preceded
    # or immediately followed by a "rel" attribute. The attribute values can be
    # declared with double quotes, single quotes or no quotes - which leads to
    # the length of the expression.
    _href = re.compile("""
(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
""", re.I | re.S | re.X)
    _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)

    def __init__(self, data, url):
        """
        Initialise an instance with the Unicode page contents and the URL they
        came from.
        """
        self.data = data
        self.base_url = self.url = url
        m = self._base.search(self.data)
        if m:
            self.base_url = m.group(1)

    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)

    @cached_property
    def links(self):
        """
        Return the URLs of all the links on a page together with information
        about their "rel" attribute, for determining which ones to treat as
        downloads and which ones to queue for further scraping.
        """
        def clean(url):
            "Tidy up an URL."
            scheme, netloc, path, params, query, frag = urlparse(url)
            return urlunparse((scheme, netloc, quote(path),
                               params, query, frag))

        result = set()
        for match in self._href.finditer(self.data):
            d = match.groupdict('')
            rel = (d['rel1'] or d['rel2'] or d['rel3'] or
                   d['rel4'] or d['rel5'] or d['rel6'])
            url = d['url1'] or d['url2'] or d['url3']
            url = urljoin(self.base_url, url)
            url = unescape(url)
            url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
            result.add((url, rel))
        # We sort the result, hoping to bring the most recent versions
        # to the front
        result = sorted(result, key=lambda t: t[0], reverse=True)
        return result


class SimpleScrapingLocator(Locator):
    """
    A locator which scrapes HTML pages to locate downloads for a distribution.
    This runs multiple threads to do the I/O; performance is at least as good
    as pip's PackageFinder, which works in an analogous fashion.
    """

    # These are used to deal with various Content-Encoding schemes.
    decoders = {
        'deflate': zlib.decompress,
        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
        'none': lambda b: b,
    }

    def __init__(self, url, timeout=None, num_workers=10, **kwargs):
        """
        Initialise an instance.
        :param url: The root URL to use for scraping.
        :param timeout: The timeout, in seconds, to be applied to requests.
                        This defaults to ``None`` (no timeout specified).
        :param num_workers: The number of worker threads you want to do I/O,
                            This defaults to 10.
        :param kwargs: Passed to the superclass.
        """
        super(SimpleScrapingLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)
        self.timeout = timeout
        self._page_cache = {}
        self._seen = set()
        self._to_fetch = queue.Queue()
        self._bad_hosts = set()
        self.skip_externals = False
        self.num_workers = num_workers
        self._lock = threading.RLock()
        # See issue #45: we need to be resilient when the locator is used
        # in a thread, e.g. with concurrent.futures. We can't use self._lock
        # as it is for coordinating our internal threads - the ones created
        # in _prepare_threads.
        self._gplock = threading.RLock()
        self.platform_check = False  # See issue #112

    def _prepare_threads(self):
        """
        Threads are created only when get_project is called, and terminate
        before it returns. They are there primarily to parallelise I/O
        (i.e. fetching web pages).
        """
        self._threads = []
        for i in range(self.num_workers):
            t = threading.Thread(target=self._fetch)
            t.setDaemon(True)
            t.start()
            self._threads.append(t)

    def _wait_threads(self):
        """
        Tell all the threads to terminate (by sending a sentinel value) and
        wait for them to do so.
        """
        # Note that you need two loops, since you can't say which
        # thread will get each sentinel
        for t in self._threads:
            self._to_fetch.put(None)    # sentinel
        for t in self._threads:
            t.join()
        self._threads = []

    def _get_project(self, name):
        result = {'urls': {}, 'digests': {}}
        with self._gplock:
            self.result = result
            self.project_name = name
            url = urljoin(self.base_url, '%s/' % quote(name))
            self._seen.clear()
            self._page_cache.clear()
            self._prepare_threads()
            try:
                logger.debug('Queueing %s', url)
                self._to_fetch.put(url)
                self._to_fetch.join()
            finally:
                self._wait_threads()
            del self.result
        return result

    platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'
                                    r'win(32|_amd64)|macosx_?\d+)\b', re.I)

    def _is_platform_dependent(self, url):
        """
        Does a URL refer to a platform-specific download?
        """
        return self.platform_dependent.search(url)

    def _process_download(self, url):
        """
        See if a URL is a suitable download for a project.

        If it is, register information in the result dictionary (for
        _get_project) about the specific version it's for.

        Note that the return value isn't actually used other than as a
        boolean value.
        """
        if self.platform_check and self._is_platform_dependent(url):
            info = None
        else:
            info = self.convert_url_to_download_info(url, self.project_name)
        logger.debug('process_download: %s -> %s', url, info)
        if info:
            with self._lock:    # needed because self.result is shared
                self._update_version_data(self.result, info)
        return info

    def _should_queue(self, link, referrer, rel):
        """
        Determine whether a link URL from a referring page and with a
        particular "rel" attribute should be queued for scraping.
        """
        scheme, netloc, path, _, _, _ = urlparse(link)
        if path.endswith(self.source_extensions + self.binary_extensions +
                         self.excluded_extensions):
            result = False
        elif self.skip_externals and not link.startswith(self.base_url):
            result = False
        elif not referrer.startswith(self.base_url):
            result = False
        elif rel not in ('homepage', 'download'):
            result = False
        elif scheme not in ('http', 'https', 'ftp'):
            result = False
        elif self._is_platform_dependent(link):
            result = False
        else:
            host = netloc.split(':', 1)[0]
            if host.lower() == 'localhost':
                result = False
            else:
                result = True
        logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
                     referrer, result)
        return result

    def _fetch(self):
        """
        Get a URL to fetch from the work queue, get the HTML page, examine its
        links for download candidates and candidates for further scraping.

        This is a handy method to run in a thread.
        """
        while True:
            url = self._to_fetch.get()
            try:
                if url:
                    page = self.get_page(url)
                    if page is None:    # e.g. after an error
                        continue
                    for link, rel in page.links:
                        if link not in self._seen:
                            try:
                                self._seen.add(link)
                                if (not self._process_download(link) and
                                        self._should_queue(link, url, rel)):
                                    logger.debug('Queueing %s from %s',
                                                 link, url)
                                    self._to_fetch.put(link)
                            except MetadataInvalidError:  # e.g. invalid versions
                                pass
            except Exception as e:  # pragma: no cover
                self.errors.put(text_type(e))
            finally:
                # always do this, to avoid hangs :-)
                self._to_fetch.task_done()
            if not url:
                # logger.debug('Sentinel seen, quitting.')
                break

    def get_page(self, url):
        """
        Get the HTML for a URL, possibly from an in-memory cache.

        XXX TODO Note: this cache is never actually cleared. It's assumed that
        the data won't get stale over the lifetime of a locator instance (not
        necessarily true for the default_locator).
        """
        # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
        scheme, netloc, path, _, _, _ = urlparse(url)
        if scheme == 'file' and os.path.isdir(url2pathname(path)):
            url = urljoin(ensure_slash(url), 'index.html')

        if url in self._page_cache:
            result = self._page_cache[url]
            logger.debug('Returning %s from cache: %s', url, result)
        else:
            host = netloc.split(':', 1)[0]
            result = None
            if host in self._bad_hosts:
                logger.debug('Skipping %s due to bad host %s', url, host)
            else:
                req = Request(url, headers={'Accept-encoding': 'identity'})
                try:
                    logger.debug('Fetching %s', url)
                    resp = self.opener.open(req, timeout=self.timeout)
                    logger.debug('Fetched %s', url)
                    headers = resp.info()
                    content_type = headers.get('Content-Type', '')
                    if HTML_CONTENT_TYPE.match(content_type):
                        final_url = resp.geturl()
                        data = resp.read()
                        encoding = headers.get('Content-Encoding')
                        if encoding:
                            decoder = self.decoders[encoding]   # fail if not found
                            data = decoder(data)
                        encoding = 'utf-8'
                        m = CHARSET.search(content_type)
                        if m:
                            encoding = m.group(1)
                        try:
                            data = data.decode(encoding)
                        except UnicodeError:  # pragma: no cover
                            data = data.decode('latin-1')    # fallback
                        result = Page(data, final_url)
                        self._page_cache[final_url] = result
                except HTTPError as e:
                    if e.code != 404:
                        logger.exception('Fetch failed: %s: %s', url, e)
                except URLError as e:  # pragma: no cover
                    logger.exception('Fetch failed: %s: %s', url, e)
                    with self._lock:
                        self._bad_hosts.add(host)
                except Exception as e:  # pragma: no cover
                    logger.exception('Fetch failed: %s: %s', url, e)
                finally:
                    self._page_cache[url] = result   # even if None (failure)
        return result

    _distname_re = re.compile('<a href=[^>]*>([^<]+)<')

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        page = self.get_page(self.base_url)
        if not page:
            raise DistlibException('Unable to get %s' % self.base_url)
        for match in self._distname_re.finditer(page.data):
            result.add(match.group(1))
        return result


class DirectoryLocator(Locator):
    """
    This class locates distributions in a directory tree.
    """
    def __init__(self, path, **kwargs):
        """
        Initialise an instance.
        :param path: The root of the directory tree to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * recursive - if True (the default), subdirectories are
                         recursed into. If False, only the top-level directory
                         is searched,
        """
        self.recursive = kwargs.pop('recursive', True)
        super(DirectoryLocator, self).__init__(**kwargs)
        path = os.path.abspath(path)
        if not os.path.isdir(path):  # pragma: no cover
            raise DistlibException('Not a directory: %r' % path)
        self.base_dir = path

    def should_include(self, filename, parent):
        """
        Should a filename be considered as a candidate for a distribution
        archive? As well as the filename, the directory which contains it
        is provided, though not used by the current implementation.
        """
        return filename.endswith(self.downloadable_extensions)

    def _get_project(self, name):
        result = {'urls': {}, 'digests': {}}
        for root, dirs, files in os.walk(self.base_dir):
            for fn in files:
                if self.should_include(fn, root):
                    fn = os.path.join(root, fn)
                    url = urlunparse(('file', '',
                                      pathname2url(os.path.abspath(fn)),
                                      '', '', ''))
                    info = self.convert_url_to_download_info(url, name)
                    if info:
                        self._update_version_data(result, info)
            if not self.recursive:
                break
        return result

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        for root, dirs, files in os.walk(self.base_dir):
            for fn in files:
                if self.should_include(fn, root):
                    fn = os.path.join(root, fn)
                    url = urlunparse(('file', '',
                                      pathname2url(os.path.abspath(fn)),
                                      '', '', ''))
                    info = self.convert_url_to_download_info(url, None)
                    if info:
                        result.add(info['name'])
            if not self.recursive:
                break
        return result


class JSONLocator(Locator):
    """
    This locator uses special extended metadata (not available on PyPI) and is
    the basis of performant dependency resolution in distlib. Other locators
    require archive downloads before dependencies can be determined! As you
    might imagine, that can be slow.
    """
    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')

    def _get_project(self, name):
        result = {'urls': {}, 'digests': {}}
        data = get_project_data(name)
        if data:
            for info in data.get('files', []):
                if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
                    continue
                # We don't store summary in project metadata as it makes
                # the data bigger for no benefit during dependency
                # resolution
                dist = make_dist(data['name'], info['version'],
                                 summary=data.get('summary',
                                                  'Placeholder for summary'),
                                 scheme=self.scheme)
                md = dist.metadata
                md.source_url = info['url']
                # TODO SHA256 digest
                if 'digest' in info and info['digest']:
                    dist.digest = ('md5', info['digest'])
                md.dependencies = info.get('requirements', {})
                dist.exports = info.get('exports', {})
                result[dist.version] = dist
                result['urls'].setdefault(dist.version, set()).add(info['url'])
        return result


class DistPathLocator(Locator):
    """
    This locator finds installed distributions in a path. It can be useful for
    adding to an :class:`AggregatingLocator`.
    """
    def __init__(self, distpath, **kwargs):
        """
        Initialise an instance.

        :param distpath: A :class:`DistributionPath` instance to search.
        """
        super(DistPathLocator, self).__init__(**kwargs)
        assert isinstance(distpath, DistributionPath)
        self.distpath = distpath

    def _get_project(self, name):
        dist = self.distpath.get_distribution(name)
        if dist is None:
            result = {'urls': {}, 'digests': {}}
        else:
            result = {
                dist.version: dist,
                'urls': {dist.version: set([dist.source_url])},
                'digests': {dist.version: set([None])}
            }
        return result


class AggregatingLocator(Locator):
    """
    This class allows you to chain and/or merge a list of locators.
    """
    def __init__(self, *locators, **kwargs):
        """
        Initialise an instance.

        :param locators: The list of locators to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * merge - if False (the default), the first successful
                         search from any of the locators is returned. If True,
                         the results from all locators are merged (this can be
                         slow).
        """
        self.merge = kwargs.pop('merge', False)
        self.locators = locators
        super(AggregatingLocator, self).__init__(**kwargs)

    def clear_cache(self):
        super(AggregatingLocator, self).clear_cache()
        for locator in self.locators:
            locator.clear_cache()

    def _set_scheme(self, value):
        self._scheme = value
        for locator in self.locators:
            locator.scheme = value

    scheme = property(Locator.scheme.fget, _set_scheme)

    def _get_project(self, name):
        result = {}
        for locator in self.locators:
            d = locator.get_project(name)
            if d:
                if self.merge:
                    files = result.get('urls', {})
                    digests = result.get('digests', {})
                    # next line could overwrite result['urls'], result['digests']
                    result.update(d)
                    df = result.get('urls')
                    if files and df:
                        for k, v in files.items():
                            if k in df:
                                df[k] |= v
                            else:
                                df[k] = v
                    dd = result.get('digests')
                    if digests and dd:
                        dd.update(digests)
                else:
                    # See issue #18. If any dists are found and we're looking
                    # for specific constraints, we only return something if
                    # a match is found. For example, if a DirectoryLocator
                    # returns just foo (1.0) while we're looking for
                    # foo (>= 2.0), we'll pretend there was nothing there so
                    # that subsequent locators can be queried. Otherwise we
                    # would just return foo (1.0) which would then lead to a
                    # failure to find foo (>= 2.0), because other locators
                    # weren't searched. Note that this only matters when
                    # merge=False.
                    if self.matcher is None:
                        found = True
                    else:
                        found = False
                        for k in d:
                            if self.matcher.match(k):
                                found = True
                                break
                    if found:
                        result = d
                        break
        return result

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        for locator in self.locators:
            try:
                result |= locator.get_distribution_names()
            except NotImplementedError:
                pass
        return result


# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
    JSONLocator(),
    SimpleScrapingLocator('https://pypi.org/simple/', timeout=3.0),
    scheme='legacy')

locate = default_locator.locate

NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
                             r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')


class DependencyFinder(object):
    """
    Locate dependencies for distributions.
    """

    def __init__(self, locator=None):
        """
        Initialise an instance, using the specified locator
        to locate distributions.
        """
        self.locator = locator or default_locator
        self.scheme = get_scheme(self.locator.scheme)

    def add_distribution(self, dist):
        """
        Add a distribution to the finder. This will update internal
        information about who provides what.
        :param dist: The distribution to add.
        """
        logger.debug('adding distribution %s', dist)
        name = dist.key
        self.dists_by_name[name] = dist
        self.dists[(name, dist.version)] = dist
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Add to provided: %s, %s, %s', name, version, dist)
            self.provided.setdefault(name, set()).add((version, dist))

    def remove_distribution(self, dist):
        """
        Remove a distribution from the finder. This will update internal
        information about who provides what.
        :param dist: The distribution to remove.
        """
        logger.debug('removing distribution %s', dist)
        name = dist.key
        del self.dists_by_name[name]
        del self.dists[(name, dist.version)]
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Remove from provided: %s, %s, %s',
                         name, version, dist)
            s = self.provided[name]
            s.remove((version, dist))
            if not s:
                del self.provided[name]

    def get_matcher(self, reqt):
        """
        Get a version matcher for a requirement.
        :param reqt: The requirement
        :type reqt: str
        :return: A version matcher (an instance of
                 :class:`distlib.version.Matcher`).
        """
        try:
            matcher = self.scheme.matcher(reqt)
        except UnsupportedVersionError:  # pragma: no cover
            # XXX compat-mode if cannot read the version
            name = reqt.split()[0]
            matcher = self.scheme.matcher(name)
        return matcher

    def find_providers(self, reqt):
        """
        Find the distributions which can fulfill a requirement.

        :param reqt: The requirement.
        :type reqt: str
        :return: A set of distributions which can fulfill the requirement.
        """
        matcher = self.get_matcher(reqt)
        name = matcher.key   # case-insensitive
        result = set()
        provided = self.provided
        if name in provided:
            for version, provider in provided[name]:
                try:
                    match = matcher.match(version)
                except UnsupportedVersionError:
                    match = False

                if match:
                    result.add(provider)
                    break
        return result

    def try_to_replace(self, provider, other, problems):
        """
        Attempt to replace one provider with another. This is typically used
        when resolving dependencies from multiple sources, e.g. A requires
        (B >= 1.0) while C requires (B >= 1.1).

        For successful replacement, ``provider`` must meet all the
        requirements which ``other`` fulfills.

        :param provider: The provider we are trying to replace with.
        :param other: The provider we're trying to replace.
        :param problems: If False is returned, this will contain what
                         problems prevented replacement. This is currently
                         a tuple of the literal string 'cantreplace',
                         ``provider``, ``other`` and the set of requirements
                         that ``provider`` couldn't fulfill.
        :return: True if we can replace ``other`` with ``provider``, else
                 False.
        """
        rlist = self.reqts[other]
        unmatched = set()
        for s in rlist:
            matcher = self.get_matcher(s)
            if not matcher.match(provider.version):
                unmatched.add(s)
        if unmatched:
            # can't replace other with provider
            problems.add(('cantreplace', provider, other,
                          frozenset(unmatched)))
            result = False
        else:
            # can replace other with provider
            self.remove_distribution(other)
            del self.reqts[other]
            for s in rlist:
                self.reqts.setdefault(provider, set()).add(s)
            self.add_distribution(provider)
            result = True
        return result

    def find(self, requirement, meta_extras=None, prereleases=False):
        """
        Find a distribution and all distributions it depends on.

        :param requirement: The requirement specifying the distribution to
                            find, or a Distribution instance.
        :param meta_extras: A list of meta extras such as :test:, :build: and
                            so on.
        :param prereleases: If ``True``, allow pre-release versions to be
                            returned - otherwise, don't return prereleases
                            unless they're all that's available.

        Return a set of :class:`Distribution` instances and a set of
        problems.

        The distributions returned should be such that they have the
        :attr:`required` attribute set to ``True`` if they were
        from the ``requirement`` passed to ``find()``, and they have the
        :attr:`build_time_dependency` attribute set to ``True`` unless they
        are post-installation dependencies of the ``requirement``.

        The problems should be a tuple consisting of the string
        ``'unsatisfied'`` and the requirement which couldn't be satisfied
        by any distribution known to the locator.
        """
        self.provided = {}
        self.dists = {}
        self.dists_by_name = {}
        self.reqts = {}

        meta_extras = set(meta_extras or [])
        if ':*:' in meta_extras:
            meta_extras.remove(':*:')
            # :meta: and :run: are implicitly included
            meta_extras |= set([':test:', ':build:', ':dev:'])

        if isinstance(requirement, Distribution):
            dist = odist = requirement
            logger.debug('passed %s as requirement', odist)
        else:
            dist = odist = self.locator.locate(requirement,
                                               prereleases=prereleases)
            if dist is None:
                raise DistlibException('Unable to locate %r' % requirement)
            logger.debug('located %s', odist)
        dist.requested = True
        problems = set()
        todo = set([dist])
        install_dists = set([odist])
        while todo:
            dist = todo.pop()
            name = dist.key     # case-insensitive
            if name not in self.dists_by_name:
                self.add_distribution(dist)
            else:
                # import pdb; pdb.set_trace()
                other = self.dists_by_name[name]
                if other != dist:
                    self.try_to_replace(dist, other, problems)

            ireqts = dist.run_requires | dist.meta_requires
            sreqts = dist.build_requires
            ereqts = set()
            if meta_extras and dist in install_dists:
                for key in ('test', 'build', 'dev'):
                    e = ':%s:' % key
                    if e in meta_extras:
                        ereqts |= getattr(dist, '%s_requires' % key)
            all_reqts = ireqts | sreqts | ereqts
            for r in all_reqts:
                providers = self.find_providers(r)
                if not providers:
                    logger.debug('No providers found for %r', r)
                    provider = self.locator.locate(r, prereleases=prereleases)
                    # If no provider is found and we didn't consider
                    # prereleases, consider them now.
                    if provider is None and not prereleases:
                        provider = self.locator.locate(r, prereleases=True)
                    if provider is None:
                        logger.debug('Cannot satisfy %r', r)
                        problems.add(('unsatisfied', r))
                    else:
                        n, v = provider.key, provider.version
                        if (n, v) not in self.dists:
                            todo.add(provider)
                        providers.add(provider)
                        if r in ireqts and dist in install_dists:
                            install_dists.add(provider)
                            logger.debug('Adding %s to install_dists',
                                         provider.name_and_version)
                for p in providers:
                    name = p.key
                    if name not in self.dists_by_name:
                        self.reqts.setdefault(p, set()).add(r)
                    else:
                        other = self.dists_by_name[name]
                        if other != p:
                            # see if other can be replaced by p
                            self.try_to_replace(p, other, problems)

        dists = set(self.dists.values())
        for dist in dists:
            dist.build_time_dependency = dist not in install_dists
            if dist.build_time_dependency:
                logger.debug('%s is a build-time dependency only.',
                             dist.name_and_version)
        logger.debug('find done for %s', odist)
        return dists, problems
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/metadata.py
# -*- coding: utf-8 -*- # # Copyright (C) 2012 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # """Implementation of the Metadata for Python packages PEPs. Supports all metadata formats (1.0, 1.1, 1.2, 1.3/2.1 and withdrawn 2.0). """ from __future__ import unicode_literals import codecs from email import message_from_file import json import logging import re from . import DistlibException, __version__ from .compat import StringIO, string_types, text_type from .markers import interpret from .util import extract_by_key, get_extras from .version import get_scheme, PEP440_VERSION_RE logger = logging.getLogger(__name__) class MetadataMissingError(DistlibException): """A required metadata is missing""" class MetadataConflictError(DistlibException): """Attempt to read or write metadata fields that are conflictual.""" class MetadataUnrecognizedVersionError(DistlibException): """Unknown metadata version number.""" class MetadataInvalidError(DistlibException): """A metadata value is invalid""" # public API of this module __all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION'] # Encoding used for the PKG-INFO files PKG_INFO_ENCODING = 'utf-8' # preferred version. Hopefully will be changed # to 1.2 once PEP 345 is supported everywhere PKG_INFO_PREFERRED_VERSION = '1.1' _LINE_PREFIX_1_2 = re.compile('\n \\|') _LINE_PREFIX_PRE_1_2 = re.compile('\n ') _241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Summary', 'Description', 'Keywords', 'Home-page', 'Author', 'Author-email', 'License') _314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description', 'Keywords', 'Home-page', 'Author', 'Author-email', 'License', 'Classifier', 'Download-URL', 'Obsoletes', 'Provides', 'Requires') _314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier', 'Download-URL') _345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description', 'Keywords', 'Home-page', 'Author', 'Author-email', 'Maintainer', 'Maintainer-email', 'License', 'Classifier', 'Download-URL', 'Obsoletes-Dist', 'Project-URL', 'Provides-Dist', 'Requires-Dist', 'Requires-Python', 'Requires-External') _345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python', 'Obsoletes-Dist', 'Requires-External', 'Maintainer', 'Maintainer-email', 'Project-URL') _426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description', 'Keywords', 'Home-page', 'Author', 'Author-email', 'Maintainer', 'Maintainer-email', 'License', 'Classifier', 'Download-URL', 'Obsoletes-Dist', 'Project-URL', 'Provides-Dist', 'Requires-Dist', 'Requires-Python', 'Requires-External', 'Private-Version', 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension', 'Provides-Extra') _426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension') # See issue #106: Sometimes 'Requires' and 'Provides' occur wrongly in # the metadata. Include them in the tuple literal below to allow them # (for now). 
_566_FIELDS = _426_FIELDS + ('Description-Content-Type', 'Requires', 'Provides') _566_MARKERS = ('Description-Content-Type',) _ALL_FIELDS = set() _ALL_FIELDS.update(_241_FIELDS) _ALL_FIELDS.update(_314_FIELDS) _ALL_FIELDS.update(_345_FIELDS) _ALL_FIELDS.update(_426_FIELDS) _ALL_FIELDS.update(_566_FIELDS) EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''') def _version2fieldlist(version): if version == '1.0': return _241_FIELDS elif version == '1.1': return _314_FIELDS elif version == '1.2': return _345_FIELDS elif version in ('1.3', '2.1'): return _345_FIELDS + _566_FIELDS elif version == '2.0': return _426_FIELDS raise MetadataUnrecognizedVersionError(version) def _best_version(fields): """Detect the best version depending on the fields used.""" def _has_marker(keys, markers): for marker in markers: if marker in keys: return True return False keys = [] for key, value in fields.items(): if value in ([], 'UNKNOWN', None): continue keys.append(key) possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.0', '2.1'] # first let's try to see if a field is not part of one of the version for key in keys: if key not in _241_FIELDS and '1.0' in possible_versions: possible_versions.remove('1.0') logger.debug('Removed 1.0 due to %s', key) if key not in _314_FIELDS and '1.1' in possible_versions: possible_versions.remove('1.1') logger.debug('Removed 1.1 due to %s', key) if key not in _345_FIELDS and '1.2' in possible_versions: possible_versions.remove('1.2') logger.debug('Removed 1.2 due to %s', key) if key not in _566_FIELDS and '1.3' in possible_versions: possible_versions.remove('1.3') logger.debug('Removed 1.3 due to %s', key) if key not in _566_FIELDS and '2.1' in possible_versions: if key != 'Description': # In 2.1, description allowed after headers possible_versions.remove('2.1') logger.debug('Removed 2.1 due to %s', key) if key not in _426_FIELDS and '2.0' in possible_versions: possible_versions.remove('2.0') logger.debug('Removed 2.0 due to %s', key) # possible_version contains qualified versions if len(possible_versions) == 1: return possible_versions[0] # found ! 
elif len(possible_versions) == 0: logger.debug('Out of options - unknown metadata set: %s', fields) raise MetadataConflictError('Unknown metadata set') # let's see if one unique marker is found is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS) is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS) is_2_1 = '2.1' in possible_versions and _has_marker(keys, _566_MARKERS) is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS) if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_0) > 1: raise MetadataConflictError('You used incompatible 1.1/1.2/2.0/2.1 fields') # we have the choice, 1.0, or 1.2, or 2.0 # - 1.0 has a broken Summary field but works with all tools # - 1.1 is to avoid # - 1.2 fixes Summary but has little adoption # - 2.0 adds more features and is very new if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_0: # we couldn't find any specific marker if PKG_INFO_PREFERRED_VERSION in possible_versions: return PKG_INFO_PREFERRED_VERSION if is_1_1: return '1.1' if is_1_2: return '1.2' if is_2_1: return '2.1' return '2.0' # This follows the rules about transforming keys as described in # https://www.python.org/dev/peps/pep-0566/#id17 _ATTR2FIELD = { name.lower().replace("-", "_"): name for name in _ALL_FIELDS } _FIELD2ATTR = {field: attr for attr, field in _ATTR2FIELD.items()} _PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist') _VERSIONS_FIELDS = ('Requires-Python',) _VERSION_FIELDS = ('Version',) _LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes', 'Requires', 'Provides', 'Obsoletes-Dist', 'Provides-Dist', 'Requires-Dist', 'Requires-External', 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist', 'Provides-Extra', 'Extension') _LISTTUPLEFIELDS = ('Project-URL',) _ELEMENTSFIELD = ('Keywords',) _UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description') _MISSING = object() _FILESAFE = re.compile('[^A-Za-z0-9.]+') def _get_name_and_version(name, version, for_filename=False): """Return the distribution name with version. If for_filename is true, return a filename-escaped form.""" if for_filename: # For both name and version any runs of non-alphanumeric or '.' # characters are replaced with a single '-'. Additionally any # spaces in the version string become '.' name = _FILESAFE.sub('-', name) version = _FILESAFE.sub('-', version.replace(' ', '.')) return '%s-%s' % (name, version) class LegacyMetadata(object): """The legacy metadata of a release. Supports versions 1.0, 1.1, 1.2, 2.0 and 1.3/2.1 (auto-detected). 
You can instantiate the class with one of these arguments (or none): - *path*, the path to a metadata file - *fileobj* give a file-like object with metadata as content - *mapping* is a dict-like object - *scheme* is a version scheme name """ # TODO document the mapping API and UNKNOWN default key def __init__(self, path=None, fileobj=None, mapping=None, scheme='default'): if [path, fileobj, mapping].count(None) < 2: raise TypeError('path, fileobj and mapping are exclusive') self._fields = {} self.requires_files = [] self._dependencies = None self.scheme = scheme if path is not None: self.read(path) elif fileobj is not None: self.read_file(fileobj) elif mapping is not None: self.update(mapping) self.set_metadata_version() def set_metadata_version(self): self._fields['Metadata-Version'] = _best_version(self._fields) def _write_field(self, fileobj, name, value): fileobj.write('%s: %s\n' % (name, value)) def __getitem__(self, name): return self.get(name) def __setitem__(self, name, value): return self.set(name, value) def __delitem__(self, name): field_name = self._convert_name(name) try: del self._fields[field_name] except KeyError: raise KeyError(name) def __contains__(self, name): return (name in self._fields or self._convert_name(name) in self._fields) def _convert_name(self, name): if name in _ALL_FIELDS: return name name = name.replace('-', '_').lower() return _ATTR2FIELD.get(name, name) def _default_value(self, name): if name in _LISTFIELDS or name in _ELEMENTSFIELD: return [] return 'UNKNOWN' def _remove_line_prefix(self, value): if self.metadata_version in ('1.0', '1.1'): return _LINE_PREFIX_PRE_1_2.sub('\n', value) else: return _LINE_PREFIX_1_2.sub('\n', value) def __getattr__(self, name): if name in _ATTR2FIELD: return self[name] raise AttributeError(name) # # Public API # # dependencies = property(_get_dependencies, _set_dependencies) def get_fullname(self, filesafe=False): """Return the distribution name with version. 
If filesafe is true, return a filename-escaped form.""" return _get_name_and_version(self['Name'], self['Version'], filesafe) def is_field(self, name): """return True if name is a valid metadata key""" name = self._convert_name(name) return name in _ALL_FIELDS def is_multi_field(self, name): name = self._convert_name(name) return name in _LISTFIELDS def read(self, filepath): """Read the metadata values from a file path.""" fp = codecs.open(filepath, 'r', encoding='utf-8') try: self.read_file(fp) finally: fp.close() def read_file(self, fileob): """Read the metadata values from a file object.""" msg = message_from_file(fileob) self._fields['Metadata-Version'] = msg['metadata-version'] # When reading, get all the fields we can for field in _ALL_FIELDS: if field not in msg: continue if field in _LISTFIELDS: # we can have multiple lines values = msg.get_all(field) if field in _LISTTUPLEFIELDS and values is not None: values = [tuple(value.split(',')) for value in values] self.set(field, values) else: # single line value = msg[field] if value is not None and value != 'UNKNOWN': self.set(field, value) # PEP 566 specifies that the body be used for the description, if # available body = msg.get_payload() self["Description"] = body if body else self["Description"] # logger.debug('Attempting to set metadata for %s', self) # self.set_metadata_version() def write(self, filepath, skip_unknown=False): """Write the metadata fields to filepath.""" fp = codecs.open(filepath, 'w', encoding='utf-8') try: self.write_file(fp, skip_unknown) finally: fp.close() def write_file(self, fileobject, skip_unknown=False): """Write the PKG-INFO format data to a file object.""" self.set_metadata_version() for field in _version2fieldlist(self['Metadata-Version']): values = self.get(field) if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']): continue if field in _ELEMENTSFIELD: self._write_field(fileobject, field, ','.join(values)) continue if field not in _LISTFIELDS: if field == 'Description': if self.metadata_version in ('1.0', '1.1'): values = values.replace('\n', '\n ') else: values = values.replace('\n', '\n |') values = [values] if field in _LISTTUPLEFIELDS: values = [','.join(value) for value in values] for value in values: self._write_field(fileobject, field, value) def update(self, other=None, **kwargs): """Set metadata values from the given iterable `other` and kwargs. Behavior is like `dict.update`: If `other` has a ``keys`` method, they are looped over and ``self[key]`` is assigned ``other[key]``. Else, ``other`` is an iterable of ``(key, value)`` iterables. Keys that don't match a metadata field or that have an empty value are dropped. 
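Illustrative example (added note: keys use the lowercase attribute style checked against _ATTR2FIELD, so a mixed-case key such as 'Name' would be dropped)::

    md.update({'name': 'pkg', 'version': '1.0', 'summary': 'demo'})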
""" def _set(key, value): if key in _ATTR2FIELD and value: self.set(self._convert_name(key), value) if not other: # other is None or empty container pass elif hasattr(other, 'keys'): for k in other.keys(): _set(k, other[k]) else: for k, v in other: _set(k, v) if kwargs: for k, v in kwargs.items(): _set(k, v) def set(self, name, value): """Control then set a metadata field.""" name = self._convert_name(name) if ((name in _ELEMENTSFIELD or name == 'Platform') and not isinstance(value, (list, tuple))): if isinstance(value, string_types): value = [v.strip() for v in value.split(',')] else: value = [] elif (name in _LISTFIELDS and not isinstance(value, (list, tuple))): if isinstance(value, string_types): value = [value] else: value = [] if logger.isEnabledFor(logging.WARNING): project_name = self['Name'] scheme = get_scheme(self.scheme) if name in _PREDICATE_FIELDS and value is not None: for v in value: # check that the values are valid if not scheme.is_valid_matcher(v.split(';')[0]): logger.warning( "'%s': '%s' is not valid (field '%s')", project_name, v, name) # FIXME this rejects UNKNOWN, is that right? elif name in _VERSIONS_FIELDS and value is not None: if not scheme.is_valid_constraint_list(value): logger.warning("'%s': '%s' is not a valid version (field '%s')", project_name, value, name) elif name in _VERSION_FIELDS and value is not None: if not scheme.is_valid_version(value): logger.warning("'%s': '%s' is not a valid version (field '%s')", project_name, value, name) if name in _UNICODEFIELDS: if name == 'Description': value = self._remove_line_prefix(value) self._fields[name] = value def get(self, name, default=_MISSING): """Get a metadata field.""" name = self._convert_name(name) if name not in self._fields: if default is _MISSING: default = self._default_value(name) return default if name in _UNICODEFIELDS: value = self._fields[name] return value elif name in _LISTFIELDS: value = self._fields[name] if value is None: return [] res = [] for val in value: if name not in _LISTTUPLEFIELDS: res.append(val) else: # That's for Project-URL res.append((val[0], val[1])) return res elif name in _ELEMENTSFIELD: value = self._fields[name] if isinstance(value, string_types): return value.split(',') return self._fields[name] def check(self, strict=False): """Check if the metadata is compliant. If strict is True then raise if no Name or Version are provided""" self.set_metadata_version() # XXX should check the versions (if the file was loaded) missing, warnings = [], [] for attr in ('Name', 'Version'): # required by PEP 345 if attr not in self: missing.append(attr) if strict and missing != []: msg = 'missing required metadata: %s' % ', '.join(missing) raise MetadataMissingError(msg) for attr in ('Home-page', 'Author'): if attr not in self: missing.append(attr) # checking metadata 1.2 (XXX needs to check 1.1, 1.0) if self['Metadata-Version'] != '1.2': return missing, warnings scheme = get_scheme(self.scheme) def are_valid_constraints(value): for v in value: if not scheme.is_valid_matcher(v.split(';')[0]): return False return True for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints), (_VERSIONS_FIELDS, scheme.is_valid_constraint_list), (_VERSION_FIELDS, scheme.is_valid_version)): for field in fields: value = self.get(field, None) if value is not None and not controller(value): warnings.append("Wrong value for '%s': %s" % (field, value)) return missing, warnings def todict(self, skip_missing=False): """Return fields as a dict. 
Field names will be converted to use the underscore-lowercase style instead of hyphen-mixed case (i.e. home_page instead of Home-page). This is as per https://www.python.org/dev/peps/pep-0566/#id17. """ self.set_metadata_version() fields = _version2fieldlist(self['Metadata-Version']) data = {} for field_name in fields: if not skip_missing or field_name in self._fields: key = _FIELD2ATTR[field_name] if key != 'project_url': data[key] = self[field_name] else: data[key] = [','.join(u) for u in self[field_name]] return data def add_requirements(self, requirements): if self['Metadata-Version'] == '1.1': # we can't have 1.1 metadata *and* Setuptools requires for field in ('Obsoletes', 'Requires', 'Provides'): if field in self: del self[field] self['Requires-Dist'] += requirements # Mapping API # TODO could add iter* variants def keys(self): return list(_version2fieldlist(self['Metadata-Version'])) def __iter__(self): for key in self.keys(): yield key def values(self): return [self[key] for key in self.keys()] def items(self): return [(key, self[key]) for key in self.keys()] def __repr__(self): return '<%s %s %s>' % (self.__class__.__name__, self.name, self.version) METADATA_FILENAME = 'pydist.json' WHEEL_METADATA_FILENAME = 'metadata.json' LEGACY_METADATA_FILENAME = 'METADATA' class Metadata(object): """ The metadata of a release. This implementation uses 2.0 (JSON) metadata where possible. If not possible, it wraps a LegacyMetadata instance which handles the key-value metadata format. """ METADATA_VERSION_MATCHER = re.compile(r'^\d+(\.\d+)*$') NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I) VERSION_MATCHER = PEP440_VERSION_RE SUMMARY_MATCHER = re.compile('.{1,2047}') METADATA_VERSION = '2.0' GENERATOR = 'distlib (%s)' % __version__ MANDATORY_KEYS = { 'name': (), 'version': (), 'summary': ('legacy',), } INDEX_KEYS = ('name version license summary description author ' 'author_email keywords platform home_page classifiers ' 'download_url') DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires ' 'dev_requires provides meta_requires obsoleted_by ' 'supports_environments') SYNTAX_VALIDATORS = { 'metadata_version': (METADATA_VERSION_MATCHER, ()), 'name': (NAME_MATCHER, ('legacy',)), 'version': (VERSION_MATCHER, ('legacy',)), 'summary': (SUMMARY_MATCHER, ('legacy',)), } __slots__ = ('_legacy', '_data', 'scheme') def __init__(self, path=None, fileobj=None, mapping=None, scheme='default'): if [path, fileobj, mapping].count(None) < 2: raise TypeError('path, fileobj and mapping are exclusive') self._legacy = None self._data = None self.scheme = scheme #import pdb; pdb.set_trace() if mapping is not None: try: self._validate_mapping(mapping, scheme) self._data = mapping except MetadataUnrecognizedVersionError: self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme) self.validate() else: data = None if path: with open(path, 'rb') as f: data = f.read() elif fileobj: data = fileobj.read() if data is None: # Initialised with no args - to be added self._data = { 'metadata_version': self.METADATA_VERSION, 'generator': self.GENERATOR, } else: if not isinstance(data, text_type): data = data.decode('utf-8') try: self._data = json.loads(data) self._validate_mapping(self._data, scheme) except ValueError: # Note: MetadataUnrecognizedVersionError does not # inherit from ValueError (it's a DistlibException, # which should not inherit from ValueError). 
# The ValueError comes from the json.load - if that # succeeds and we get a validation error, we want # that to propagate self._legacy = LegacyMetadata(fileobj=StringIO(data), scheme=scheme) self.validate() common_keys = set(('name', 'version', 'license', 'keywords', 'summary')) none_list = (None, list) none_dict = (None, dict) mapped_keys = { 'run_requires': ('Requires-Dist', list), 'build_requires': ('Setup-Requires-Dist', list), 'dev_requires': none_list, 'test_requires': none_list, 'meta_requires': none_list, 'extras': ('Provides-Extra', list), 'modules': none_list, 'namespaces': none_list, 'exports': none_dict, 'commands': none_dict, 'classifiers': ('Classifier', list), 'source_url': ('Download-URL', None), 'metadata_version': ('Metadata-Version', None), } del none_list, none_dict def __getattribute__(self, key): common = object.__getattribute__(self, 'common_keys') mapped = object.__getattribute__(self, 'mapped_keys') if key in mapped: lk, maker = mapped[key] if self._legacy: if lk is None: result = None if maker is None else maker() else: result = self._legacy.get(lk) else: value = None if maker is None else maker() if key not in ('commands', 'exports', 'modules', 'namespaces', 'classifiers'): result = self._data.get(key, value) else: # special cases for PEP 459 sentinel = object() result = sentinel d = self._data.get('extensions') if d: if key == 'commands': result = d.get('python.commands', value) elif key == 'classifiers': d = d.get('python.details') if d: result = d.get(key, value) else: d = d.get('python.exports') if not d: d = self._data.get('python.exports') if d: result = d.get(key, value) if result is sentinel: result = value elif key not in common: result = object.__getattribute__(self, key) elif self._legacy: result = self._legacy.get(key) else: result = self._data.get(key) return result def _validate_value(self, key, value, scheme=None): if key in self.SYNTAX_VALIDATORS: pattern, exclusions = self.SYNTAX_VALIDATORS[key] if (scheme or self.scheme) not in exclusions: m = pattern.match(value) if not m: raise MetadataInvalidError("'%s' is an invalid value for " "the '%s' property" % (value, key)) def __setattr__(self, key, value): self._validate_value(key, value) common = object.__getattribute__(self, 'common_keys') mapped = object.__getattribute__(self, 'mapped_keys') if key in mapped: lk, _ = mapped[key] if self._legacy: if lk is None: raise NotImplementedError self._legacy[lk] = value elif key not in ('commands', 'exports', 'modules', 'namespaces', 'classifiers'): self._data[key] = value else: # special cases for PEP 459 d = self._data.setdefault('extensions', {}) if key == 'commands': d['python.commands'] = value elif key == 'classifiers': d = d.setdefault('python.details', {}) d[key] = value else: d = d.setdefault('python.exports', {}) d[key] = value elif key not in common: object.__setattr__(self, key, value) else: if key == 'keywords': if isinstance(value, string_types): value = value.strip() if value: value = value.split() else: value = [] if self._legacy: self._legacy[key] = value else: self._data[key] = value @property def name_and_version(self): return _get_name_and_version(self.name, self.version, True) @property def provides(self): if self._legacy: result = self._legacy['Provides-Dist'] else: result = self._data.setdefault('provides', []) s = '%s (%s)' % (self.name, self.version) if s not in result: result.append(s) return result @provides.setter def provides(self, value): if self._legacy: self._legacy['Provides-Dist'] = value else: self._data['provides'] = 
value def get_requirements(self, reqts, extras=None, env=None): """ Base method to get dependencies, given a set of extras to satisfy and an optional environment context. :param reqts: A list of sometimes-wanted dependencies, perhaps dependent on extras and environment. :param extras: A list of optional components being requested. :param env: An optional environment for marker evaluation. """ if self._legacy: result = reqts else: result = [] extras = get_extras(extras or [], self.extras) for d in reqts: if 'extra' not in d and 'environment' not in d: # unconditional include = True else: if 'extra' not in d: # Not extra-dependent - only environment-dependent include = True else: include = d.get('extra') in extras if include: # Not excluded because of extras, check environment marker = d.get('environment') if marker: include = interpret(marker, env) if include: result.extend(d['requires']) for key in ('build', 'dev', 'test'): e = ':%s:' % key if e in extras: extras.remove(e) # A recursive call, but it should terminate since 'test' # has been removed from the extras reqts = self._data.get('%s_requires' % key, []) result.extend(self.get_requirements(reqts, extras=extras, env=env)) return result @property def dictionary(self): if self._legacy: return self._from_legacy() return self._data @property def dependencies(self): if self._legacy: raise NotImplementedError else: return extract_by_key(self._data, self.DEPENDENCY_KEYS) @dependencies.setter def dependencies(self, value): if self._legacy: raise NotImplementedError else: self._data.update(value) def _validate_mapping(self, mapping, scheme): if mapping.get('metadata_version') != self.METADATA_VERSION: raise MetadataUnrecognizedVersionError() missing = [] for key, exclusions in self.MANDATORY_KEYS.items(): if key not in mapping: if scheme not in exclusions: missing.append(key) if missing: msg = 'Missing metadata items: %s' % ', '.join(missing) raise MetadataMissingError(msg) for k, v in mapping.items(): self._validate_value(k, v, scheme) def validate(self): if self._legacy: missing, warnings = self._legacy.check(True) if missing or warnings: logger.warning('Metadata: missing: %s, warnings: %s', missing, warnings) else: self._validate_mapping(self._data, self.scheme) def todict(self): if self._legacy: return self._legacy.todict(True) else: result = extract_by_key(self._data, self.INDEX_KEYS) return result def _from_legacy(self): assert self._legacy and not self._data result = { 'metadata_version': self.METADATA_VERSION, 'generator': self.GENERATOR, } lmd = self._legacy.todict(True) # skip missing ones for k in ('name', 'version', 'license', 'summary', 'description', 'classifier'): if k in lmd: if k == 'classifier': nk = 'classifiers' else: nk = k result[nk] = lmd[k] kw = lmd.get('Keywords', []) if kw == ['']: kw = [] result['keywords'] = kw keys = (('requires_dist', 'run_requires'), ('setup_requires_dist', 'build_requires')) for ok, nk in keys: if ok in lmd and lmd[ok]: result[nk] = [{'requires': lmd[ok]}] result['provides'] = self.provides author = {} maintainer = {} return result LEGACY_MAPPING = { 'name': 'Name', 'version': 'Version', ('extensions', 'python.details', 'license'): 'License', 'summary': 'Summary', 'description': 'Description', ('extensions', 'python.project', 'project_urls', 'Home'): 'Home-page', ('extensions', 'python.project', 'contacts', 0, 'name'): 'Author', ('extensions', 'python.project', 'contacts', 0, 'email'): 'Author-email', 'source_url': 'Download-URL', ('extensions', 'python.details', 'classifiers'): 'Classifier', } 
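# Note (added commentary): tuple keys in LEGACY_MAPPING above are paths into
# the nested 2.0 JSON mapping. _to_legacy() below walks each path segment as
# a dict key or list index and, only when every segment resolves, copies the
# value across to the corresponding legacy field; string keys map directly.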
def _to_legacy(self): def process_entries(entries): reqts = set() for e in entries: extra = e.get('extra') env = e.get('environment') rlist = e['requires'] for r in rlist: if not env and not extra: reqts.add(r) else: marker = '' if extra: marker = 'extra == "%s"' % extra if env: if marker: marker = '(%s) and %s' % (env, marker) else: marker = env reqts.add(';'.join((r, marker))) return reqts assert self._data and not self._legacy result = LegacyMetadata() nmd = self._data # import pdb; pdb.set_trace() for nk, ok in self.LEGACY_MAPPING.items(): if not isinstance(nk, tuple): if nk in nmd: result[ok] = nmd[nk] else: d = nmd found = True for k in nk: try: d = d[k] except (KeyError, IndexError): found = False break if found: result[ok] = d r1 = process_entries(self.run_requires + self.meta_requires) r2 = process_entries(self.build_requires + self.dev_requires) if self.extras: result['Provides-Extra'] = sorted(self.extras) result['Requires-Dist'] = sorted(r1) result['Setup-Requires-Dist'] = sorted(r2) # TODO: any other fields wanted return result def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True): if [path, fileobj].count(None) != 1: raise ValueError('Exactly one of path and fileobj is needed') self.validate() if legacy: if self._legacy: legacy_md = self._legacy else: legacy_md = self._to_legacy() if path: legacy_md.write(path, skip_unknown=skip_unknown) else: legacy_md.write_file(fileobj, skip_unknown=skip_unknown) else: if self._legacy: d = self._from_legacy() else: d = self._data if fileobj: json.dump(d, fileobj, ensure_ascii=True, indent=2, sort_keys=True) else: with codecs.open(path, 'w', 'utf-8') as f: json.dump(d, f, ensure_ascii=True, indent=2, sort_keys=True) def add_requirements(self, requirements): if self._legacy: self._legacy.add_requirements(requirements) else: run_requires = self._data.setdefault('run_requires', []) always = None for entry in run_requires: if 'environment' not in entry and 'extra' not in entry: always = entry break if always is None: always = { 'requires': requirements } run_requires.insert(0, always) else: rset = set(always['requires']) | set(requirements) always['requires'] = sorted(rset) def __repr__(self): name = self.name or '(no name)' version = self.version or 'no version' return '<%s %s %s (%s)>' % (self.__class__.__name__, self.metadata_version, name, version)
0
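A minimal usage sketch for the metadata module above (not part of the vendored source; it assumes pip's vendored import path, and 'example-project' is a hypothetical name):

    import io
    from pip._vendor.distlib.metadata import LegacyMetadata

    # mapping keys use the lowercase attribute style expected by update()
    md = LegacyMetadata(mapping={'name': 'example-project',
                                 'version': '1.0.0',
                                 'summary': 'A demonstration package'})
    print(md['Metadata-Version'])    # picked automatically by _best_version()
    missing, warnings = md.check()   # 'Home-page' and 'Author' are reported
    buf = io.StringIO()
    md.write_file(buf, skip_unknown=True)
    print(buf.getvalue())            # PKG-INFO style "Field: value" lines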
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/version.py
# -*- coding: utf-8 -*- # # Copyright (C) 2012-2017 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # """ Implementation of a flexible versioning scheme providing support for PEP-440, setuptools-compatible and semantic versioning. """ import logging import re from .compat import string_types from .util import parse_requirement __all__ = ['NormalizedVersion', 'NormalizedMatcher', 'LegacyVersion', 'LegacyMatcher', 'SemanticVersion', 'SemanticMatcher', 'UnsupportedVersionError', 'get_scheme'] logger = logging.getLogger(__name__) class UnsupportedVersionError(ValueError): """This is an unsupported version.""" pass class Version(object): def __init__(self, s): self._string = s = s.strip() self._parts = parts = self.parse(s) assert isinstance(parts, tuple) assert len(parts) > 0 def parse(self, s): raise NotImplementedError('please implement in a subclass') def _check_compatible(self, other): if type(self) != type(other): raise TypeError('cannot compare %r and %r' % (self, other)) def __eq__(self, other): self._check_compatible(other) return self._parts == other._parts def __ne__(self, other): return not self.__eq__(other) def __lt__(self, other): self._check_compatible(other) return self._parts < other._parts def __gt__(self, other): return not (self.__lt__(other) or self.__eq__(other)) def __le__(self, other): return self.__lt__(other) or self.__eq__(other) def __ge__(self, other): return self.__gt__(other) or self.__eq__(other) # See http://docs.python.org/reference/datamodel#object.__hash__ def __hash__(self): return hash(self._parts) def __repr__(self): return "%s('%s')" % (self.__class__.__name__, self._string) def __str__(self): return self._string @property def is_prerelease(self): raise NotImplementedError('Please implement in subclasses.') class Matcher(object): version_class = None # value is either a callable or the name of a method _operators = { '<': lambda v, c, p: v < c, '>': lambda v, c, p: v > c, '<=': lambda v, c, p: v == c or v < c, '>=': lambda v, c, p: v == c or v > c, '==': lambda v, c, p: v == c, '===': lambda v, c, p: v == c, # by default, compatible => >=. '~=': lambda v, c, p: v == c or v > c, '!=': lambda v, c, p: v != c, } # this is a method only to support alternative implementations # via overriding def parse_requirement(self, s): return parse_requirement(s) def __init__(self, s): if self.version_class is None: raise ValueError('Please specify a version class') self._string = s = s.strip() r = self.parse_requirement(s) if not r: raise ValueError('Not valid: %r' % s) self.name = r.name self.key = self.name.lower() # for case-insensitive comparisons clist = [] if r.constraints: # import pdb; pdb.set_trace() for op, s in r.constraints: if s.endswith('.*'): if op not in ('==', '!='): raise ValueError('\'.*\' not allowed for ' '%r constraints' % op) # Could be a partial version (e.g. for '2.*') which # won't parse as a version, so keep it as a string vn, prefix = s[:-2], True # Just to check that vn is a valid version self.version_class(vn) else: # Should parse as a version, so we can create an # instance for the comparison vn, prefix = self.version_class(s), False clist.append((op, vn, prefix)) self._parts = tuple(clist) def match(self, version): """ Check if the provided version matches the constraints. :param version: The version to match against this instance. :type version: String or :class:`Version` instance. 
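        Illustrative example::

            NormalizedMatcher('foo (>= 1.0)').match('1.2')   # -> True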
""" if isinstance(version, string_types): version = self.version_class(version) for operator, constraint, prefix in self._parts: f = self._operators.get(operator) if isinstance(f, string_types): f = getattr(self, f) if not f: msg = ('%r not implemented ' 'for %s' % (operator, self.__class__.__name__)) raise NotImplementedError(msg) if not f(version, constraint, prefix): return False return True @property def exact_version(self): result = None if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='): result = self._parts[0][1] return result def _check_compatible(self, other): if type(self) != type(other) or self.name != other.name: raise TypeError('cannot compare %s and %s' % (self, other)) def __eq__(self, other): self._check_compatible(other) return self.key == other.key and self._parts == other._parts def __ne__(self, other): return not self.__eq__(other) # See http://docs.python.org/reference/datamodel#object.__hash__ def __hash__(self): return hash(self.key) + hash(self._parts) def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self._string) def __str__(self): return self._string PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?' r'(\.(post)(\d+))?(\.(dev)(\d+))?' r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$') def _pep_440_key(s): s = s.strip() m = PEP440_VERSION_RE.match(s) if not m: raise UnsupportedVersionError('Not a valid version: %s' % s) groups = m.groups() nums = tuple(int(v) for v in groups[1].split('.')) while len(nums) > 1 and nums[-1] == 0: nums = nums[:-1] if not groups[0]: epoch = 0 else: epoch = int(groups[0]) pre = groups[4:6] post = groups[7:9] dev = groups[10:12] local = groups[13] if pre == (None, None): pre = () else: pre = pre[0], int(pre[1]) if post == (None, None): post = () else: post = post[0], int(post[1]) if dev == (None, None): dev = () else: dev = dev[0], int(dev[1]) if local is None: local = () else: parts = [] for part in local.split('.'): # to ensure that numeric compares as > lexicographic, avoid # comparing them directly, but encode a tuple which ensures # correct sorting if part.isdigit(): part = (1, int(part)) else: part = (0, part) parts.append(part) local = tuple(parts) if not pre: # either before pre-release, or final release and after if not post and dev: # before pre-release pre = ('a', -1) # to sort before a0 else: pre = ('z',) # to sort after all pre-releases # now look at the state of post and dev. if not post: post = ('_',) # sort before 'a' if not dev: dev = ('final',) #print('%s -> %s' % (s, m.groups())) return epoch, nums, pre, post, dev, local _normalized_key = _pep_440_key class NormalizedVersion(Version): """A rational version. Good: 1.2 # equivalent to "1.2.0" 1.2.0 1.2a1 1.2.3a2 1.2.3b1 1.2.3c1 1.2.3.4 TODO: fill this out Bad: 1 # minimum two numbers 1.2a # release level must have a release serial 1.2.3b """ def parse(self, s): result = _normalized_key(s) # _normalized_key loses trailing zeroes in the release # clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0 # However, PEP 440 prefix matching needs it: for example, # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0). 
m = PEP440_VERSION_RE.match(s) # must succeed groups = m.groups() self._release_clause = tuple(int(v) for v in groups[1].split('.')) return result PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev']) @property def is_prerelease(self): return any(t[0] in self.PREREL_TAGS for t in self._parts if t) def _match_prefix(x, y): x = str(x) y = str(y) if x == y: return True if not x.startswith(y): return False n = len(y) return x[n] == '.' class NormalizedMatcher(Matcher): version_class = NormalizedVersion # value is either a callable or the name of a method _operators = { '~=': '_match_compatible', '<': '_match_lt', '>': '_match_gt', '<=': '_match_le', '>=': '_match_ge', '==': '_match_eq', '===': '_match_arbitrary', '!=': '_match_ne', } def _adjust_local(self, version, constraint, prefix): if prefix: strip_local = '+' not in constraint and version._parts[-1] else: # both constraint and version are # NormalizedVersion instances. # If constraint does not have a local component, # ensure the version doesn't, either. strip_local = not constraint._parts[-1] and version._parts[-1] if strip_local: s = version._string.split('+', 1)[0] version = self.version_class(s) return version, constraint def _match_lt(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) if version >= constraint: return False release_clause = constraint._release_clause pfx = '.'.join([str(i) for i in release_clause]) return not _match_prefix(version, pfx) def _match_gt(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) if version <= constraint: return False release_clause = constraint._release_clause pfx = '.'.join([str(i) for i in release_clause]) return not _match_prefix(version, pfx) def _match_le(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) return version <= constraint def _match_ge(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) return version >= constraint def _match_eq(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) if not prefix: result = (version == constraint) else: result = _match_prefix(version, constraint) return result def _match_arbitrary(self, version, constraint, prefix): return str(version) == str(constraint) def _match_ne(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) if not prefix: result = (version != constraint) else: result = not _match_prefix(version, constraint) return result def _match_compatible(self, version, constraint, prefix): version, constraint = self._adjust_local(version, constraint, prefix) if version == constraint: return True if version < constraint: return False # if not prefix: # return True release_clause = constraint._release_clause if len(release_clause) > 1: release_clause = release_clause[:-1] pfx = '.'.join([str(i) for i in release_clause]) return _match_prefix(version, pfx) _REPLACEMENTS = ( (re.compile('[.+-]$'), ''), # remove trailing puncts (re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start (re.compile('^[.-]'), ''), # remove leading puncts (re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion) (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading v(ersion) (re.compile('[.]{2,}'), '.'), # multiple runs of '.' 
(re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha (re.compile(r'\b(pre-alpha|prealpha)\b'), 'pre.alpha'), # standardise (re.compile(r'\(beta\)$'), 'beta'), # remove parentheses ) _SUFFIX_REPLACEMENTS = ( (re.compile('^[:~._+-]+'), ''), # remove leading puncts (re.compile('[,*")([\\]]'), ''), # remove unwanted chars (re.compile('[~:+_ -]'), '.'), # replace illegal chars (re.compile('[.]{2,}'), '.'), # multiple runs of '.' (re.compile(r'\.$'), ''), # trailing '.' ) _NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)') def _suggest_semantic_version(s): """ Try to suggest a semantic form for a version for which _suggest_normalized_version couldn't come up with anything. """ result = s.strip().lower() for pat, repl in _REPLACEMENTS: result = pat.sub(repl, result) if not result: result = '0.0.0' # Now look for numeric prefix, and separate it out from # the rest. #import pdb; pdb.set_trace() m = _NUMERIC_PREFIX.match(result) if not m: prefix = '0.0.0' suffix = result else: prefix = m.groups()[0].split('.') prefix = [int(i) for i in prefix] while len(prefix) < 3: prefix.append(0) if len(prefix) == 3: suffix = result[m.end():] else: suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():] prefix = prefix[:3] prefix = '.'.join([str(i) for i in prefix]) suffix = suffix.strip() if suffix: #import pdb; pdb.set_trace() # massage the suffix. for pat, repl in _SUFFIX_REPLACEMENTS: suffix = pat.sub(repl, suffix) if not suffix: result = prefix else: sep = '-' if 'dev' in suffix else '+' result = prefix + sep + suffix if not is_semver(result): result = None return result def _suggest_normalized_version(s): """Suggest a normalized version close to the given version string. If you have a version string that isn't rational (i.e. NormalizedVersion doesn't like it) then you might be able to get an equivalent (or close) rational version from this function. This does a number of simple normalizations to the given string, based on observation of versions currently in use on PyPI. Given a dump of those version during PyCon 2009, 4287 of them: - 2312 (53.93%) match NormalizedVersion without change with the automatic suggestion - 3474 (81.04%) match when using this suggestion method @param s {str} An irrational version string. @returns A rational version string, or None, if couldn't determine one. """ try: _normalized_key(s) return s # already rational except UnsupportedVersionError: pass rs = s.lower() # part of this could use maketrans for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'), ('beta', 'b'), ('rc', 'c'), ('-final', ''), ('-pre', 'c'), ('-release', ''), ('.release', ''), ('-stable', ''), ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''), ('final', '')): rs = rs.replace(orig, repl) # if something ends with dev or pre, we add a 0 rs = re.sub(r"pre$", r"pre0", rs) rs = re.sub(r"dev$", r"dev0", rs) # if we have something like "b-2" or "a.2" at the end of the # version, that is probably beta, alpha, etc # let's remove the dash or dot rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs) # 1.0-dev-r371 -> 1.0.dev371 # 0.1-dev-r79 -> 0.1.dev79 rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs) # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1 rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs) # Clean: v0.3, v1.0 if rs.startswith('v'): rs = rs[1:] # Clean leading '0's on numbers. #TODO: unintended side-effect on, e.g., "2003.05.09" # PyPI stats: 77 (~2%) better rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs) # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers # zero. 
# PyPI stats: 245 (7.56%) better rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs) # the 'dev-rNNN' tag is a dev tag rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs) # clean the - when used as a pre delimiter rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs) # a terminal "dev" or "devel" can be changed into ".dev0" rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs) # a terminal "dev" can be changed into ".dev0" rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs) # a terminal "final" or "stable" can be removed rs = re.sub(r"(final|stable)$", "", rs) # The 'r' and the '-' tags are post release tags # 0.4a1.r10 -> 0.4a1.post10 # 0.9.33-17222 -> 0.9.33.post17222 # 0.9.33-r17222 -> 0.9.33.post17222 rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs) # Clean 'r' instead of 'dev' usage: # 0.9.33+r17222 -> 0.9.33.dev17222 # 1.0dev123 -> 1.0.dev123 # 1.0.git123 -> 1.0.dev123 # 1.0.bzr123 -> 1.0.dev123 # 0.1a0dev.123 -> 0.1a0.dev123 # PyPI stats: ~150 (~4%) better rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs) # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage: # 0.2.pre1 -> 0.2c1 # 0.2-c1 -> 0.2c1 # 1.0preview123 -> 1.0c123 # PyPI stats: ~21 (0.62%) better rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs) # Tcl/Tk uses "px" for their post release markers rs = re.sub(r"p(\d+)$", r".post\1", rs) try: _normalized_key(rs) except UnsupportedVersionError: rs = None return rs # # Legacy version processing (distribute-compatible) # _VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I) _VERSION_REPLACE = { 'pre': 'c', 'preview': 'c', '-': 'final-', 'rc': 'c', 'dev': '@', '': None, '.': None, } def _legacy_key(s): def get_parts(s): result = [] for p in _VERSION_PART.split(s.lower()): p = _VERSION_REPLACE.get(p, p) if p: if '0' <= p[:1] <= '9': p = p.zfill(8) else: p = '*' + p result.append(p) result.append('*final') return result result = [] for p in get_parts(s): if p.startswith('*'): if p < '*final': while result and result[-1] == '*final-': result.pop() while result and result[-1] == '00000000': result.pop() result.append(p) return tuple(result) class LegacyVersion(Version): def parse(self, s): return _legacy_key(s) @property def is_prerelease(self): result = False for x in self._parts: if (isinstance(x, string_types) and x.startswith('*') and x < '*final'): result = True break return result class LegacyMatcher(Matcher): version_class = LegacyVersion _operators = dict(Matcher._operators) _operators['~='] = '_match_compatible' numeric_re = re.compile(r'^(\d+(\.\d+)*)') def _match_compatible(self, version, constraint, prefix): if version < constraint: return False m = self.numeric_re.match(str(constraint)) if not m: logger.warning('Cannot compute compatible match for version %s ' ' and constraint %s', version, constraint) return True s = m.groups()[0] if '.' in s: s = s.rsplit('.', 1)[0] return _match_prefix(version, s) # # Semantic versioning # _SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)' r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?' 
r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I) def is_semver(s): return _SEMVER_RE.match(s) def _semantic_key(s): def make_tuple(s, absent): if s is None: result = (absent,) else: parts = s[1:].split('.') # We can't compare ints and strings on Python 3, so fudge it # by zero-filling numeric values so simulate a numeric comparison result = tuple([p.zfill(8) if p.isdigit() else p for p in parts]) return result m = is_semver(s) if not m: raise UnsupportedVersionError(s) groups = m.groups() major, minor, patch = [int(i) for i in groups[:3]] # choose the '|' and '*' so that versions sort correctly pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*') return (major, minor, patch), pre, build class SemanticVersion(Version): def parse(self, s): return _semantic_key(s) @property def is_prerelease(self): return self._parts[1][0] != '|' class SemanticMatcher(Matcher): version_class = SemanticVersion class VersionScheme(object): def __init__(self, key, matcher, suggester=None): self.key = key self.matcher = matcher self.suggester = suggester def is_valid_version(self, s): try: self.matcher.version_class(s) result = True except UnsupportedVersionError: result = False return result def is_valid_matcher(self, s): try: self.matcher(s) result = True except UnsupportedVersionError: result = False return result def is_valid_constraint_list(self, s): """ Used for processing some metadata fields """ return self.is_valid_matcher('dummy_name (%s)' % s) def suggest(self, s): if self.suggester is None: result = None else: result = self.suggester(s) return result _SCHEMES = { 'normalized': VersionScheme(_normalized_key, NormalizedMatcher, _suggest_normalized_version), 'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s), 'semantic': VersionScheme(_semantic_key, SemanticMatcher, _suggest_semantic_version), } _SCHEMES['default'] = _SCHEMES['normalized'] def get_scheme(name): if name not in _SCHEMES: raise ValueError('unknown scheme name: %r' % name) return _SCHEMES[name]
0
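An illustrative usage sketch for the version API defined above (not part of the vendored source; it assumes pip's vendored import path, and the requirement string is hypothetical):

    from pip._vendor.distlib.version import (NormalizedVersion,
                                             NormalizedMatcher, get_scheme)

    # trailing zeros are insignificant; pre-releases sort before finals
    assert NormalizedVersion('1.2') == NormalizedVersion('1.2.0')
    assert NormalizedVersion('1.2a1') < NormalizedVersion('1.2')
    assert NormalizedVersion('1.2a1').is_prerelease

    # PEP 440 compatible-release matching via NormalizedMatcher
    m = NormalizedMatcher('requests (~= 2.4.1)')
    assert m.match('2.4.5') and not m.match('2.5.0')

    # the 'default' scheme is the normalized (PEP 440) one
    scheme = get_scheme('default')
    assert scheme.is_valid_version('1.0.post1')
    assert scheme.suggest('1.0-alpha-2') == '1.0a2'   # best-effort repair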
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/compat.py
# -*- coding: utf-8 -*- # # Copyright (C) 2013-2017 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # from __future__ import absolute_import import os import re import sys try: import ssl except ImportError: # pragma: no cover ssl = None if sys.version_info[0] < 3: # pragma: no cover from StringIO import StringIO string_types = basestring, text_type = unicode from types import FileType as file_type import __builtin__ as builtins import ConfigParser as configparser from ._backport import shutil from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit from urllib import (urlretrieve, quote as _quote, unquote, url2pathname, pathname2url, ContentTooShortError, splittype) def quote(s): if isinstance(s, unicode): s = s.encode('utf-8') return _quote(s) import urllib2 from urllib2 import (Request, urlopen, URLError, HTTPError, HTTPBasicAuthHandler, HTTPPasswordMgr, HTTPHandler, HTTPRedirectHandler, build_opener) if ssl: from urllib2 import HTTPSHandler import httplib import xmlrpclib import Queue as queue from HTMLParser import HTMLParser import htmlentitydefs raw_input = raw_input from itertools import ifilter as filter from itertools import ifilterfalse as filterfalse _userprog = None def splituser(host): """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" global _userprog if _userprog is None: import re _userprog = re.compile('^(.*)@(.*)$') match = _userprog.match(host) if match: return match.group(1, 2) return None, host else: # pragma: no cover from io import StringIO string_types = str, text_type = str from io import TextIOWrapper as file_type import builtins import configparser import shutil from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote, unquote, urlsplit, urlunsplit, splittype) from urllib.request import (urlopen, urlretrieve, Request, url2pathname, pathname2url, HTTPBasicAuthHandler, HTTPPasswordMgr, HTTPHandler, HTTPRedirectHandler, build_opener) if ssl: from urllib.request import HTTPSHandler from urllib.error import HTTPError, URLError, ContentTooShortError import http.client as httplib import urllib.request as urllib2 import xmlrpc.client as xmlrpclib import queue from html.parser import HTMLParser import html.entities as htmlentitydefs raw_input = input from itertools import filterfalse filter = filter try: from ssl import match_hostname, CertificateError except ImportError: # pragma: no cover class CertificateError(ValueError): pass def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 http://tools.ietf.org/html/rfc6125#section-6.4.3 """ pats = [] if not dn: return False parts = dn.split('.') leftmost, remainder = parts[0], parts[1:] wildcards = leftmost.count('*') if wildcards > max_wildcards: # Issue #17980: avoid denials of service by refusing more # than one wildcard per fragment. A survey of established # policy among SSL implementations showed it to be a # reasonable choice. raise CertificateError( "too many wildcards in certificate DNS name: " + repr(dn)) # speed up common case w/o wildcards if not wildcards: return dn.lower() == hostname.lower() # RFC 6125, section 6.4.3, subitem 1. # The client SHOULD NOT attempt to match a presented identifier in which # the wildcard character comprises a label other than the left-most label. if leftmost == '*': # When '*' is a fragment by itself, it matches a non-empty dotless # fragment. 
pats.append('[^.]+') elif leftmost.startswith('xn--') or hostname.startswith('xn--'): # RFC 6125, section 6.4.3, subitem 3. # The client SHOULD NOT attempt to match a presented identifier # where the wildcard character is embedded within an A-label or # U-label of an internationalized domain name. pats.append(re.escape(leftmost)) else: # Otherwise, '*' matches any dotless string, e.g. www* pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) # add the remaining fragments, ignore any wildcards for frag in remainder: pats.append(re.escape(frag)) pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) return pat.match(hostname) def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 rules are followed, but IP addresses are not accepted for *hostname*. CertificateError is raised on failure. On success, the function returns nothing. """ if not cert: raise ValueError("empty or no certificate, match_hostname needs a " "SSL socket or SSL context with either " "CERT_OPTIONAL or CERT_REQUIRED") dnsnames = [] san = cert.get('subjectAltName', ()) for key, value in san: if key == 'DNS': if _dnsname_match(value, hostname): return dnsnames.append(value) if not dnsnames: # The subject is only checked when there is no dNSName entry # in subjectAltName for sub in cert.get('subject', ()): for key, value in sub: # XXX according to RFC 2818, the most specific Common Name # must be used. if key == 'commonName': if _dnsname_match(value, hostname): return dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError("hostname %r " "doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError("hostname %r " "doesn't match %r" % (hostname, dnsnames[0])) else: raise CertificateError("no appropriate commonName or " "subjectAltName fields were found") try: from types import SimpleNamespace as Container except ImportError: # pragma: no cover class Container(object): """ A generic container for when multiple values need to be returned """ def __init__(self, **kwargs): self.__dict__.update(kwargs) try: from shutil import which except ImportError: # pragma: no cover # Implementation from Python 3.3 def which(cmd, mode=os.F_OK | os.X_OK, path=None): """Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path. """ # Check that a given file can be accessed with the correct mode. # Additionally check that `file` is not a directory, as on Windows # directories pass the os.access check. def _access_check(fn, mode): return (os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)) # If we're given a path with a directory part, look it up directly rather # than referring to PATH directories. This includes checking relative to the # current directory, e.g. ./script if os.path.dirname(cmd): if _access_check(cmd, mode): return cmd return None if path is None: path = os.environ.get("PATH", os.defpath) if not path: return None path = path.split(os.pathsep) if sys.platform == "win32": # The current directory takes precedence on Windows. if not os.curdir in path: path.insert(0, os.curdir) # PATHEXT is necessary to check on Windows. 
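            # e.g. PATHEXT=".COM;.EXE;.BAT;.CMD" makes which("python") probe
            # python.com, python.exe, python.bat and python.cmd in each
            # directory on PATH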
pathext = os.environ.get("PATHEXT", "").split(os.pathsep) # See if the given file matches any of the expected path extensions. # This will allow us to short circuit when given "python.exe". # If it does match, only test that one, otherwise we have to try # others. if any(cmd.lower().endswith(ext.lower()) for ext in pathext): files = [cmd] else: files = [cmd + ext for ext in pathext] else: # On other platforms you don't have things like PATHEXT to tell you # what file suffixes are executable, so just pass on cmd as-is. files = [cmd] seen = set() for dir in path: normdir = os.path.normcase(dir) if not normdir in seen: seen.add(normdir) for thefile in files: name = os.path.join(dir, thefile) if _access_check(name, mode): return name return None # ZipFile is a context manager in 2.7, but not in 2.6 from zipfile import ZipFile as BaseZipFile if hasattr(BaseZipFile, '__enter__'): # pragma: no cover ZipFile = BaseZipFile else: # pragma: no cover from zipfile import ZipExtFile as BaseZipExtFile class ZipExtFile(BaseZipExtFile): def __init__(self, base): self.__dict__.update(base.__dict__) def __enter__(self): return self def __exit__(self, *exc_info): self.close() # return None, so if an exception occurred, it will propagate class ZipFile(BaseZipFile): def __enter__(self): return self def __exit__(self, *exc_info): self.close() # return None, so if an exception occurred, it will propagate def open(self, *args, **kwargs): base = BaseZipFile.open(self, *args, **kwargs) return ZipExtFile(base) try: from platform import python_implementation except ImportError: # pragma: no cover def python_implementation(): """Return a string identifying the Python implementation.""" if 'PyPy' in sys.version: return 'PyPy' if os.name == 'java': return 'Jython' if sys.version.startswith('IronPython'): return 'IronPython' return 'CPython' try: import sysconfig except ImportError: # pragma: no cover from ._backport import sysconfig try: callable = callable except NameError: # pragma: no cover from collections.abc import Callable def callable(obj): return isinstance(obj, Callable) try: fsencode = os.fsencode fsdecode = os.fsdecode except AttributeError: # pragma: no cover # Issue #99: on some systems (e.g. containerised), # sys.getfilesystemencoding() returns None, and we need a real value, # so fall back to utf-8. From the CPython 2.7 docs relating to Unix and # sys.getfilesystemencoding(): the return value is "the user’s preference # according to the result of nl_langinfo(CODESET), or None if the # nl_langinfo(CODESET) failed." _fsencoding = sys.getfilesystemencoding() or 'utf-8' if _fsencoding == 'mbcs': _fserrors = 'strict' else: _fserrors = 'surrogateescape' def fsencode(filename): if isinstance(filename, bytes): return filename elif isinstance(filename, text_type): return filename.encode(_fsencoding, _fserrors) else: raise TypeError("expect bytes or str, not %s" % type(filename).__name__) def fsdecode(filename): if isinstance(filename, text_type): return filename elif isinstance(filename, bytes): return filename.decode(_fsencoding, _fserrors) else: raise TypeError("expect bytes or str, not %s" % type(filename).__name__) try: from tokenize import detect_encoding except ImportError: # pragma: no cover from codecs import BOM_UTF8, lookup import re cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)") def _get_normal_name(orig_enc): """Imitates get_normal_name in tokenizer.c.""" # Only care about the first 12 characters. 
enc = orig_enc[:12].lower().replace("_", "-") if enc == "utf-8" or enc.startswith("utf-8-"): return "utf-8" if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): return "iso-8859-1" return orig_enc def detect_encoding(readline): """ The detect_encoding() function is used to detect the encoding that should be used to decode a Python source file. It requires one argument, readline, in the same way as the tokenize() generator. It will call readline a maximum of twice, and return the encoding used (as a string) and a list of any lines (left as bytes) it has read in. It detects the encoding from the presence of a utf-8 bom or an encoding cookie as specified in pep-0263. If both a bom and a cookie are present, but disagree, a SyntaxError will be raised. If the encoding cookie is an invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, 'utf-8-sig' is returned. If no encoding is specified, then the default of 'utf-8' will be returned. """ try: filename = readline.__self__.name except AttributeError: filename = None bom_found = False encoding = None default = 'utf-8' def read_or_stop(): try: return readline() except StopIteration: return b'' def find_cookie(line): try: # Decode as UTF-8. Either the line is an encoding declaration, # in which case it should be pure ASCII, or it must be UTF-8 # per default encoding. line_string = line.decode('utf-8') except UnicodeDecodeError: msg = "invalid or missing encoding declaration" if filename is not None: msg = '{} for {!r}'.format(msg, filename) raise SyntaxError(msg) matches = cookie_re.findall(line_string) if not matches: return None encoding = _get_normal_name(matches[0]) try: codec = lookup(encoding) except LookupError: # This behaviour mimics the Python interpreter if filename is None: msg = "unknown encoding: " + encoding else: msg = "unknown encoding for {!r}: {}".format(filename, encoding) raise SyntaxError(msg) if bom_found: if codec.name != 'utf-8': # This behaviour mimics the Python interpreter if filename is None: msg = 'encoding problem: utf-8' else: msg = 'encoding problem for {!r}: utf-8'.format(filename) raise SyntaxError(msg) encoding += '-sig' return encoding first = read_or_stop() if first.startswith(BOM_UTF8): bom_found = True first = first[3:] default = 'utf-8-sig' if not first: return default, [] encoding = find_cookie(first) if encoding: return encoding, [first] second = read_or_stop() if not second: return default, [first] encoding = find_cookie(second) if encoding: return encoding, [first, second] return default, [first, second] # For converting & <-> &amp; etc. 
try:
    from html import escape
except ImportError:
    from cgi import escape

if sys.version_info[:2] < (3, 4):
    unescape = HTMLParser().unescape
else:
    from html import unescape

try:
    from collections import ChainMap
except ImportError: # pragma: no cover
    from collections import MutableMapping

    try:
        from reprlib import recursive_repr as _recursive_repr
    except ImportError:
        def _recursive_repr(fillvalue='...'):
            '''
            Decorator to make a repr function return fillvalue for a recursive
            call
            '''

            def decorating_function(user_function):
                repr_running = set()

                def wrapper(self):
                    key = id(self), get_ident()
                    if key in repr_running:
                        return fillvalue
                    repr_running.add(key)
                    try:
                        result = user_function(self)
                    finally:
                        repr_running.discard(key)
                    return result

                # Can't use functools.wraps() here because of bootstrap issues
                wrapper.__module__ = getattr(user_function, '__module__')
                wrapper.__doc__ = getattr(user_function, '__doc__')
                wrapper.__name__ = getattr(user_function, '__name__')
                wrapper.__annotations__ = getattr(user_function,
                                                  '__annotations__', {})
                return wrapper

            return decorating_function

    class ChainMap(MutableMapping):
        '''
        A ChainMap groups multiple dicts (or other mappings) together to
        create a single, updateable view.

        The underlying mappings are stored in a list. That list is public and
        can be accessed or updated using the *maps* attribute. There is no
        other state.

        Lookups search the underlying mappings successively until a key is
        found. In contrast, writes, updates, and deletions only operate on
        the first mapping.
        '''

        def __init__(self, *maps):
            '''Initialize a ChainMap by setting *maps* to the given mappings.
            If no mappings are provided, a single empty dictionary is used.
            '''
            self.maps = list(maps) or [{}]          # always at least one map

        def __missing__(self, key):
            raise KeyError(key)

        def __getitem__(self, key):
            for mapping in self.maps:
                try:
                    return mapping[key]             # can't use 'key in mapping' with defaultdict
                except KeyError:
                    pass
            return self.__missing__(key)            # support subclasses that define __missing__

        def get(self, key, default=None):
            return self[key] if key in self else default

        def __len__(self):
            return len(set().union(*self.maps))     # reuses stored hash values if possible

        def __iter__(self):
            return iter(set().union(*self.maps))

        def __contains__(self, key):
            return any(key in m for m in self.maps)

        def __bool__(self):
            return any(self.maps)

        @_recursive_repr()
        def __repr__(self):
            return '{0.__class__.__name__}({1})'.format(
                self, ', '.join(map(repr, self.maps)))

        @classmethod
        def fromkeys(cls, iterable, *args):
            'Create a ChainMap with a single dict created from the iterable.'
            return cls(dict.fromkeys(iterable, *args))

        def copy(self):
            'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
            return self.__class__(self.maps[0].copy(), *self.maps[1:])

        __copy__ = copy

        def new_child(self):                        # like Django's Context.push()
            'New ChainMap with a new dict followed by all previous maps.'
            return self.__class__({}, *self.maps)

        @property
        def parents(self):                          # like Django's Context.pop()
            'New ChainMap from maps[1:].'
            return self.__class__(*self.maps[1:])

        def __setitem__(self, key, value):
            self.maps[0][key] = value

        def __delitem__(self, key):
            try:
                del self.maps[0][key]
            except KeyError:
                raise KeyError('Key not found in the first mapping: {!r}'.format(key))

        def popitem(self):
            'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
            try:
                return self.maps[0].popitem()
            except KeyError:
                raise KeyError('No keys found in the first mapping.')

        def pop(self, key, *args):
            'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
            try:
                return self.maps[0].pop(key, *args)
            except KeyError:
                raise KeyError('Key not found in the first mapping: {!r}'.format(key))

        def clear(self):
            'Clear maps[0], leaving maps[1:] intact.'
            self.maps[0].clear()

try:
    from importlib.util import cache_from_source  # Python >= 3.4
except ImportError: # pragma: no cover
    try:
        from imp import cache_from_source
    except ImportError: # pragma: no cover
        def cache_from_source(path, debug_override=None):
            assert path.endswith('.py')
            if debug_override is None:
                debug_override = __debug__
            if debug_override:
                suffix = 'c'
            else:
                suffix = 'o'
            return path + suffix

try:
    from collections import OrderedDict
except ImportError: # pragma: no cover
## {{{ http://code.activestate.com/recipes/576693/ (r9)
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
    try:
        from thread import get_ident as _get_ident
    except ImportError:
        from dummy_thread import get_ident as _get_ident

    try:
        from _abcoll import KeysView, ValuesView, ItemsView
    except ImportError:
        pass

    class OrderedDict(dict):
        'Dictionary that remembers insertion order'
        # An inherited dict maps keys to values.
        # The inherited dict provides __getitem__, __len__, __contains__, and get.
        # The remaining methods are order-aware.
        # Big-O running times for all methods are the same as for regular
        # dictionaries.

        # The internal self.__map dictionary maps keys to links in a doubly
        # linked list. The circular doubly linked list starts and ends with a
        # sentinel element. The sentinel element never gets deleted (this
        # simplifies the algorithm). Each link is stored as a list of length
        # three: [PREV, NEXT, KEY].

        def __init__(self, *args, **kwds):
            '''Initialize an ordered dictionary. Signature is the same as for
            regular dictionaries, but keyword arguments are not recommended
            because their insertion order is arbitrary.
            '''
            if len(args) > 1:
                raise TypeError('expected at most 1 arguments, got %d' % len(args))
            try:
                self.__root
            except AttributeError:
                self.__root = root = []                     # sentinel node
                root[:] = [root, root, None]
                self.__map = {}
            self.__update(*args, **kwds)

        def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
            'od.__setitem__(i, y) <==> od[i]=y'
            # Setting a new item creates a new link which goes at the end of
            # the linked list, and the inherited dictionary is updated with
            # the new key/value pair.
            if key not in self:
                root = self.__root
                last = root[0]
                last[1] = root[0] = self.__map[key] = [last, root, key]
            dict_setitem(self, key, value)

        def __delitem__(self, key, dict_delitem=dict.__delitem__):
            'od.__delitem__(y) <==> del od[y]'
            # Deleting an existing item uses self.__map to find the link which
            # is then removed by updating the links in the predecessor and
            # successor nodes.
            dict_delitem(self, key)
            link_prev, link_next, key = self.__map.pop(key)
            link_prev[1] = link_next
            link_next[0] = link_prev

        def __iter__(self):
            'od.__iter__() <==> iter(od)'
            root = self.__root
            curr = root[1]
            while curr is not root:
                yield curr[2]
                curr = curr[1]

        def __reversed__(self):
            'od.__reversed__() <==> reversed(od)'
            root = self.__root
            curr = root[0]
            while curr is not root:
                yield curr[2]
                curr = curr[0]

        def clear(self):
            'od.clear() -> None. Remove all items from od.'
            try:
                for node in self.__map.itervalues():
                    del node[:]
                root = self.__root
                root[:] = [root, root, None]
                self.__map.clear()
            except AttributeError:
                pass
            dict.clear(self)

        def popitem(self, last=True):
            '''od.popitem() -> (k, v), return and remove a (key, value) pair.
            Pairs are returned in LIFO order if last is true or FIFO order if
            false.
            '''
            if not self:
                raise KeyError('dictionary is empty')
            root = self.__root
            if last:
                link = root[0]
                link_prev = link[0]
                link_prev[1] = root
                root[0] = link_prev
            else:
                link = root[1]
                link_next = link[1]
                root[1] = link_next
                link_next[0] = root
            key = link[2]
            del self.__map[key]
            value = dict.pop(self, key)
            return key, value

        # -- the following methods do not depend on the internal structure --

        def keys(self):
            'od.keys() -> list of keys in od'
            return list(self)

        def values(self):
            'od.values() -> list of values in od'
            return [self[key] for key in self]

        def items(self):
            'od.items() -> list of (key, value) pairs in od'
            return [(key, self[key]) for key in self]

        def iterkeys(self):
            'od.iterkeys() -> an iterator over the keys in od'
            return iter(self)

        def itervalues(self):
            'od.itervalues -> an iterator over the values in od'
            for k in self:
                yield self[k]

        def iteritems(self):
            'od.iteritems -> an iterator over the (key, value) items in od'
            for k in self:
                yield (k, self[k])

        def update(*args, **kwds):
            '''od.update(E, **F) -> None. Update od from dict/iterable E and F.

            If E is a dict instance, does:          for k in E: od[k] = E[k]
            If E has a .keys() method, does:        for k in E.keys(): od[k] = E[k]
            Or if E is an iterable of items, does:  for k, v in E: od[k] = v
            In either case, this is followed by:    for k, v in F.items(): od[k] = v
            '''
            if len(args) > 2:
                raise TypeError('update() takes at most 2 positional '
                                'arguments (%d given)' % (len(args),))
            elif not args:
                raise TypeError('update() takes at least 1 argument (0 given)')
            self = args[0]
            # Make progressively weaker assumptions about "other"
            other = ()
            if len(args) == 2:
                other = args[1]
            if isinstance(other, dict):
                for key in other:
                    self[key] = other[key]
            elif hasattr(other, 'keys'):
                for key in other.keys():
                    self[key] = other[key]
            else:
                for key, value in other:
                    self[key] = value
            for key, value in kwds.items():
                self[key] = value

        __update = update  # let subclasses override update without breaking __init__

        __marker = object()

        def pop(self, key, default=__marker):
            '''od.pop(k[,d]) -> v, remove specified key and return the
            corresponding value. If key is not found, d is returned if given,
            otherwise KeyError is raised.
            '''
            if key in self:
                result = self[key]
                del self[key]
                return result
            if default is self.__marker:
                raise KeyError(key)
            return default

        def setdefault(self, key, default=None):
            'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
            if key in self:
                return self[key]
            self[key] = default
            return default

        def __repr__(self, _repr_running=None):
            'od.__repr__() <==> repr(od)'
            if not _repr_running:
                _repr_running = {}
            call_key = id(self), _get_ident()
            if call_key in _repr_running:
                return '...'
            _repr_running[call_key] = 1
            try:
                if not self:
                    return '%s()' % (self.__class__.__name__,)
                return '%s(%r)' % (self.__class__.__name__, self.items())
            finally:
                del _repr_running[call_key]

        def __reduce__(self):
            'Return state information for pickling'
            items = [[k, self[k]] for k in self]
            inst_dict = vars(self).copy()
            for k in vars(OrderedDict()):
                inst_dict.pop(k, None)
            if inst_dict:
                return (self.__class__, (items,), inst_dict)
            return self.__class__, (items,)

        def copy(self):
            'od.copy() -> a shallow copy of od'
            return self.__class__(self)

        @classmethod
        def fromkeys(cls, iterable, value=None):
            '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
            and values equal to v (which defaults to None).
            '''
            d = cls()
            for key in iterable:
                d[key] = value
            return d

        def __eq__(self, other):
            '''od.__eq__(y) <==> od==y. Comparison to another OD is
            order-sensitive while comparison to a regular mapping is
            order-insensitive.
            '''
            if isinstance(other, OrderedDict):
                return len(self) == len(other) and self.items() == other.items()
            return dict.__eq__(self, other)

        def __ne__(self, other):
            return not self == other

        # -- the following methods are only used in Python 2.7 --

        def viewkeys(self):
            "od.viewkeys() -> a set-like object providing a view on od's keys"
            return KeysView(self)

        def viewvalues(self):
            "od.viewvalues() -> an object providing a view on od's values"
            return ValuesView(self)

        def viewitems(self):
            "od.viewitems() -> a set-like object providing a view on od's items"
            return ItemsView(self)

try:
    from logging.config import BaseConfigurator, valid_ident
except ImportError: # pragma: no cover
    IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)

    def valid_ident(s):
        m = IDENTIFIER.match(s)
        if not m:
            raise ValueError('Not a valid Python identifier: %r' % s)
        return True

    # The ConvertingXXX classes are wrappers around standard Python containers,
    # and they serve to convert any suitable values in the container. The
    # conversion converts base dicts, lists and tuples to their wrapped
    # equivalents, whereas strings which match a conversion format are converted
    # appropriately.
    #
    # Each wrapper should have a configurator attribute holding the actual
    # configurator to use for conversion.

    class ConvertingDict(dict):
        """A converting dictionary wrapper."""

        def __getitem__(self, key):
            value = dict.__getitem__(self, key)
            result = self.configurator.convert(value)
            # If the converted value is different, save for next time
            if value is not result:
                self[key] = result
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    result.parent = self
                    result.key = key
            return result

        def get(self, key, default=None):
            value = dict.get(self, key, default)
            result = self.configurator.convert(value)
            # If the converted value is different, save for next time
            if value is not result:
                self[key] = result
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    result.parent = self
                    result.key = key
            return result

        def pop(self, key, default=None):
            value = dict.pop(self, key, default)
            result = self.configurator.convert(value)
            if value is not result:
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    result.parent = self
                    result.key = key
            return result

    class ConvertingList(list):
        """A converting list wrapper."""

        def __getitem__(self, key):
            value = list.__getitem__(self, key)
            result = self.configurator.convert(value)
            # If the converted value is different, save for next time
            if value is not result:
                self[key] = result
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    result.parent = self
                    result.key = key
            return result

        def pop(self, idx=-1):
            value = list.pop(self, idx)
            result = self.configurator.convert(value)
            if value is not result:
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    result.parent = self
            return result

    class ConvertingTuple(tuple):
        """A converting tuple wrapper."""

        def __getitem__(self, key):
            value = tuple.__getitem__(self, key)
            result = self.configurator.convert(value)
            if value is not result:
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    result.parent = self
                    result.key = key
            return result

    class BaseConfigurator(object):
        """
        The configurator base class which defines some useful defaults.
        """

        CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')

        WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
        DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
        INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
        DIGIT_PATTERN = re.compile(r'^\d+$')

        value_converters = {
            'ext' : 'ext_convert',
            'cfg' : 'cfg_convert',
        }

        # We might want to use a different one, e.g. importlib
        importer = staticmethod(__import__)

        def __init__(self, config):
            self.config = ConvertingDict(config)
            self.config.configurator = self

        def resolve(self, s):
            """
            Resolve strings to objects using standard import and attribute
            syntax.
            """
            name = s.split('.')
            used = name.pop(0)
            try:
                found = self.importer(used)
                for frag in name:
                    used += '.' + frag
                    try:
                        found = getattr(found, frag)
                    except AttributeError:
                        self.importer(used)
                        found = getattr(found, frag)
                return found
            except ImportError:
                e, tb = sys.exc_info()[1:]
                v = ValueError('Cannot resolve %r: %s' % (s, e))
                v.__cause__, v.__traceback__ = e, tb
                raise v

        def ext_convert(self, value):
            """Default converter for the ext:// protocol."""
            return self.resolve(value)

        def cfg_convert(self, value):
            """Default converter for the cfg:// protocol."""
            rest = value
            m = self.WORD_PATTERN.match(rest)
            if m is None:
                raise ValueError("Unable to convert %r" % value)
            else:
                rest = rest[m.end():]
                d = self.config[m.groups()[0]]
                #print d, rest
                while rest:
                    m = self.DOT_PATTERN.match(rest)
                    if m:
                        d = d[m.groups()[0]]
                    else:
                        m = self.INDEX_PATTERN.match(rest)
                        if m:
                            idx = m.groups()[0]
                            if not self.DIGIT_PATTERN.match(idx):
                                d = d[idx]
                            else:
                                try:
                                    n = int(idx) # try as number first (most likely)
                                    d = d[n]
                                except TypeError:
                                    d = d[idx]
                    if m:
                        rest = rest[m.end():]
                    else:
                        raise ValueError('Unable to convert '
                                         '%r at %r' % (value, rest))
            #rest should be empty
            return d

        def convert(self, value):
            """
            Convert values to an appropriate type. dicts, lists and tuples are
            replaced by their converting alternatives. Strings are checked to
            see if they have a conversion format and are converted if they do.
            """
            if not isinstance(value, ConvertingDict) and isinstance(value, dict):
                value = ConvertingDict(value)
                value.configurator = self
            elif not isinstance(value, ConvertingList) and isinstance(value, list):
                value = ConvertingList(value)
                value.configurator = self
            elif not isinstance(value, ConvertingTuple) and\
                     isinstance(value, tuple):
                value = ConvertingTuple(value)
                value.configurator = self
            elif isinstance(value, string_types):
                m = self.CONVERT_PATTERN.match(value)
                if m:
                    d = m.groupdict()
                    prefix = d['prefix']
                    converter = self.value_converters.get(prefix, None)
                    if converter:
                        suffix = d['suffix']
                        converter = getattr(self, converter)
                        value = converter(suffix)
            return value

        def configure_custom(self, config):
            """Configure an object with a user-supplied factory."""
            c = config.pop('()')
            if not callable(c):
                c = self.resolve(c)
            props = config.pop('.', None)
            # Check for valid identifiers
            kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
            result = c(**kwargs)
            if props:
                for name, value in props.items():
                    setattr(result, name, value)
            return result

        def as_tuple(self, value):
            """Utility function which converts lists to tuples."""
            if isinstance(value, list):
                value = tuple(value)
            return value
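

# ---------------------------------------------------------------------------
# Illustrative sketch (added by the editor, not part of the original distlib
# compat module): a quick demonstration of the ChainMap semantics documented
# above. Lookups fall through the maps in order; writes and deletions touch
# only maps[0]. Runs with either the stdlib ChainMap or the backport above.
if __name__ == '__main__':  # pragma: no cover
    defaults = {'colour': 'red', 'user': 'guest'}
    overrides = {'user': 'admin'}
    cm = ChainMap(overrides, defaults)
    assert cm['user'] == 'admin'        # found in the first mapping
    assert cm['colour'] == 'red'        # falls through to the second
    cm['colour'] = 'blue'               # write lands in maps[0] only
    assert defaults['colour'] == 'red'  # the underlying map is untouched
    print(cm.parents['user'])           # 'guest': the view without maps[0]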
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/index.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
try:
    from threading import Thread
except ImportError:
    from dummy_threading import Thread

from . import DistlibException
from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
                     urlparse, build_opener, string_types)
from .util import cached_property, zip_dir, ServerProxy

logger = logging.getLogger(__name__)

DEFAULT_INDEX = 'https://pypi.org/pypi'
DEFAULT_REALM = 'pypi'

class PackageIndex(object):
    """
    This class represents a package index compatible with PyPI, the Python
    Package Index.
    """

    boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'

    def __init__(self, url=None):
        """
        Initialise an instance.

        :param url: The URL of the index. If not specified, the URL for PyPI
                    is used.
        """
        self.url = url or DEFAULT_INDEX
        self.read_configuration()
        scheme, netloc, path, params, query, frag = urlparse(self.url)
        if params or query or frag or scheme not in ('http', 'https'):
            raise DistlibException('invalid repository: %s' % self.url)
        self.password_handler = None
        self.ssl_verifier = None
        self.gpg = None
        self.gpg_home = None
        with open(os.devnull, 'w') as sink:
            # Use gpg by default rather than gpg2, as gpg2 insists on
            # prompting for passwords
            for s in ('gpg', 'gpg2'):
                try:
                    rc = subprocess.check_call([s, '--version'], stdout=sink,
                                               stderr=sink)
                    if rc == 0:
                        self.gpg = s
                        break
                except OSError:
                    pass

    def _get_pypirc_command(self):
        """
        Get the distutils command for interacting with PyPI configurations.
        :return: the command.
        """
        from distutils.core import Distribution
        from distutils.config import PyPIRCCommand
        d = Distribution()
        return PyPIRCCommand(d)

    def read_configuration(self):
        """
        Read the PyPI access configuration as supported by distutils, getting
        PyPI to do the actual work. This populates ``username``, ``password``,
        ``realm`` and ``url`` attributes from the configuration.
        """
        # get distutils to do the work
        c = self._get_pypirc_command()
        c.repository = self.url
        cfg = c._read_pypirc()
        self.username = cfg.get('username')
        self.password = cfg.get('password')
        self.realm = cfg.get('realm', 'pypi')
        self.url = cfg.get('repository', self.url)

    def save_configuration(self):
        """
        Save the PyPI access configuration. You must have set ``username`` and
        ``password`` attributes before calling this method.

        Again, distutils is used to do the actual work.
        """
        self.check_credentials()
        # get distutils to do the work
        c = self._get_pypirc_command()
        c._store_pypirc(self.username, self.password)

    def check_credentials(self):
        """
        Check that ``username`` and ``password`` have been set, and raise an
        exception if not.
        """
        if self.username is None or self.password is None:
            raise DistlibException('username and password must be set')
        pm = HTTPPasswordMgr()
        _, netloc, _, _, _, _ = urlparse(self.url)
        pm.add_password(self.realm, netloc, self.username, self.password)
        self.password_handler = HTTPBasicAuthHandler(pm)

    def register(self, metadata):
        """
        Register a distribution on PyPI, using the provided metadata.

        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the distribution to be
                         registered.
        :return: The HTTP response received from PyPI upon submission of the
                 request.
        """
        self.check_credentials()
        metadata.validate()
        d = metadata.todict()
        d[':action'] = 'verify'
        request = self.encode_request(d.items(), [])
        response = self.send_request(request)
        d[':action'] = 'submit'
        request = self.encode_request(d.items(), [])
        return self.send_request(request)

    def _reader(self, name, stream, outbuf):
        """
        Thread runner for reading lines from a subprocess into a buffer.

        :param name: The logical name of the stream (used for logging only).
        :param stream: The stream to read from. This will typically be a pipe
                       connected to the output stream of a subprocess.
        :param outbuf: The list to append the read lines to.
        """
        while True:
            s = stream.readline()
            if not s:
                break
            s = s.decode('utf-8').rstrip()
            outbuf.append(s)
            logger.debug('%s: %s' % (name, s))
        stream.close()

    def get_sign_command(self, filename, signer, sign_password, keystore=None):
        """
        Return a suitable command for signing a file.

        :param filename: The pathname to the file to be signed.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :param keystore: The path to a directory which contains the keys
                         used in signing. If not specified, the instance's
                         ``gpg_home`` attribute is used instead.
        :return: The signing command as a list suitable to be
                 passed to :class:`subprocess.Popen`.
        """
        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
        if keystore is None:
            keystore = self.gpg_home
        if keystore:
            cmd.extend(['--homedir', keystore])
        if sign_password is not None:
            cmd.extend(['--batch', '--passphrase-fd', '0'])
        td = tempfile.mkdtemp()
        sf = os.path.join(td, os.path.basename(filename) + '.asc')
        cmd.extend(['--detach-sign', '--armor', '--local-user',
                    signer, '--output', sf, filename])
        logger.debug('invoking: %s', ' '.join(cmd))
        return cmd, sf

    def run_command(self, cmd, input_data=None):
        """
        Run a command in a child process, passing it any input data specified.

        :param cmd: The command to run.
        :param input_data: If specified, this must be a byte string containing
                           data to be sent to the child process.
        :return: A tuple consisting of the subprocess' exit code, a list of
                 lines read from the subprocess' ``stdout``, and a list of
                 lines read from the subprocess' ``stderr``.
        """
        kwargs = {
            'stdout': subprocess.PIPE,
            'stderr': subprocess.PIPE,
        }
        if input_data is not None:
            kwargs['stdin'] = subprocess.PIPE
        stdout = []
        stderr = []
        p = subprocess.Popen(cmd, **kwargs)
        # We don't use communicate() here because we may need to
        # get clever with interacting with the command
        t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
        t1.start()
        t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
        t2.start()
        if input_data is not None:
            p.stdin.write(input_data)
            p.stdin.close()

        p.wait()
        t1.join()
        t2.join()
        return p.returncode, stdout, stderr

    def sign_file(self, filename, signer, sign_password, keystore=None):
        """
        Sign a file.

        :param filename: The pathname to the file to be signed.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :param keystore: The path to a directory which contains the keys
                         used in signing. If not specified, the instance's
                         ``gpg_home`` attribute is used instead.
        :return: The absolute pathname of the file where the signature is
                 stored.
        """
        cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
                                              keystore)
        rc, stdout, stderr = self.run_command(cmd,
                                              sign_password.encode('utf-8'))
        if rc != 0:
            raise DistlibException('sign command failed with error '
                                   'code %s' % rc)
        return sig_file

    def upload_file(self, metadata, filename, signer=None, sign_password=None,
                    filetype='sdist', pyversion='source', keystore=None):
        """
        Upload a release file to the index.

        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the file to be uploaded.
        :param filename: The pathname of the file to be uploaded.
        :param signer: The identifier of the signer of the file.
        :param sign_password: The passphrase for the signer's
                              private key used for signing.
        :param filetype: The type of the file being uploaded. This is the
                         distutils command which produced that file, e.g.
                         ``sdist`` or ``bdist_wheel``.
        :param pyversion: The version of Python which the release relates
                          to. For code compatible with any Python, this would
                          be ``source``, otherwise it would be e.g. ``3.2``.
        :param keystore: The path to a directory which contains the keys
                         used in signing. If not specified, the instance's
                         ``gpg_home`` attribute is used instead.
        :return: The HTTP response received from PyPI upon submission of the
                 request.
        """
        self.check_credentials()
        if not os.path.exists(filename):
            raise DistlibException('not found: %s' % filename)
        metadata.validate()
        d = metadata.todict()
        sig_file = None
        if signer:
            if not self.gpg:
                logger.warning('no signing program available - not signed')
            else:
                sig_file = self.sign_file(filename, signer, sign_password,
                                          keystore)
        with open(filename, 'rb') as f:
            file_data = f.read()
        md5_digest = hashlib.md5(file_data).hexdigest()
        sha256_digest = hashlib.sha256(file_data).hexdigest()
        d.update({
            ':action': 'file_upload',
            'protocol_version': '1',
            'filetype': filetype,
            'pyversion': pyversion,
            'md5_digest': md5_digest,
            'sha256_digest': sha256_digest,
        })
        files = [('content', os.path.basename(filename), file_data)]
        if sig_file:
            with open(sig_file, 'rb') as f:
                sig_data = f.read()
            files.append(('gpg_signature', os.path.basename(sig_file),
                         sig_data))
            shutil.rmtree(os.path.dirname(sig_file))
        request = self.encode_request(d.items(), files)
        return self.send_request(request)

    def upload_documentation(self, metadata, doc_dir):
        """
        Upload documentation to the index.

        :param metadata: A :class:`Metadata` instance defining at least a name
                         and version number for the documentation to be
                         uploaded.
        :param doc_dir: The pathname of the directory which contains the
                        documentation. This should be the directory that
                        contains the ``index.html`` for the documentation.
        :return: The HTTP response received from PyPI upon submission of the
                 request.
        """
        self.check_credentials()
        if not os.path.isdir(doc_dir):
            raise DistlibException('not a directory: %r' % doc_dir)
        fn = os.path.join(doc_dir, 'index.html')
        if not os.path.exists(fn):
            raise DistlibException('not found: %r' % fn)
        metadata.validate()
        name, version = metadata.name, metadata.version
        zip_data = zip_dir(doc_dir).getvalue()
        fields = [(':action', 'doc_upload'),
                  ('name', name), ('version', version)]
        files = [('content', name, zip_data)]
        request = self.encode_request(fields, files)
        return self.send_request(request)

    def get_verify_command(self, signature_filename, data_filename,
                           keystore=None):
        """
        Return a suitable command for verifying a file.

        :param signature_filename: The pathname to the file containing the
                                   signature.
        :param data_filename: The pathname to the file containing the
                              signed data.
        :param keystore: The path to a directory which contains the keys
                         used in verification. If not specified, the
                         instance's ``gpg_home`` attribute is used instead.
        :return: The verifying command as a list suitable to be
                 passed to :class:`subprocess.Popen`.
        """
        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
        if keystore is None:
            keystore = self.gpg_home
        if keystore:
            cmd.extend(['--homedir', keystore])
        cmd.extend(['--verify', signature_filename, data_filename])
        logger.debug('invoking: %s', ' '.join(cmd))
        return cmd

    def verify_signature(self, signature_filename, data_filename,
                         keystore=None):
        """
        Verify a signature for a file.

        :param signature_filename: The pathname to the file containing the
                                   signature.
        :param data_filename: The pathname to the file containing the
                              signed data.
        :param keystore: The path to a directory which contains the keys
                         used in verification. If not specified, the
                         instance's ``gpg_home`` attribute is used instead.
        :return: True if the signature was verified, else False.
        """
        if not self.gpg:
            raise DistlibException('verification unavailable because gpg '
                                   'unavailable')
        cmd = self.get_verify_command(signature_filename, data_filename,
                                      keystore)
        rc, stdout, stderr = self.run_command(cmd)
        if rc not in (0, 1):
            raise DistlibException('verify command failed with error '
                                   'code %s' % rc)
        return rc == 0

    def download_file(self, url, destfile, digest=None, reporthook=None):
        """
        This is a convenience method for downloading a file from a URL.
        Normally, this will be a file from the index, though currently
        no check is made for this (i.e. a file can be downloaded from
        anywhere).

        The method is just like the :func:`urlretrieve` function in the
        standard library, except that it allows digest computation to be
        done during download, and checking that the downloaded data
        matches any expected value.

        :param url: The URL of the file to be downloaded (assumed to be
                    available via an HTTP GET request).
        :param destfile: The pathname where the downloaded file is to be
                         saved.
        :param digest: If specified, this must be a (hasher, value)
                       tuple, where hasher is the algorithm used (e.g.
                       ``'md5'``) and ``value`` is the expected value.
        :param reporthook: The same as for :func:`urlretrieve` in the
                           standard library.
        """
        if digest is None:
            digester = None
            logger.debug('No digest specified')
        else:
            if isinstance(digest, (list, tuple)):
                hasher, digest = digest
            else:
                hasher = 'md5'
            digester = getattr(hashlib, hasher)()
            logger.debug('Digest specified: %s' % digest)
        # The following code is equivalent to urlretrieve.
        # We need to do it this way so that we can compute the
        # digest of the file as we go.
        with open(destfile, 'wb') as dfp:
            # addinfourl is not a context manager on 2.x
            # so we have to use try/finally
            sfp = self.send_request(Request(url))
            try:
                headers = sfp.info()
                blocksize = 8192
                size = -1
                read = 0
                blocknum = 0
                if "content-length" in headers:
                    size = int(headers["Content-Length"])
                if reporthook:
                    reporthook(blocknum, blocksize, size)
                while True:
                    block = sfp.read(blocksize)
                    if not block:
                        break
                    read += len(block)
                    dfp.write(block)
                    if digester:
                        digester.update(block)
                    blocknum += 1
                    if reporthook:
                        reporthook(blocknum, blocksize, size)
            finally:
                sfp.close()

        # check that we got the whole file, if we can
        if size >= 0 and read < size:
            raise DistlibException(
                'retrieval incomplete: got only %d out of %d bytes'
                % (read, size))
        # if we have a digest, it must match.
        if digester:
            actual = digester.hexdigest()
            if digest != actual:
                raise DistlibException('%s digest mismatch for %s: expected '
                                       '%s, got %s' % (hasher, destfile,
                                                       digest, actual))
            logger.debug('Digest verified: %s', digest)

    def send_request(self, req):
        """
        Send a standard library :class:`Request` to PyPI and return its
        response.

        :param req: The request to send.
        :return: The HTTP response from PyPI (a standard library HTTPResponse).
        """
        handlers = []
        if self.password_handler:
            handlers.append(self.password_handler)
        if self.ssl_verifier:
            handlers.append(self.ssl_verifier)
        opener = build_opener(*handlers)
        return opener.open(req)

    def encode_request(self, fields, files):
        """
        Encode fields and files for posting to an HTTP server.

        :param fields: The fields to send as a list of (fieldname, value)
                       tuples.
        :param files: The files to send as a list of (fieldname, filename,
                      file_bytes) tuples.
        """
        # Adapted from packaging, which in turn was adapted from
        # http://code.activestate.com/recipes/146306
        parts = []
        boundary = self.boundary
        for k, values in fields:
            if not isinstance(values, (list, tuple)):
                values = [values]

            for v in values:
                parts.extend((
                    b'--' + boundary,
                    ('Content-Disposition: form-data; name="%s"' %
                     k).encode('utf-8'),
                    b'',
                    v.encode('utf-8')))
        for key, filename, value in files:
            parts.extend((
                b'--' + boundary,
                ('Content-Disposition: form-data; name="%s"; filename="%s"' %
                 (key, filename)).encode('utf-8'),
                b'',
                value))

        parts.extend((b'--' + boundary + b'--', b''))

        body = b'\r\n'.join(parts)
        ct = b'multipart/form-data; boundary=' + boundary
        headers = {
            'Content-type': ct,
            'Content-length': str(len(body))
        }
        return Request(self.url, body, headers)

    def search(self, terms, operator=None):
        if isinstance(terms, string_types):
            terms = {'name': terms}
        rpc_proxy = ServerProxy(self.url, timeout=3.0)
        try:
            return rpc_proxy.search(terms, operator or 'and')
        finally:
            rpc_proxy('close')()
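

# ---------------------------------------------------------------------------
# Illustrative sketch (added by the editor, not part of the original distlib
# index module): minimal, network-free use of PackageIndex. Constructing an
# instance reads any ~/.pypirc via distutils, so this assumes a Python where
# distutils is still available (it was removed in Python 3.12).
if __name__ == '__main__':  # pragma: no cover
    index = PackageIndex()              # defaults to DEFAULT_INDEX
    print('repository: %s' % index.url)
    print('gpg binary found: %s' % index.gpg)
    try:
        # raises unless username/password were found in the configuration
        index.check_credentials()
        print('credentials configured for %s' % index.username)
    except DistlibException as e:
        print('no credentials: %s' % e)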
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/manifest.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Class representing the list of files in a distribution.

Equivalent to distutils.filelist, but fixes some problems.
"""
import fnmatch
import logging
import os
import re
import sys

from . import DistlibException
from .compat import fsdecode
from .util import convert_path


__all__ = ['Manifest']

logger = logging.getLogger(__name__)

# a \ followed by some spaces + EOL
_COLLAPSE_PATTERN = re.compile('\\\\w*\n', re.M)
_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S)

#
# Due to the different results returned by fnmatch.translate, we need
# to do slightly different processing for Python 2.7 and 3.2 ... this needed
# to be brought in for Python 3.6 onwards.
#
_PYTHON_VERSION = sys.version_info[:2]

class Manifest(object):
    """
    A list of files built by exploring the filesystem and filtered by
    applying various patterns to what we find there.
    """

    def __init__(self, base=None):
        """
        Initialise an instance.

        :param base: The base directory to explore under.
        """
        self.base = os.path.abspath(os.path.normpath(base or os.getcwd()))
        self.prefix = self.base + os.sep
        self.allfiles = None
        self.files = set()

    #
    # Public API
    #

    def findall(self):
        """Find all files under the base and set ``allfiles`` to the absolute
        pathnames of files found.
        """
        from stat import S_ISREG, S_ISDIR, S_ISLNK

        self.allfiles = allfiles = []
        root = self.base
        stack = [root]
        pop = stack.pop
        push = stack.append

        while stack:
            root = pop()
            names = os.listdir(root)

            for name in names:
                fullname = os.path.join(root, name)

                # Avoid excess stat calls -- just one will do, thank you!
                stat = os.stat(fullname)
                mode = stat.st_mode
                if S_ISREG(mode):
                    allfiles.append(fsdecode(fullname))
                elif S_ISDIR(mode) and not S_ISLNK(mode):
                    push(fullname)

    def add(self, item):
        """
        Add a file to the manifest.

        :param item: The pathname to add. This can be relative to the base.
        """
        if not item.startswith(self.prefix):
            item = os.path.join(self.base, item)
        self.files.add(os.path.normpath(item))

    def add_many(self, items):
        """
        Add a list of files to the manifest.

        :param items: The pathnames to add. These can be relative to the base.
        """
        for item in items:
            self.add(item)

    def sorted(self, wantdirs=False):
        """
        Return sorted files in directory order
        """
        def add_dir(dirs, d):
            dirs.add(d)
            logger.debug('add_dir added %s', d)
            if d != self.base:
                parent, _ = os.path.split(d)
                assert parent not in ('', '/')
                add_dir(dirs, parent)

        result = set(self.files)    # make a copy!
        if wantdirs:
            dirs = set()
            for f in result:
                add_dir(dirs, os.path.dirname(f))
            result |= dirs
        return [os.path.join(*path_tuple) for path_tuple in
                sorted(os.path.split(path) for path in result)]

    def clear(self):
        """Clear all collected files."""
        self.files = set()
        self.allfiles = []

    def process_directive(self, directive):
        """
        Process a directive which either adds some files from ``allfiles`` to
        ``files``, or removes some files from ``files``.

        :param directive: The directive to process. This should be in a format
                          compatible with distutils ``MANIFEST.in`` files:

                          http://docs.python.org/distutils/sourcedist.html#commands
        """
        # Parse the line: split it up, make sure the right number of words
        # is there, and return the relevant words.  'action' is always
        # defined: it's the first word of the line.  Which of the other
        # three are defined depends on the action; it'll be either
        # patterns, (dir and patterns), or (dirpattern).
        action, patterns, thedir, dirpattern = self._parse_directive(directive)

        # OK, now we know that the action is valid and we have the
        # right number of words on the line for that action -- so we
        # can proceed with minimal error-checking.
        if action == 'include':
            for pattern in patterns:
                if not self._include_pattern(pattern, anchor=True):
                    logger.warning('no files found matching %r', pattern)

        elif action == 'exclude':
            for pattern in patterns:
                found = self._exclude_pattern(pattern, anchor=True)
                #if not found:
                #    logger.warning('no previously-included files '
                #                   'found matching %r', pattern)

        elif action == 'global-include':
            for pattern in patterns:
                if not self._include_pattern(pattern, anchor=False):
                    logger.warning('no files found matching %r '
                                   'anywhere in distribution', pattern)

        elif action == 'global-exclude':
            for pattern in patterns:
                found = self._exclude_pattern(pattern, anchor=False)
                #if not found:
                #    logger.warning('no previously-included files '
                #                   'matching %r found anywhere in '
                #                   'distribution', pattern)

        elif action == 'recursive-include':
            for pattern in patterns:
                if not self._include_pattern(pattern, prefix=thedir):
                    logger.warning('no files found matching %r '
                                   'under directory %r', pattern, thedir)

        elif action == 'recursive-exclude':
            for pattern in patterns:
                found = self._exclude_pattern(pattern, prefix=thedir)
                #if not found:
                #    logger.warning('no previously-included files '
                #                   'matching %r found under directory %r',
                #                   pattern, thedir)

        elif action == 'graft':
            if not self._include_pattern(None, prefix=dirpattern):
                logger.warning('no directories found matching %r',
                               dirpattern)

        elif action == 'prune':
            if not self._exclude_pattern(None, prefix=dirpattern):
                logger.warning('no previously-included directories found '
                               'matching %r', dirpattern)
        else:   # pragma: no cover
            # This should never happen, as it should be caught in
            # _parse_template_line
            raise DistlibException(
                'invalid action %r' % action)

    #
    # Private API
    #

    def _parse_directive(self, directive):
        """
        Validate a directive.
        :param directive: The directive to validate.
        :return: A tuple of action, patterns, thedir, dir_patterns
        """
        words = directive.split()
        if len(words) == 1 and words[0] not in ('include', 'exclude',
                                                'global-include',
                                                'global-exclude',
                                                'recursive-include',
                                                'recursive-exclude',
                                                'graft', 'prune'):
            # no action given, let's use the default 'include'
            words.insert(0, 'include')

        action = words[0]
        patterns = thedir = dir_pattern = None

        if action in ('include', 'exclude',
                      'global-include', 'global-exclude'):
            if len(words) < 2:
                raise DistlibException(
                    '%r expects <pattern1> <pattern2> ...' % action)

            patterns = [convert_path(word) for word in words[1:]]

        elif action in ('recursive-include', 'recursive-exclude'):
            if len(words) < 3:
                raise DistlibException(
                    '%r expects <dir> <pattern1> <pattern2> ...' % action)

            thedir = convert_path(words[1])
            patterns = [convert_path(word) for word in words[2:]]

        elif action in ('graft', 'prune'):
            if len(words) != 2:
                raise DistlibException(
                    '%r expects a single <dir_pattern>' % action)

            dir_pattern = convert_path(words[1])

        else:
            raise DistlibException('unknown action %r' % action)

        return action, patterns, thedir, dir_pattern

    def _include_pattern(self, pattern, anchor=True, prefix=None,
                         is_regex=False):
        """Select strings (presumably filenames) from 'self.files' that
        match 'pattern', a Unix-style wildcard (glob) pattern.

        Patterns are not quite the same as implemented by the 'fnmatch'
        module: '*' and '?' match non-special characters, where "special"
        is platform-dependent: slash on Unix; colon, slash, and backslash on
        DOS/Windows; and colon on Mac OS.

        If 'anchor' is true (the default), then the pattern match is more
        stringent: "*.py" will match "foo.py" but not "foo/bar.py".  If
        'anchor' is false, both of these will match.

        If 'prefix' is supplied, then only filenames starting with 'prefix'
        (itself a pattern) and ending with 'pattern', with anything in between
        them, will match.  'anchor' is ignored in this case.

        If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
        'pattern' is assumed to be either a string containing a regex or a
        regex object -- no translation is done, the regex is just compiled
        and used as-is.

        Selected strings will be added to self.files.

        Return True if files are found.
        """
        # XXX docstring lying about what the special chars are?
        found = False
        pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)

        # delayed loading of allfiles list
        if self.allfiles is None:
            self.findall()

        for name in self.allfiles:
            if pattern_re.search(name):
                self.files.add(name)
                found = True
        return found

    def _exclude_pattern(self, pattern, anchor=True, prefix=None,
                         is_regex=False):
        """Remove strings (presumably filenames) from 'files' that match
        'pattern'.

        Other parameters are the same as for 'include_pattern()', above.
        The list 'self.files' is modified in place. Return True if files are
        found.

        This API is public to allow e.g. exclusion of SCM subdirs, e.g. when
        packaging source distributions
        """
        found = False
        pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
        for f in list(self.files):
            if pattern_re.search(f):
                self.files.remove(f)
                found = True
        return found

    def _translate_pattern(self, pattern, anchor=True, prefix=None,
                           is_regex=False):
        """Translate a shell-like wildcard pattern to a compiled regular
        expression.

        Return the compiled regex.  If 'is_regex' true,
        then 'pattern' is directly compiled to a regex (if it's a string)
        or just returned as-is (assumes it's a regex object).
        """
        if is_regex:
            if isinstance(pattern, str):
                return re.compile(pattern)
            else:
                return pattern

        if _PYTHON_VERSION > (3, 2):
            # ditch start and end characters
            start, _, end = self._glob_to_re('_').partition('_')

        if pattern:
            pattern_re = self._glob_to_re(pattern)
            if _PYTHON_VERSION > (3, 2):
                assert pattern_re.startswith(start) and pattern_re.endswith(end)
        else:
            pattern_re = ''

        base = re.escape(os.path.join(self.base, ''))
        if prefix is not None:
            # ditch end of pattern character
            if _PYTHON_VERSION <= (3, 2):
                empty_pattern = self._glob_to_re('')
                prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)]
            else:
                prefix_re = self._glob_to_re(prefix)
                assert prefix_re.startswith(start) and prefix_re.endswith(end)
                prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]
            sep = os.sep
            if os.sep == '\\':
                sep = r'\\'
            if _PYTHON_VERSION <= (3, 2):
                pattern_re = '^' + base + sep.join((prefix_re,
                                                    '.*' + pattern_re))
            else:
                pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]
                pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep,
                                                  pattern_re, end)
        else:  # no prefix -- respect anchor flag
            if anchor:
                if _PYTHON_VERSION <= (3, 2):
                    pattern_re = '^' + base + pattern_re
                else:
                    pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):])

        return re.compile(pattern_re)

    def _glob_to_re(self, pattern):
        """Translate a shell-like glob pattern to a regular expression.

        Return a string containing the regex.

        Differs from 'fnmatch.translate()' in that '*' does not match "special
        characters" (which are platform-specific).
        """
        pattern_re = fnmatch.translate(pattern)

        # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
        # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
        # and by extension they shouldn't match such "special characters" under
        # any OS.  So change all non-escaped dots in the RE to match any
        # character except the special characters (currently: just os.sep).
        sep = os.sep
        if os.sep == '\\':
            # we're using a regex to manipulate a regex, so we need
            # to escape the backslash twice
            sep = r'\\\\'
        escaped = r'\1[^%s]' % sep
        pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
        return pattern_re
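

# ---------------------------------------------------------------------------
# Illustrative sketch (added by the editor, not part of the original distlib
# manifest module): driving Manifest with MANIFEST.in-style directives. The
# base directory and patterns are arbitrary examples; each directive is
# routed through process_directive() exactly as documented above.
if __name__ == '__main__':  # pragma: no cover
    manifest = Manifest(os.getcwd())
    manifest.findall()                  # populate allfiles from the base dir
    manifest.process_directive('include *.py')
    manifest.process_directive('global-exclude *.pyc')
    for path in manifest.sorted():
        print(path)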
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/util.py
#
# Copyright (C) 2012-2017 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import socket
try:
    import ssl
except ImportError:  # pragma: no cover
    ssl = None
import subprocess
import sys
import tarfile
import tempfile
import textwrap

try:
    import threading
except ImportError:  # pragma: no cover
    import dummy_threading as threading
import time

from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
                     cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
                     splittype, HTTPHandler, BaseConfigurator, valid_ident,
                     Container, configparser, URLError, ZipFile, fsdecode,
                     unquote, urlparse)

logger = logging.getLogger(__name__)

#
# Requirement parsing code as per PEP 508
#

IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')
VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')
COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')
MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')
OR = re.compile(r'^or\b\s*')
AND = re.compile(r'^and\b\s*')
NON_SPACE = re.compile(r'(\S+)\s*')
STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')


def parse_marker(marker_string):
    """
    Parse a marker string and return a dictionary containing a marker
    expression.

    The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals
    in the expression grammar, or strings. A string contained in quotes is to
    be interpreted as a literal string, and a string not contained in quotes
    is a variable (such as os_name).
    """
    def marker_var(remaining):
        # either identifier, or literal string
        m = IDENTIFIER.match(remaining)
        if m:
            result = m.groups()[0]
            remaining = remaining[m.end():]
        elif not remaining:
            raise SyntaxError('unexpected end of input')
        else:
            q = remaining[0]
            if q not in '\'"':
                raise SyntaxError('invalid expression: %s' % remaining)
            oq = '\'"'.replace(q, '')
            remaining = remaining[1:]
            parts = [q]
            while remaining:
                # either a string chunk, or oq, or q to terminate
                if remaining[0] == q:
                    break
                elif remaining[0] == oq:
                    parts.append(oq)
                    remaining = remaining[1:]
                else:
                    m = STRING_CHUNK.match(remaining)
                    if not m:
                        raise SyntaxError('error in string literal: %s' % remaining)
                    parts.append(m.groups()[0])
                    remaining = remaining[m.end():]
            else:
                s = ''.join(parts)
                raise SyntaxError('unterminated string: %s' % s)
            parts.append(q)
            result = ''.join(parts)
            remaining = remaining[1:].lstrip()  # skip past closing quote
        return result, remaining

    def marker_expr(remaining):
        if remaining and remaining[0] == '(':
            result, remaining = marker(remaining[1:].lstrip())
            if remaining[0] != ')':
                raise SyntaxError('unterminated parenthesis: %s' % remaining)
            remaining = remaining[1:].lstrip()
        else:
            lhs, remaining = marker_var(remaining)
            while remaining:
                m = MARKER_OP.match(remaining)
                if not m:
                    break
                op = m.groups()[0]
                remaining = remaining[m.end():]
                rhs, remaining = marker_var(remaining)
                lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}
            result = lhs
        return result, remaining

    def marker_and(remaining):
        lhs, remaining = marker_expr(remaining)
        while remaining:
            m = AND.match(remaining)
            if not m:
                break
            remaining = remaining[m.end():]
            rhs, remaining = marker_expr(remaining)
            lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}
        return lhs, remaining

    def marker(remaining):
        lhs, remaining = marker_and(remaining)
        while remaining:
            m = OR.match(remaining)
            if not m:
                break
            remaining = remaining[m.end():]
            rhs, remaining = marker_and(remaining)
            lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs}
        return lhs, remaining

    return marker(marker_string)


def parse_requirement(req):
    """
    Parse a requirement passed in as a string. Return a Container
    whose attributes contain the various parts of the requirement.
    """
    remaining = req.strip()
    if not remaining or remaining.startswith('#'):
        return None
    m = IDENTIFIER.match(remaining)
    if not m:
        raise SyntaxError('name expected: %s' % remaining)
    distname = m.groups()[0]
    remaining = remaining[m.end():]
    extras = mark_expr = versions = uri = None
    if remaining and remaining[0] == '[':
        i = remaining.find(']', 1)
        if i < 0:
            raise SyntaxError('unterminated extra: %s' % remaining)
        s = remaining[1:i]
        remaining = remaining[i + 1:].lstrip()
        extras = []
        while s:
            m = IDENTIFIER.match(s)
            if not m:
                raise SyntaxError('malformed extra: %s' % s)
            extras.append(m.groups()[0])
            s = s[m.end():]
            if not s:
                break
            if s[0] != ',':
                raise SyntaxError('comma expected in extras: %s' % s)
            s = s[1:].lstrip()
        if not extras:
            extras = None
    if remaining:
        if remaining[0] == '@':
            # it's a URI
            remaining = remaining[1:].lstrip()
            m = NON_SPACE.match(remaining)
            if not m:
                raise SyntaxError('invalid URI: %s' % remaining)
            uri = m.groups()[0]
            t = urlparse(uri)
            # there are issues with Python and URL parsing, so this test
            # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
            # always parse invalid URLs correctly - it should raise
            # exceptions for malformed URLs
            if not (t.scheme and t.netloc):
                raise SyntaxError('Invalid URL: %s' % uri)
            remaining = remaining[m.end():].lstrip()
        else:

            def get_versions(ver_remaining):
                """
                Return a list of operator, version tuples if any are
                specified, else None.
                """
                m = COMPARE_OP.match(ver_remaining)
                versions = None
                if m:
                    versions = []
                    while True:
                        op = m.groups()[0]
                        ver_remaining = ver_remaining[m.end():]
                        m = VERSION_IDENTIFIER.match(ver_remaining)
                        if not m:
                            raise SyntaxError('invalid version: %s' % ver_remaining)
                        v = m.groups()[0]
                        versions.append((op, v))
                        ver_remaining = ver_remaining[m.end():]
                        if not ver_remaining or ver_remaining[0] != ',':
                            break
                        ver_remaining = ver_remaining[1:].lstrip()
                        m = COMPARE_OP.match(ver_remaining)
                        if not m:
                            raise SyntaxError('invalid constraint: %s' % ver_remaining)
                    if not versions:
                        versions = None
                return versions, ver_remaining

            if remaining[0] != '(':
                versions, remaining = get_versions(remaining)
            else:
                i = remaining.find(')', 1)
                if i < 0:
                    raise SyntaxError('unterminated parenthesis: %s' % remaining)
                s = remaining[1:i]
                remaining = remaining[i + 1:].lstrip()
                # As a special diversion from PEP 508, allow a version number
                # a.b.c in parentheses as a synonym for ~= a.b.c (because this
                # is allowed in earlier PEPs)
                if COMPARE_OP.match(s):
                    versions, _ = get_versions(s)
                else:
                    m = VERSION_IDENTIFIER.match(s)
                    if not m:
                        raise SyntaxError('invalid constraint: %s' % s)
                    v = m.groups()[0]
                    s = s[m.end():].lstrip()
                    if s:
                        raise SyntaxError('invalid constraint: %s' % s)
                    versions = [('~=', v)]

    if remaining:
        if remaining[0] != ';':
            raise SyntaxError('invalid requirement: %s' % remaining)
        remaining = remaining[1:].lstrip()
        mark_expr, remaining = parse_marker(remaining)

    if remaining and remaining[0] != '#':
        raise SyntaxError('unexpected trailing data: %s' % remaining)

    if not versions:
        rs = distname
    else:
        rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
    return Container(name=distname, extras=extras, constraints=versions,
                     marker=mark_expr, url=uri, requirement=rs)


def get_resources_dests(resources_root, rules):
    """Find destinations for resources files"""

    def get_rel_path(root, path):
        # normalizes and returns a lstripped-/-separated path
        root = root.replace(os.path.sep, '/')
        path = path.replace(os.path.sep, '/')
        assert path.startswith(root)
        return path[len(root):].lstrip('/')

    destinations = {}
    for base, suffix, dest in rules:
        prefix = os.path.join(resources_root, base)
        for abs_base in iglob(prefix):
            abs_glob = os.path.join(abs_base, suffix)
            for abs_path in iglob(abs_glob):
                resource_file = get_rel_path(resources_root, abs_path)
                if dest is None:  # remove the entry if it was here
                    destinations.pop(resource_file, None)
                else:
                    rel_path = get_rel_path(abs_base, abs_path)
                    rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
                    destinations[resource_file] = rel_dest + '/' + rel_path
    return destinations


def in_venv():
    if hasattr(sys, 'real_prefix'):
        # virtualenv venvs
        result = True
    else:
        # PEP 405 venvs
        result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
    return result


def get_executable():
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on OS X
#    if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
#                                     in os.environ):
#        result = os.environ['__PYVENV_LAUNCHER__']
#    else:
#        result = sys.executable
#    return result
    result = os.path.normcase(sys.executable)
    if not isinstance(result, text_type):
        result = fsdecode(result)
    return result


def proceed(prompt, allowed_chars, error_prompt=None, default=None):
    p = prompt
    while True:
        s = raw_input(p)
        p = prompt
        if not s and default:
            s = default
        if s:
            c = s[0].lower()
            if c in allowed_chars:
                break
            if error_prompt:
                p = '%c: %s\n%s' % (c, error_prompt, prompt)
    return c


def extract_by_key(d, keys):
    if isinstance(keys, string_types):
        keys = keys.split()
    result = {}
    for key in keys:
        if key in d:
            result[key] = d[key]
    return result

def read_exports(stream):
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getreader('utf-8')(stream)
    # Try to load as JSON, falling back on legacy format
    data = stream.read()
    stream = StringIO(data)
    try:
        jdata = json.load(stream)
        result = jdata['extensions']['python.exports']['exports']
        for group, entries in result.items():
            for k, v in entries.items():
                s = '%s = %s' % (k, v)
                entry = get_export_entry(s)
                assert entry is not None
                entries[k] = entry
        return result
    except Exception:
        stream.seek(0, 0)

    def read_stream(cp, stream):
        if hasattr(cp, 'read_file'):
            cp.read_file(stream)
        else:
            cp.readfp(stream)

    cp = configparser.ConfigParser()
    try:
        read_stream(cp, stream)
    except configparser.MissingSectionHeaderError:
        stream.close()
        data = textwrap.dedent(data)
        stream = StringIO(data)
        read_stream(cp, stream)

    result = {}
    for key in cp.sections():
        result[key] = entries = {}
        for name, value in cp.items(key):
            s = '%s = %s' % (name, value)
            entry = get_export_entry(s)
            assert entry is not None
            #entry.dist = self
            entries[name] = entry
    return result


def write_exports(exports, stream):
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for k, v in exports.items():
        # TODO check k, v for valid values
        cp.add_section(k)
        for entry in v.values():
            if entry.suffix is None:
                s = entry.prefix
            else:
                s = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                s = '%s [%s]' % (s, ', '.join(entry.flags))
            cp.set(k, entry.name, s)
    cp.write(stream)


@contextlib.contextmanager
def tempdir():
    td = tempfile.mkdtemp()
    try:
        yield td
    finally:
        shutil.rmtree(td)

@contextlib.contextmanager
def chdir(d):
    cwd = os.getcwd()
    try:
        os.chdir(d)
        yield
    finally:
        os.chdir(cwd)


@contextlib.contextmanager
def socket_timeout(seconds=15):
    cto = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(seconds)
        yield
    finally:
        socket.setdefaulttimeout(cto)


class cached_property(object):
    def __init__(self, func):
        self.func = func
        #for attr in ('__name__', '__module__', '__doc__'):
        #    setattr(self, attr, getattr(func, attr, None))

    def __get__(self, obj, cls=None):
        if obj is None:
            return self
        value = self.func(obj)
        object.__setattr__(obj, self.func.__name__, value)
        #obj.__dict__[self.func.__name__] = value = self.func(obj)
        return value

def convert_path(pathname):
    """Return 'pathname' as a name that will work on the native filesystem.

    The path is split on '/' and put back together again using the current
    directory separator.  Needed because filenames in the setup script are
    always supplied in Unix style, and have to be converted to the local
    convention before we can actually use them in the filesystem.  Raises
    ValueError on non-Unix-ish systems if 'pathname' either starts or
    ends with a slash.
    """
    if os.sep == '/':
        return pathname
    if not pathname:
        return pathname
    if pathname[0] == '/':
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname[-1] == '/':
        raise ValueError("path '%s' cannot end with '/'" % pathname)

    paths = pathname.split('/')
    while os.curdir in paths:
        paths.remove(os.curdir)
    if not paths:
        return os.curdir
    return os.path.join(*paths)


class FileOperator(object):
    def __init__(self, dry_run=False):
        self.dry_run = dry_run
        self.ensured = set()
        self._init_record()

    def _init_record(self):
        self.record = False
        self.files_written = set()
        self.dirs_created = set()

    def record_as_written(self, path):
        if self.record:
            self.files_written.add(path)

    def newer(self, source, target):
        """Tell if the target is newer than the source.

        Returns true if 'source' exists and is more recently modified than
        'target', or if 'source' exists and 'target' doesn't.

        Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raise DistlibException if 'source' does not exist.

        Note that this test is not very accurate: files created in the same
        second will have the same "age".
        """
        if not os.path.exists(source):
            raise DistlibException("file '%r' does not exist" %
                                   os.path.abspath(source))
        if not os.path.exists(target):
            return True

        return os.stat(source).st_mtime > os.stat(target).st_mtime

    def copy_file(self, infile, outfile, check=True):
        """Copy a file respecting dry-run and force flags.
        """
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying %s to %s', infile, outfile)
        if not self.dry_run:
            msg = None
            if check:
                if os.path.islink(outfile):
                    msg = '%s is a symlink' % outfile
                elif os.path.exists(outfile) and not os.path.isfile(outfile):
                    msg = '%s is a non-regular file' % outfile
            if msg:
                raise ValueError(msg + ' which would be overwritten')
            shutil.copyfile(infile, outfile)
        self.record_as_written(outfile)

    def copy_stream(self, instream, outfile, encoding=None):
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                outstream = open(outfile, 'wb')
            else:
                outstream = codecs.open(outfile, 'w', encoding=encoding)
            try:
                shutil.copyfileobj(instream, outstream)
            finally:
                outstream.close()
        self.record_as_written(outfile)

    def write_binary_file(self, path, data):
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            if os.path.exists(path):
                os.remove(path)
            with open(path, 'wb') as f:
                f.write(data)
        self.record_as_written(path)

    def write_text_file(self, path, data, encoding):
        self.write_binary_file(path, data.encode(encoding))

    def set_mode(self, bits, mask, files):
        if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
            # Set the executable bits (owner, group, and world) on
            # all the files specified.
            for f in files:
                if self.dry_run:
                    logger.info("changing mode of %s", f)
                else:
                    mode = (os.stat(f).st_mode | bits) & mask
                    logger.info("changing mode of %s to %o", f, mode)
                    os.chmod(f, mode)

    set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)

    def ensure_dir(self, path):
        path = os.path.abspath(path)
        if path not in self.ensured and not os.path.exists(path):
            self.ensured.add(path)
            d, f = os.path.split(path)
            self.ensure_dir(d)
            logger.info('Creating %s' % path)
            if not self.dry_run:
                os.mkdir(path)
            if self.record:
                self.dirs_created.add(path)

    def byte_compile(self, path, optimize=False, force=False, prefix=None,
                     hashed_invalidation=False):
        dpath = cache_from_source(path, not optimize)
        logger.info('Byte-compiling %s to %s', path, dpath)
        if not self.dry_run:
            if force or self.newer(path, dpath):
                if not prefix:
                    diagpath = None
                else:
                    assert path.startswith(prefix)
                    diagpath = path[len(prefix):]
            compile_kwargs = {}
            if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'):
                compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH
            py_compile.compile(path, dpath, diagpath, True, **compile_kwargs)     # raise error
        self.record_as_written(dpath)
        return dpath

    def ensure_removed(self, path):
        if os.path.exists(path):
            if os.path.isdir(path) and not os.path.islink(path):
                logger.debug('Removing directory tree at %s', path)
                if not self.dry_run:
                    shutil.rmtree(path)
                if self.record:
                    if path in self.dirs_created:
                        self.dirs_created.remove(path)
            else:
                if os.path.islink(path):
                    s = 'link'
                else:
                    s = 'file'
                logger.debug('Removing %s %s', s, path)
                if not self.dry_run:
                    os.remove(path)
                if self.record:
                    if path in self.files_written:
                        self.files_written.remove(path)

    def is_writable(self, path):
        result = False
        while not result:
            if os.path.exists(path):
                result = os.access(path, os.W_OK)
                break
            parent = os.path.dirname(path)
            if parent == path:
                break
            path = parent
        return result

    def commit(self):
        """
        Commit recorded changes, turn off recording, return
        changes.
        """
        assert self.record
        result = self.files_written, self.dirs_created
        self._init_record()
        return result

    def rollback(self):
        if not self.dry_run:
            for f in list(self.files_written):
                if os.path.exists(f):
                    os.remove(f)
            # dirs should all be empty now, except perhaps for
            # __pycache__ subdirs
            # reverse so that subdirs appear before their parents
            dirs = sorted(self.dirs_created, reverse=True)
            for d in dirs:
                flist = os.listdir(d)
                if flist:
                    assert flist == ['__pycache__']
                    sd = os.path.join(d, flist[0])
                    os.rmdir(sd)
                os.rmdir(d)     # should fail if non-empty
        self._init_record()

def resolve(module_name, dotted_path):
    if module_name in sys.modules:
        mod = sys.modules[module_name]
    else:
        mod = __import__(module_name)
    if dotted_path is None:
        result = mod
    else:
        parts = dotted_path.split('.')
        result = getattr(mod, parts.pop(0))
        for p in parts:
            result = getattr(result, p)
    return result


class ExportEntry(object):
    def __init__(self, name, prefix, suffix, flags):
        self.name = name
        self.prefix = prefix
        self.suffix = suffix
        self.flags = flags

    @cached_property
    def value(self):
        return resolve(self.prefix, self.suffix)

    def __repr__(self):  # pragma: no cover
        return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
                                                self.suffix, self.flags)

    def __eq__(self, other):
        if not isinstance(other, ExportEntry):
            result = False
        else:
            result = (self.name == other.name and
                      self.prefix == other.prefix and
                      self.suffix == other.suffix and
                      self.flags == other.flags)
        return result

    __hash__ = object.__hash__


ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
                      \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
                      \s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
                      ''', re.VERBOSE)

def get_export_entry(specification):
    m = ENTRY_RE.search(specification)
    if not m:
        result = None
        if '[' in specification or ']' in specification:
            raise DistlibException("Invalid specification "
                                   "'%s'" % specification)
    else:
        d = m.groupdict()
        name = d['name']
        path = d['callable']
        colons = path.count(':')
        if colons == 0:
            prefix, suffix = path, None
        else:
            if colons != 1:
                raise DistlibException("Invalid specification "
                                       "'%s'" % specification)
            prefix, suffix = path.split(':')
        flags = d['flags']
        if flags is None:
            if '[' in specification or ']' in specification:
                raise DistlibException("Invalid specification "
                                       "'%s'" % specification)
            flags = []
        else:
            flags = [f.strip() for f in flags.split(',')]
        result = ExportEntry(name, prefix, suffix, flags)
    return result


def get_cache_base(suffix=None):
    """
    Return the default base location for distlib caches. If the directory does
    not exist, it is created. Use the suffix provided for the base directory,
    and default to '.distlib' if it isn't provided.

    On Windows, if LOCALAPPDATA is defined in the environment, then it is
    assumed to be a directory, and will be the parent directory of the result.
    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.expanduser('~') - will be the parent directory of
    the result.

    The result is just the directory '.distlib' in the parent directory as
    determined above, or with the name specified with ``suffix``.
    """
    if suffix is None:
        suffix = '.distlib'
    if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
        result = os.path.expandvars('$localappdata')
    else:
        # Assume posix, or old Windows
        result = os.path.expanduser('~')
    # we use 'isdir' instead of 'exists', because we want to
    # fail if there's a file with that name
    if os.path.isdir(result):
        usable = os.access(result, os.W_OK)
        if not usable:
            logger.warning('Directory exists but is not writable: %s', result)
    else:
        try:
            os.makedirs(result)
            usable = True
        except OSError:
            logger.warning('Unable to create %s', result, exc_info=True)
            usable = False
    if not usable:
        result = tempfile.mkdtemp()
        logger.warning('Default location unusable, using %s', result)
    return os.path.join(result, suffix)


def path_to_cache_dir(path):
    """
    Convert an absolute path to a directory name for use in a cache.

    The algorithm used is:

    #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
    #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
    #. ``'.cache'`` is appended.
    """
    d, p = os.path.splitdrive(os.path.abspath(path))
    if d:
        d = d.replace(':', '---')
    p = p.replace(os.sep, '--')
    return d + p + '.cache'


def ensure_slash(s):
    if not s.endswith('/'):
        return s + '/'
    return s


def parse_credentials(netloc):
    username = password = None
    if '@' in netloc:
        prefix, netloc = netloc.rsplit('@', 1)
        if ':' not in prefix:
            username = prefix
        else:
            username, password = prefix.split(':', 1)
    if username:
        username = unquote(username)
    if password:
        password = unquote(password)
    return username, password, netloc


def get_process_umask():
    result = os.umask(0o22)
    os.umask(result)
    return result

def is_string_sequence(seq):
    result = True
    i = None
    for i, s in enumerate(seq):
        if not isinstance(s, string_types):
            result = False
            break
    assert i is not None
    return result


PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
                                      '([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')


def split_filename(filename, project_name=None):
    """
    Extract name, version, python version from a filename (no extension)

    Return name, version, pyver or None
    """
    result = None
    pyver = None
    filename = unquote(filename).replace(' ', '-')
    m = PYTHON_VERSION.search(filename)
    if m:
        pyver = m.group(1)
        filename = filename[:m.start()]
    if project_name and len(filename) > len(project_name) + 1:
        m = re.match(re.escape(project_name) + r'\b', filename)
        if m:
            n = m.end()
            result = filename[:n], filename[n + 1:], pyver
    if result is None:
        m = PROJECT_NAME_AND_VERSION.match(filename)
        if m:
            result = m.group(1), m.group(3), pyver
    return result

# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
                             r'\(\s*(?P<ver>[^\s)]+)\)$')

def parse_name_and_version(p):
    """
    A utility method used to get name and version from a string.

    From e.g. a Provides-Dist value.

    :param p: A value in the form 'foo (1.0)'
    :return: The name and version as a tuple.
    """
    m = NAME_VERSION_RE.match(p)
    if not m:
        raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
    d = m.groupdict()
    return d['name'].strip().lower(), d['ver']

def get_extras(requested, available):
    result = set()
    requested = set(requested or [])
    available = set(available or [])
    if '*' in requested:
        requested.remove('*')
        result |= available
    for r in requested:
        if r == '-':
            result.add(r)
        elif r.startswith('-'):
            unwanted = r[1:]
            if unwanted not in available:
                logger.warning('undeclared extra: %s' % unwanted)
            if unwanted in result:
                result.remove(unwanted)
        else:
            if r not in available:
                logger.warning('undeclared extra: %s' % r)
            result.add(r)
    return result
#
# Extended metadata functionality
#

def _get_external_data(url):
    result = {}
    try:
        # urlopen might fail if it runs into redirections,
        # because of Python issue #13696. Fixed in locators
        # using a custom redirect handler.
        resp = urlopen(url)
        headers = resp.info()
        ct = headers.get('Content-Type')
        if not ct.startswith('application/json'):
            logger.debug('Unexpected response for JSON request: %s', ct)
        else:
            reader = codecs.getreader('utf-8')(resp)
            #data = reader.read().decode('utf-8')
            #result = json.loads(data)
            result = json.load(reader)
    except Exception as e:
        logger.exception('Failed to get external data for %s: %s', url, e)
    return result

_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'

def get_project_data(name):
    url = '%s/%s/project.json' % (name[0].upper(), name)
    url = urljoin(_external_data_base_url, url)
    result = _get_external_data(url)
    return result

def get_package_data(name, version):
    url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
    url = urljoin(_external_data_base_url, url)
    return _get_external_data(url)


class Cache(object):
    """
    A class implementing a cache for resources that need to live in the file
    system e.g. shared libraries. This class was moved from resources to here
    because it could be used by other modules, e.g. the wheel module.
    """

    def __init__(self, base):
        """
        Initialise an instance.

        :param base: The base directory where the cache should be located.
        """
        # we use 'isdir' instead of 'exists', because we want to
        # fail if there's a file with that name
        if not os.path.isdir(base):  # pragma: no cover
            os.makedirs(base)
        if (os.stat(base).st_mode & 0o77) != 0:
            logger.warning('Directory \'%s\' is not private', base)
        self.base = os.path.abspath(os.path.normpath(base))

    def prefix_to_dir(self, prefix):
        """
        Converts a resource prefix to a directory name in the cache.
        """
        return path_to_cache_dir(prefix)

    def clear(self):
        """
        Clear the cache.
        """
        not_removed = []
        for fn in os.listdir(self.base):
            fn = os.path.join(self.base, fn)
            try:
                if os.path.islink(fn) or os.path.isfile(fn):
                    os.remove(fn)
                elif os.path.isdir(fn):
                    shutil.rmtree(fn)
            except Exception:
                not_removed.append(fn)
        return not_removed


class EventMixin(object):
    """
    A very simple publish/subscribe system.
    """
    def __init__(self):
        self._subscribers = {}

    def add(self, event, subscriber, append=True):
        """
        Add a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be added (and called when the
                           event is published).
        :param append: Whether to append or prepend the subscriber to an
                       existing subscriber list for the event.
        """
        subs = self._subscribers
        if event not in subs:
            subs[event] = deque([subscriber])
        else:
            sq = subs[event]
            if append:
                sq.append(subscriber)
            else:
                sq.appendleft(subscriber)

    def remove(self, event, subscriber):
        """
        Remove a subscriber for an event.

        :param event: The name of an event.
:param subscriber: The subscriber to be removed. """ subs = self._subscribers if event not in subs: raise ValueError('No subscribers: %r' % event) subs[event].remove(subscriber) def get_subscribers(self, event): """ Return an iterator for the subscribers for an event. :param event: The event to return subscribers for. """ return iter(self._subscribers.get(event, ())) def publish(self, event, *args, **kwargs): """ Publish a event and return a list of values returned by its subscribers. :param event: The event to publish. :param args: The positional arguments to pass to the event's subscribers. :param kwargs: The keyword arguments to pass to the event's subscribers. """ result = [] for subscriber in self.get_subscribers(event): try: value = subscriber(event, *args, **kwargs) except Exception: logger.exception('Exception during event publication') value = None result.append(value) logger.debug('publish %s: args = %s, kwargs = %s, result = %s', event, args, kwargs, result) return result # # Simple sequencing # class Sequencer(object): def __init__(self): self._preds = {} self._succs = {} self._nodes = set() # nodes with no preds/succs def add_node(self, node): self._nodes.add(node) def remove_node(self, node, edges=False): if node in self._nodes: self._nodes.remove(node) if edges: for p in set(self._preds.get(node, ())): self.remove(p, node) for s in set(self._succs.get(node, ())): self.remove(node, s) # Remove empties for k, v in list(self._preds.items()): if not v: del self._preds[k] for k, v in list(self._succs.items()): if not v: del self._succs[k] def add(self, pred, succ): assert pred != succ self._preds.setdefault(succ, set()).add(pred) self._succs.setdefault(pred, set()).add(succ) def remove(self, pred, succ): assert pred != succ try: preds = self._preds[succ] succs = self._succs[pred] except KeyError: # pragma: no cover raise ValueError('%r not a successor of anything' % succ) try: preds.remove(pred) succs.remove(succ) except KeyError: # pragma: no cover raise ValueError('%r not a successor of %r' % (succ, pred)) def is_step(self, step): return (step in self._preds or step in self._succs or step in self._nodes) def get_steps(self, final): if not self.is_step(final): raise ValueError('Unknown: %r' % final) result = [] todo = [] seen = set() todo.append(final) while todo: step = todo.pop(0) if step in seen: # if a step was already seen, # move it to the end (so it will appear earlier # when reversed on return) ... 
but not for the # final step, as that would be confusing for # users if step != final: result.remove(step) result.append(step) else: seen.add(step) result.append(step) preds = self._preds.get(step, ()) todo.extend(preds) return reversed(result) @property def strong_connections(self): #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm index_counter = [0] stack = [] lowlinks = {} index = {} result = [] graph = self._succs def strongconnect(node): # set the depth index for this node to the smallest unused index index[node] = index_counter[0] lowlinks[node] = index_counter[0] index_counter[0] += 1 stack.append(node) # Consider successors try: successors = graph[node] except Exception: successors = [] for successor in successors: if successor not in lowlinks: # Successor has not yet been visited strongconnect(successor) lowlinks[node] = min(lowlinks[node],lowlinks[successor]) elif successor in stack: # the successor is in the stack and hence in the current # strongly connected component (SCC) lowlinks[node] = min(lowlinks[node],index[successor]) # If `node` is a root node, pop the stack and generate an SCC if lowlinks[node] == index[node]: connected_component = [] while True: successor = stack.pop() connected_component.append(successor) if successor == node: break component = tuple(connected_component) # storing the result result.append(component) for node in graph: if node not in lowlinks: strongconnect(node) return result @property def dot(self): result = ['digraph G {'] for succ in self._preds: preds = self._preds[succ] for pred in preds: result.append(' %s -> %s;' % (pred, succ)) for node in self._nodes: result.append(' %s;' % node) result.append('}') return '\n'.join(result) # # Unarchiving functionality for zip, tar, tgz, tbz, whl # ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz', '.whl') def unarchive(archive_filename, dest_dir, format=None, check=True): def check_path(path): if not isinstance(path, text_type): path = path.decode('utf-8') p = os.path.abspath(os.path.join(dest_dir, path)) if not p.startswith(dest_dir) or p[plen] != os.sep: raise ValueError('path outside destination: %r' % p) dest_dir = os.path.abspath(dest_dir) plen = len(dest_dir) archive = None if format is None: if archive_filename.endswith(('.zip', '.whl')): format = 'zip' elif archive_filename.endswith(('.tar.gz', '.tgz')): format = 'tgz' mode = 'r:gz' elif archive_filename.endswith(('.tar.bz2', '.tbz')): format = 'tbz' mode = 'r:bz2' elif archive_filename.endswith('.tar'): format = 'tar' mode = 'r' else: # pragma: no cover raise ValueError('Unknown format for %r' % archive_filename) try: if format == 'zip': archive = ZipFile(archive_filename, 'r') if check: names = archive.namelist() for name in names: check_path(name) else: archive = tarfile.open(archive_filename, mode) if check: names = archive.getnames() for name in names: check_path(name) if format != 'zip' and sys.version_info[0] < 3: # See Python issue 17153. If the dest path contains Unicode, # tarfile extraction fails on Python 2.x if a member path name # contains non-ASCII characters - it leads to an implicit # bytes -> unicode conversion using ASCII to decode. 
for tarinfo in archive.getmembers(): if not isinstance(tarinfo.name, text_type): tarinfo.name = tarinfo.name.decode('utf-8') archive.extractall(dest_dir) finally: if archive: archive.close() def zip_dir(directory): """zip a directory tree into a BytesIO object""" result = io.BytesIO() dlen = len(directory) with ZipFile(result, "w") as zf: for root, dirs, files in os.walk(directory): for name in files: full = os.path.join(root, name) rel = root[dlen:] dest = os.path.join(rel, name) zf.write(full, dest) return result # # Simple progress bar # UNITS = ('', 'K', 'M', 'G','T','P') class Progress(object): unknown = 'UNKNOWN' def __init__(self, minval=0, maxval=100): assert maxval is None or maxval >= minval self.min = self.cur = minval self.max = maxval self.started = None self.elapsed = 0 self.done = False def update(self, curval): assert self.min <= curval assert self.max is None or curval <= self.max self.cur = curval now = time.time() if self.started is None: self.started = now else: self.elapsed = now - self.started def increment(self, incr): assert incr >= 0 self.update(self.cur + incr) def start(self): self.update(self.min) return self def stop(self): if self.max is not None: self.update(self.max) self.done = True @property def maximum(self): return self.unknown if self.max is None else self.max @property def percentage(self): if self.done: result = '100 %' elif self.max is None: result = ' ?? %' else: v = 100.0 * (self.cur - self.min) / (self.max - self.min) result = '%3d %%' % v return result def format_duration(self, duration): if (duration <= 0) and self.max is None or self.cur == self.min: result = '??:??:??' #elif duration < 1: # result = '--:--:--' else: result = time.strftime('%H:%M:%S', time.gmtime(duration)) return result @property def ETA(self): if self.done: prefix = 'Done' t = self.elapsed #import pdb; pdb.set_trace() else: prefix = 'ETA ' if self.max is None: t = -1 elif self.elapsed == 0 or (self.cur == self.min): t = 0 else: #import pdb; pdb.set_trace() t = float(self.max - self.min) t /= self.cur - self.min t = (t - 1) * self.elapsed return '%s: %s' % (prefix, self.format_duration(t)) @property def speed(self): if self.elapsed == 0: result = 0.0 else: result = (self.cur - self.min) / self.elapsed for unit in UNITS: if result < 1000: break result /= 1000.0 return '%d %sB/s' % (result, unit) # # Glob functionality # RICH_GLOB = re.compile(r'\{([^}]*)\}') _CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]') _CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$') def iglob(path_glob): """Extended globbing function that supports ** and {opt1,opt2,opt3}.""" if _CHECK_RECURSIVE_GLOB.search(path_glob): msg = """invalid glob %r: recursive glob "**" must be used alone""" raise ValueError(msg % path_glob) if _CHECK_MISMATCH_SET.search(path_glob): msg = """invalid glob %r: mismatching set marker '{' or '}'""" raise ValueError(msg % path_glob) return _iglob(path_glob) def _iglob(path_glob): rich_path_glob = RICH_GLOB.split(path_glob, 1) if len(rich_path_glob) > 1: assert len(rich_path_glob) == 3, rich_path_glob prefix, set, suffix = rich_path_glob for item in set.split(','): for path in _iglob(''.join((prefix, item, suffix))): yield path else: if '**' not in path_glob: for item in std_iglob(path_glob): yield item else: prefix, radical = path_glob.split('**', 1) if prefix == '': prefix = '.' 
if radical == '': radical = '*' else: # we support both radical = radical.lstrip('/') radical = radical.lstrip('\\') for path, dir, files in os.walk(prefix): path = os.path.normpath(path) for fn in _iglob(os.path.join(path, radical)): yield fn if ssl: from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname, CertificateError) # # HTTPSConnection which verifies certificates/matches domains # class HTTPSConnection(httplib.HTTPSConnection): ca_certs = None # set this to the path to the certs file (.pem) check_domain = True # only used if ca_certs is not None # noinspection PyPropertyAccess def connect(self): sock = socket.create_connection((self.host, self.port), self.timeout) if getattr(self, '_tunnel_host', False): self.sock = sock self._tunnel() if not hasattr(ssl, 'SSLContext'): # For 2.x if self.ca_certs: cert_reqs = ssl.CERT_REQUIRED else: cert_reqs = ssl.CERT_NONE self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, cert_reqs=cert_reqs, ssl_version=ssl.PROTOCOL_SSLv23, ca_certs=self.ca_certs) else: # pragma: no cover context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) if hasattr(ssl, 'OP_NO_SSLv2'): context.options |= ssl.OP_NO_SSLv2 if self.cert_file: context.load_cert_chain(self.cert_file, self.key_file) kwargs = {} if self.ca_certs: context.verify_mode = ssl.CERT_REQUIRED context.load_verify_locations(cafile=self.ca_certs) if getattr(ssl, 'HAS_SNI', False): kwargs['server_hostname'] = self.host self.sock = context.wrap_socket(sock, **kwargs) if self.ca_certs and self.check_domain: try: match_hostname(self.sock.getpeercert(), self.host) logger.debug('Host verified: %s', self.host) except CertificateError: # pragma: no cover self.sock.shutdown(socket.SHUT_RDWR) self.sock.close() raise class HTTPSHandler(BaseHTTPSHandler): def __init__(self, ca_certs, check_domain=True): BaseHTTPSHandler.__init__(self) self.ca_certs = ca_certs self.check_domain = check_domain def _conn_maker(self, *args, **kwargs): """ This is called to create a connection instance. Normally you'd pass a connection class to do_open, but it doesn't actually check for a class, and just expects a callable. As long as we behave just as a constructor would have, we should be OK. If it ever changes so that we *must* pass a class, we'll create an UnsafeHTTPSConnection class which just sets check_domain to False in the class definition, and choose which one to pass to do_open. """ result = HTTPSConnection(*args, **kwargs) if self.ca_certs: result.ca_certs = self.ca_certs result.check_domain = self.check_domain return result def https_open(self, req): try: return self.do_open(self._conn_maker, req) except URLError as e: if 'certificate verify failed' in str(e.reason): raise CertificateError('Unable to verify server certificate ' 'for %s' % req.host) else: raise # # To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The- # Middle proxy using HTTP listens on port 443, or an index mistakenly serves # HTML containing a http://xyz link when it should be https://xyz), # you can use the following handler class, which does not allow HTTP traffic. # # It works by inheriting from HTTPHandler - so build_opener won't add a # handler for HTTP itself. 
# class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler): def http_open(self, req): raise URLError('Unexpected HTTP request on what should be a secure ' 'connection: %s' % req) # # XML-RPC with timeouts # _ver_info = sys.version_info[:2] if _ver_info == (2, 6): class HTTP(httplib.HTTP): def __init__(self, host='', port=None, **kwargs): if port == 0: # 0 means use port 0, not the default port port = None self._setup(self._connection_class(host, port, **kwargs)) if ssl: class HTTPS(httplib.HTTPS): def __init__(self, host='', port=None, **kwargs): if port == 0: # 0 means use port 0, not the default port port = None self._setup(self._connection_class(host, port, **kwargs)) class Transport(xmlrpclib.Transport): def __init__(self, timeout, use_datetime=0): self.timeout = timeout xmlrpclib.Transport.__init__(self, use_datetime) def make_connection(self, host): h, eh, x509 = self.get_host_info(host) if _ver_info == (2, 6): result = HTTP(h, timeout=self.timeout) else: if not self._connection or host != self._connection[0]: self._extra_headers = eh self._connection = host, httplib.HTTPConnection(h) result = self._connection[1] return result if ssl: class SafeTransport(xmlrpclib.SafeTransport): def __init__(self, timeout, use_datetime=0): self.timeout = timeout xmlrpclib.SafeTransport.__init__(self, use_datetime) def make_connection(self, host): h, eh, kwargs = self.get_host_info(host) if not kwargs: kwargs = {} kwargs['timeout'] = self.timeout if _ver_info == (2, 6): result = HTTPS(host, None, **kwargs) else: if not self._connection or host != self._connection[0]: self._extra_headers = eh self._connection = host, httplib.HTTPSConnection(h, None, **kwargs) result = self._connection[1] return result class ServerProxy(xmlrpclib.ServerProxy): def __init__(self, uri, **kwargs): self.timeout = timeout = kwargs.pop('timeout', None) # The above classes only come into play if a timeout # is specified if timeout is not None: scheme, _ = splittype(uri) use_datetime = kwargs.get('use_datetime', 0) if scheme == 'https': tcls = SafeTransport else: tcls = Transport kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime) self.transport = t xmlrpclib.ServerProxy.__init__(self, uri, **kwargs) # # CSV functionality. This is provided because on 2.x, the csv module can't # handle Unicode. However, we need to deal with Unicode in e.g. RECORD files. # def _csv_open(fn, mode, **kwargs): if sys.version_info[0] < 3: mode += 'b' else: kwargs['newline'] = '' # Python 3 determines encoding from locale. 
Force 'utf-8' # file encoding to match other forced utf-8 encoding kwargs['encoding'] = 'utf-8' return open(fn, mode, **kwargs) class CSVBase(object): defaults = { 'delimiter': str(','), # The strs are used because we need native 'quotechar': str('"'), # str in the csv API (2.x won't take 'lineterminator': str('\n') # Unicode) } def __enter__(self): return self def __exit__(self, *exc_info): self.stream.close() class CSVReader(CSVBase): def __init__(self, **kwargs): if 'stream' in kwargs: stream = kwargs['stream'] if sys.version_info[0] >= 3: # needs to be a text stream stream = codecs.getreader('utf-8')(stream) self.stream = stream else: self.stream = _csv_open(kwargs['path'], 'r') self.reader = csv.reader(self.stream, **self.defaults) def __iter__(self): return self def next(self): result = next(self.reader) if sys.version_info[0] < 3: for i, item in enumerate(result): if not isinstance(item, text_type): result[i] = item.decode('utf-8') return result __next__ = next class CSVWriter(CSVBase): def __init__(self, fn, **kwargs): self.stream = _csv_open(fn, 'w') self.writer = csv.writer(self.stream, **self.defaults) def writerow(self, row): if sys.version_info[0] < 3: r = [] for item in row: if isinstance(item, text_type): item = item.encode('utf-8') r.append(item) row = r self.writer.writerow(row) # # Configurator functionality # class Configurator(BaseConfigurator): value_converters = dict(BaseConfigurator.value_converters) value_converters['inc'] = 'inc_convert' def __init__(self, config, base=None): super(Configurator, self).__init__(config) self.base = base or os.getcwd() def configure_custom(self, config): def convert(o): if isinstance(o, (list, tuple)): result = type(o)([convert(i) for i in o]) elif isinstance(o, dict): if '()' in o: result = self.configure_custom(o) else: result = {} for k in o: result[k] = convert(o[k]) else: result = self.convert(o) return result c = config.pop('()') if not callable(c): c = self.resolve(c) props = config.pop('.', None) # Check for valid identifiers args = config.pop('[]', ()) if args: args = tuple([convert(o) for o in args]) items = [(k, convert(config[k])) for k in config if valid_ident(k)] kwargs = dict(items) result = c(*args, **kwargs) if props: for n, v in props.items(): setattr(result, n, convert(v)) return result def __getitem__(self, key): result = self.config[key] if isinstance(result, dict) and '()' in result: self.config[key] = result = self.configure_custom(result) return result def inc_convert(self, value): """Default converter for the inc:// protocol.""" if not os.path.isabs(value): value = os.path.join(self.base, value) with codecs.open(value, 'r', encoding='utf-8') as f: result = json.load(f) return result class SubprocessMixin(object): """ Mixin for running subprocesses and capturing their output """ def __init__(self, verbose=False, progress=None): self.verbose = verbose self.progress = progress def reader(self, stream, context): """ Read lines from a subprocess' output stream and either pass to a progress callable (if specified) or write progress information to sys.stderr. 
""" progress = self.progress verbose = self.verbose while True: s = stream.readline() if not s: break if progress is not None: progress(s, context) else: if not verbose: sys.stderr.write('.') else: sys.stderr.write(s.decode('utf-8')) sys.stderr.flush() stream.close() def run_command(self, cmd, **kwargs): p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout')) t1.start() t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr')) t2.start() p.wait() t1.join() t2.join() if self.progress is not None: self.progress('done.', 'main') elif self.verbose: sys.stderr.write('done.\n') return p def normalize_name(name): """Normalize a python package name a la PEP 503""" # https://www.python.org/dev/peps/pep-0503/#normalized-names return re.sub('[-_.]+', '-', name).lower()
0
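The module above defines a number of small, self-contained helpers. The following is a minimal usage sketch, under the assumption that distlib is importable directly (inside pip the same module is vendored as pip._vendor.distlib.util); the expected values follow from the definitions shown above.

# Minimal usage sketch for some of the helpers defined above.
# Assumption: distlib is importable directly; inside pip the module
# lives at pip._vendor.distlib.util instead.
from distlib.util import (get_export_entry, normalize_name,
                          parse_name_and_version, split_filename)

# PEP 503 name normalization: runs of '-', '_' and '.' collapse to '-'.
assert normalize_name('Friendly_Bard.Tools') == 'friendly-bard-tools'

# 'name (version)' strings, e.g. Provides-Dist values, split into a tuple.
assert parse_name_and_version('foo (1.0)') == ('foo', '1.0')

# Filenames without extension split into (name, version, python-version).
assert split_filename('requests-2.25.1-py3.8') == ('requests', '2.25.1', '3.8')

# Export specifications parse into ExportEntry objects.
entry = get_export_entry('hello = hello.main:run')
assert (entry.prefix, entry.suffix, entry.flags) == ('hello.main', 'run', [])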
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/database.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2017 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""PEP 376 implementation."""

from __future__ import unicode_literals

import base64
import codecs
import contextlib
import hashlib
import logging
import os
import posixpath
import sys
import zipimport

from . import DistlibException, resources
from .compat import StringIO
from .version import get_scheme, UnsupportedVersionError
from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME,
                       LEGACY_METADATA_FILENAME)
from .util import (parse_requirement, cached_property, parse_name_and_version,
                   read_exports, write_exports, CSVReader, CSVWriter)


__all__ = ['Distribution', 'BaseInstalledDistribution',
           'InstalledDistribution', 'EggInfoDistribution',
           'DistributionPath']


logger = logging.getLogger(__name__)

EXPORTS_FILENAME = 'pydist-exports.json'
COMMANDS_FILENAME = 'pydist-commands.json'

DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
              'RESOURCES', EXPORTS_FILENAME, 'SHARED')

DISTINFO_EXT = '.dist-info'


class _Cache(object):
    """
    A simple cache mapping names and .dist-info paths to distributions
    """
    def __init__(self):
        """
        Initialise an instance. There is normally one for each DistributionPath.
        """
        self.name = {}
        self.path = {}
        self.generated = False

    def clear(self):
        """
        Clear the cache, setting it to its initial state.
        """
        self.name.clear()
        self.path.clear()
        self.generated = False

    def add(self, dist):
        """
        Add a distribution to the cache.
        :param dist: The distribution to add.
        """
        if dist.path not in self.path:
            self.path[dist.path] = dist
            self.name.setdefault(dist.key, []).append(dist)


class DistributionPath(object):
    """
    Represents a set of distributions installed on a path (typically sys.path).
    """
    def __init__(self, path=None, include_egg=False):
        """
        Create an instance from a path, optionally including legacy (distutils/
        setuptools/distribute) distributions.
        :param path: The path to use, as a list of directories. If not specified,
                     sys.path is used.
        :param include_egg: If True, this instance will look for and return legacy
                            distributions as well as those based on PEP 376.
        """
        if path is None:
            path = sys.path
        self.path = path
        self._include_dist = True
        self._include_egg = include_egg

        self._cache = _Cache()
        self._cache_egg = _Cache()
        self._cache_enabled = True
        self._scheme = get_scheme('default')

    def _get_cache_enabled(self):
        return self._cache_enabled

    def _set_cache_enabled(self, value):
        self._cache_enabled = value

    cache_enabled = property(_get_cache_enabled, _set_cache_enabled)

    def clear_cache(self):
        """
        Clears the internal cache.
        """
        self._cache.clear()
        self._cache_egg.clear()

    def _yield_distributions(self):
        """
        Yield .dist-info and/or .egg(-info) distributions.
        """
        # We need to check if we've seen some resources already, because on
        # some Linux systems (e.g. some Debian/Ubuntu variants) there are
        # symlinks which alias other files in the environment.
seen = set() for path in self.path: finder = resources.finder_for_path(path) if finder is None: continue r = finder.find('') if not r or not r.is_container: continue rset = sorted(r.resources) for entry in rset: r = finder.find(entry) if not r or r.path in seen: continue if self._include_dist and entry.endswith(DISTINFO_EXT): possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME] for metadata_filename in possible_filenames: metadata_path = posixpath.join(entry, metadata_filename) pydist = finder.find(metadata_path) if pydist: break else: continue with contextlib.closing(pydist.as_stream()) as stream: metadata = Metadata(fileobj=stream, scheme='legacy') logger.debug('Found %s', r.path) seen.add(r.path) yield new_dist_class(r.path, metadata=metadata, env=self) elif self._include_egg and entry.endswith(('.egg-info', '.egg')): logger.debug('Found %s', r.path) seen.add(r.path) yield old_dist_class(r.path, self) def _generate_cache(self): """ Scan the path for distributions and populate the cache with those that are found. """ gen_dist = not self._cache.generated gen_egg = self._include_egg and not self._cache_egg.generated if gen_dist or gen_egg: for dist in self._yield_distributions(): if isinstance(dist, InstalledDistribution): self._cache.add(dist) else: self._cache_egg.add(dist) if gen_dist: self._cache.generated = True if gen_egg: self._cache_egg.generated = True @classmethod def distinfo_dirname(cls, name, version): """ The *name* and *version* parameters are converted into their filename-escaped form, i.e. any ``'-'`` characters are replaced with ``'_'`` other than the one in ``'dist-info'`` and the one separating the name from the version number. :parameter name: is converted to a standard distribution name by replacing any runs of non- alphanumeric characters with a single ``'-'``. :type name: string :parameter version: is converted to a standard version string. Spaces become dots, and all other non-alphanumeric characters (except dots) become dashes, with runs of multiple dashes condensed to a single dash. :type version: string :returns: directory name :rtype: string""" name = name.replace('-', '_') return '-'.join([name, version]) + DISTINFO_EXT def get_distributions(self): """ Provides an iterator that looks for distributions and returns :class:`InstalledDistribution` or :class:`EggInfoDistribution` instances for each one of them. :rtype: iterator of :class:`InstalledDistribution` and :class:`EggInfoDistribution` instances """ if not self._cache_enabled: for dist in self._yield_distributions(): yield dist else: self._generate_cache() for dist in self._cache.path.values(): yield dist if self._include_egg: for dist in self._cache_egg.path.values(): yield dist def get_distribution(self, name): """ Looks for a named distribution on the path. This function only returns the first result found, as no more than one value is expected. If nothing is found, ``None`` is returned. :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution` or ``None`` """ result = None name = name.lower() if not self._cache_enabled: for dist in self._yield_distributions(): if dist.key == name: result = dist break else: self._generate_cache() if name in self._cache.name: result = self._cache.name[name][0] elif self._include_egg and name in self._cache_egg.name: result = self._cache_egg.name[name][0] return result def provides_distribution(self, name, version=None): """ Iterates over all distributions to find which distributions provide *name*. 
If a *version* is provided, it will be used to filter the results. This function only returns the first result found, since no more than one values are expected. If the directory is not found, returns ``None``. :parameter version: a version specifier that indicates the version required, conforming to the format in ``PEP-345`` :type name: string :type version: string """ matcher = None if version is not None: try: matcher = self._scheme.matcher('%s (%s)' % (name, version)) except ValueError: raise DistlibException('invalid name or version: %r, %r' % (name, version)) for dist in self.get_distributions(): # We hit a problem on Travis where enum34 was installed and doesn't # have a provides attribute ... if not hasattr(dist, 'provides'): logger.debug('No "provides": %s', dist) else: provided = dist.provides for p in provided: p_name, p_ver = parse_name_and_version(p) if matcher is None: if p_name == name: yield dist break else: if p_name == name and matcher.match(p_ver): yield dist break def get_file_path(self, name, relative_path): """ Return the path to a resource file. """ dist = self.get_distribution(name) if dist is None: raise LookupError('no distribution named %r found' % name) return dist.get_resource_path(relative_path) def get_exported_entries(self, category, name=None): """ Return all of the exported entries in a particular category. :param category: The category to search for entries. :param name: If specified, only entries with that name are returned. """ for dist in self.get_distributions(): r = dist.exports if category in r: d = r[category] if name is not None: if name in d: yield d[name] else: for v in d.values(): yield v class Distribution(object): """ A base class for distributions, whether installed or from indexes. Either way, it must have some metadata, so that's all that's needed for construction. """ build_time_dependency = False """ Set to True if it's known to be only a build-time dependency (i.e. not needed after installation). """ requested = False """A boolean that indicates whether the ``REQUESTED`` metadata file is present (in other words, whether the package was installed by user request or it was installed as a dependency).""" def __init__(self, metadata): """ Initialise an instance. :param metadata: The instance of :class:`Metadata` describing this distribution. """ self.metadata = metadata self.name = metadata.name self.key = self.name.lower() # for case-insensitive comparisons self.version = metadata.version self.locator = None self.digest = None self.extras = None # additional features requested self.context = None # environment marker overrides self.download_urls = set() self.digests = {} @property def source_url(self): """ The source archive download URL for this distribution. """ return self.metadata.source_url download_url = source_url # Backward compatibility @property def name_and_version(self): """ A utility property which displays the name and version in parentheses. """ return '%s (%s)' % (self.name, self.version) @property def provides(self): """ A set of distribution names and versions provided by this distribution. :return: A set of "name (version)" strings. 
""" plist = self.metadata.provides s = '%s (%s)' % (self.name, self.version) if s not in plist: plist.append(s) return plist def _get_requirements(self, req_attr): md = self.metadata logger.debug('Getting requirements from metadata %r', md.todict()) reqts = getattr(md, req_attr) return set(md.get_requirements(reqts, extras=self.extras, env=self.context)) @property def run_requires(self): return self._get_requirements('run_requires') @property def meta_requires(self): return self._get_requirements('meta_requires') @property def build_requires(self): return self._get_requirements('build_requires') @property def test_requires(self): return self._get_requirements('test_requires') @property def dev_requires(self): return self._get_requirements('dev_requires') def matches_requirement(self, req): """ Say if this instance matches (fulfills) a requirement. :param req: The requirement to match. :rtype req: str :return: True if it matches, else False. """ # Requirement may contain extras - parse to lose those # from what's passed to the matcher r = parse_requirement(req) scheme = get_scheme(self.metadata.scheme) try: matcher = scheme.matcher(r.requirement) except UnsupportedVersionError: # XXX compat-mode if cannot read the version logger.warning('could not read version %r - using name only', req) name = req.split()[0] matcher = scheme.matcher(name) name = matcher.key # case-insensitive result = False for p in self.provides: p_name, p_ver = parse_name_and_version(p) if p_name != name: continue try: result = matcher.match(p_ver) break except UnsupportedVersionError: pass return result def __repr__(self): """ Return a textual representation of this instance, """ if self.source_url: suffix = ' [%s]' % self.source_url else: suffix = '' return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix) def __eq__(self, other): """ See if this distribution is the same as another. :param other: The distribution to compare with. To be equal to one another. distributions must have the same type, name, version and source_url. :return: True if it is the same, else False. """ if type(other) is not type(self): result = False else: result = (self.name == other.name and self.version == other.version and self.source_url == other.source_url) return result def __hash__(self): """ Compute hash in a way which matches the equality test. """ return hash(self.name) + hash(self.version) + hash(self.source_url) class BaseInstalledDistribution(Distribution): """ This is the base class for installed distributions (whether PEP 376 or legacy). """ hasher = None def __init__(self, metadata, path, env=None): """ Initialise an instance. :param metadata: An instance of :class:`Metadata` which describes the distribution. This will normally have been initialised from a metadata file in the ``path``. :param path: The path of the ``.dist-info`` or ``.egg-info`` directory for the distribution. :param env: This is normally the :class:`DistributionPath` instance where this distribution was found. """ super(BaseInstalledDistribution, self).__init__(metadata) self.path = path self.dist_path = env def get_hash(self, data, hasher=None): """ Get the hash of some data, using a particular hash algorithm, if specified. :param data: The data to be hashed. :type data: bytes :param hasher: The name of a hash implementation, supported by hashlib, or ``None``. Examples of valid values are ``'sha1'``, ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and ``'sha512'``. 
If no hasher is specified, the ``hasher`` attribute of the :class:`InstalledDistribution` instance is used. If the hasher is determined to be ``None``, MD5 is used as the hashing algorithm. :returns: The hash of the data. If a hasher was explicitly specified, the returned hash will be prefixed with the specified hasher followed by '='. :rtype: str """ if hasher is None: hasher = self.hasher if hasher is None: hasher = hashlib.md5 prefix = '' else: hasher = getattr(hashlib, hasher) prefix = '%s=' % self.hasher digest = hasher(data).digest() digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii') return '%s%s' % (prefix, digest) class InstalledDistribution(BaseInstalledDistribution): """ Created with the *path* of the ``.dist-info`` directory provided to the constructor. It reads the metadata contained in ``pydist.json`` when it is instantiated., or uses a passed in Metadata instance (useful for when dry-run mode is being used). """ hasher = 'sha256' def __init__(self, path, metadata=None, env=None): self.modules = [] self.finder = finder = resources.finder_for_path(path) if finder is None: raise ValueError('finder unavailable for %s' % path) if env and env._cache_enabled and path in env._cache.path: metadata = env._cache.path[path].metadata elif metadata is None: r = finder.find(METADATA_FILENAME) # Temporary - for Wheel 0.23 support if r is None: r = finder.find(WHEEL_METADATA_FILENAME) # Temporary - for legacy support if r is None: r = finder.find(LEGACY_METADATA_FILENAME) if r is None: raise ValueError('no %s found in %s' % (METADATA_FILENAME, path)) with contextlib.closing(r.as_stream()) as stream: metadata = Metadata(fileobj=stream, scheme='legacy') super(InstalledDistribution, self).__init__(metadata, path, env) if env and env._cache_enabled: env._cache.add(self) r = finder.find('REQUESTED') self.requested = r is not None p = os.path.join(path, 'top_level.txt') if os.path.exists(p): with open(p, 'rb') as f: data = f.read().decode('utf-8') self.modules = data.splitlines() def __repr__(self): return '<InstalledDistribution %r %s at %r>' % ( self.name, self.version, self.path) def __str__(self): return "%s %s" % (self.name, self.version) def _get_records(self): """ Get the list of installed files for the distribution :return: A list of tuples of path, hash and size. Note that hash and size might be ``None`` for some entries. The path is exactly as stored in the file (which is as in PEP 376). """ results = [] r = self.get_distinfo_resource('RECORD') with contextlib.closing(r.as_stream()) as stream: with CSVReader(stream=stream) as record_reader: # Base location is parent dir of .dist-info dir #base_location = os.path.dirname(self.path) #base_location = os.path.abspath(base_location) for row in record_reader: missing = [None for i in range(len(row), 3)] path, checksum, size = row + missing #if not os.path.isabs(path): # path = path.replace('/', os.sep) # path = os.path.join(base_location, path) results.append((path, checksum, size)) return results @cached_property def exports(self): """ Return the information exported by this distribution. :return: A dictionary of exports, mapping an export category to a dict of :class:`ExportEntry` instances describing the individual export entries, and keyed by name. """ result = {} r = self.get_distinfo_resource(EXPORTS_FILENAME) if r: result = self.read_exports() return result def read_exports(self): """ Read exports data from a file in .ini format. 
:return: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries. """ result = {} r = self.get_distinfo_resource(EXPORTS_FILENAME) if r: with contextlib.closing(r.as_stream()) as stream: result = read_exports(stream) return result def write_exports(self, exports): """ Write a dictionary of exports to a file in .ini format. :param exports: A dictionary of exports, mapping an export category to a list of :class:`ExportEntry` instances describing the individual export entries. """ rf = self.get_distinfo_file(EXPORTS_FILENAME) with open(rf, 'w') as f: write_exports(exports, f) def get_resource_path(self, relative_path): """ NOTE: This API may change in the future. Return the absolute path to a resource file with the given relative path. :param relative_path: The path, relative to .dist-info, of the resource of interest. :return: The absolute path where the resource is to be found. """ r = self.get_distinfo_resource('RESOURCES') with contextlib.closing(r.as_stream()) as stream: with CSVReader(stream=stream) as resources_reader: for relative, destination in resources_reader: if relative == relative_path: return destination raise KeyError('no resource file with relative path %r ' 'is installed' % relative_path) def list_installed_files(self): """ Iterates over the ``RECORD`` entries and returns a tuple ``(path, hash, size)`` for each line. :returns: iterator of (path, hash, size) """ for result in self._get_records(): yield result def write_installed_files(self, paths, prefix, dry_run=False): """ Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any existing ``RECORD`` file is silently overwritten. prefix is used to determine when to write absolute paths. """ prefix = os.path.join(prefix, '') base = os.path.dirname(self.path) base_under_prefix = base.startswith(prefix) base = os.path.join(base, '') record_path = self.get_distinfo_file('RECORD') logger.info('creating %s', record_path) if dry_run: return None with CSVWriter(record_path) as writer: for path in paths: if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')): # do not put size and hash, as in PEP-376 hash_value = size = '' else: size = '%d' % os.path.getsize(path) with open(path, 'rb') as fp: hash_value = self.get_hash(fp.read()) if path.startswith(base) or (base_under_prefix and path.startswith(prefix)): path = os.path.relpath(path, base) writer.writerow((path, hash_value, size)) # add the RECORD file itself if record_path.startswith(base): record_path = os.path.relpath(record_path, base) writer.writerow((record_path, '', '')) return record_path def check_installed_files(self): """ Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. Each entry in the mismatch list will be a tuple consisting of the path, 'exists', 'size' or 'hash' according to what didn't match (existence is checked first, then size, then hash), the expected value and the actual value. 
""" mismatches = [] base = os.path.dirname(self.path) record_path = self.get_distinfo_file('RECORD') for path, hash_value, size in self.list_installed_files(): if not os.path.isabs(path): path = os.path.join(base, path) if path == record_path: continue if not os.path.exists(path): mismatches.append((path, 'exists', True, False)) elif os.path.isfile(path): actual_size = str(os.path.getsize(path)) if size and actual_size != size: mismatches.append((path, 'size', size, actual_size)) elif hash_value: if '=' in hash_value: hasher = hash_value.split('=', 1)[0] else: hasher = None with open(path, 'rb') as f: actual_hash = self.get_hash(f.read(), hasher) if actual_hash != hash_value: mismatches.append((path, 'hash', hash_value, actual_hash)) return mismatches @cached_property def shared_locations(self): """ A dictionary of shared locations whose keys are in the set 'prefix', 'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'. The corresponding value is the absolute path of that category for this distribution, and takes into account any paths selected by the user at installation time (e.g. via command-line arguments). In the case of the 'namespace' key, this would be a list of absolute paths for the roots of namespace packages in this distribution. The first time this property is accessed, the relevant information is read from the SHARED file in the .dist-info directory. """ result = {} shared_path = os.path.join(self.path, 'SHARED') if os.path.isfile(shared_path): with codecs.open(shared_path, 'r', encoding='utf-8') as f: lines = f.read().splitlines() for line in lines: key, value = line.split('=', 1) if key == 'namespace': result.setdefault(key, []).append(value) else: result[key] = value return result def write_shared_locations(self, paths, dry_run=False): """ Write shared location information to the SHARED file in .dist-info. :param paths: A dictionary as described in the documentation for :meth:`shared_locations`. :param dry_run: If True, the action is logged but no file is actually written. :return: The path of the file written to. """ shared_path = os.path.join(self.path, 'SHARED') logger.info('creating %s', shared_path) if dry_run: return None lines = [] for key in ('prefix', 'lib', 'headers', 'scripts', 'data'): path = paths[key] if os.path.isdir(paths[key]): lines.append('%s=%s' % (key, path)) for ns in paths.get('namespace', ()): lines.append('namespace=%s' % ns) with codecs.open(shared_path, 'w', encoding='utf-8') as f: f.write('\n'.join(lines)) return shared_path def get_distinfo_resource(self, path): if path not in DIST_FILES: raise DistlibException('invalid path for a dist-info file: ' '%r at %r' % (path, self.path)) finder = resources.finder_for_path(self.path) if finder is None: raise DistlibException('Unable to get a finder for %s' % self.path) return finder.find(path) def get_distinfo_file(self, path): """ Returns a path located under the ``.dist-info`` directory. Returns a string representing the path. :parameter path: a ``'/'``-separated path relative to the ``.dist-info`` directory or an absolute path; If *path* is an absolute path and doesn't start with the ``.dist-info`` directory path, a :class:`DistlibException` is raised :type path: str :rtype: str """ # Check if it is an absolute path # XXX use relpath, add tests if path.find(os.sep) >= 0: # it's an absolute path? 
distinfo_dirname, path = path.split(os.sep)[-2:] if distinfo_dirname != self.path.split(os.sep)[-1]: raise DistlibException( 'dist-info file %r does not belong to the %r %s ' 'distribution' % (path, self.name, self.version)) # The file must be relative if path not in DIST_FILES: raise DistlibException('invalid path for a dist-info file: ' '%r at %r' % (path, self.path)) return os.path.join(self.path, path) def list_distinfo_files(self): """ Iterates over the ``RECORD`` entries and returns paths for each line if the path is pointing to a file located in the ``.dist-info`` directory or one of its subdirectories. :returns: iterator of paths """ base = os.path.dirname(self.path) for path, checksum, size in self._get_records(): # XXX add separator or use real relpath algo if not os.path.isabs(path): path = os.path.join(base, path) if path.startswith(self.path): yield path def __eq__(self, other): return (isinstance(other, InstalledDistribution) and self.path == other.path) # See http://docs.python.org/reference/datamodel#object.__hash__ __hash__ = object.__hash__ class EggInfoDistribution(BaseInstalledDistribution): """Created with the *path* of the ``.egg-info`` directory or file provided to the constructor. It reads the metadata contained in the file itself, or if the given path happens to be a directory, the metadata is read from the file ``PKG-INFO`` under that directory.""" requested = True # as we have no way of knowing, assume it was shared_locations = {} def __init__(self, path, env=None): def set_name_and_version(s, n, v): s.name = n s.key = n.lower() # for case-insensitive comparisons s.version = v self.path = path self.dist_path = env if env and env._cache_enabled and path in env._cache_egg.path: metadata = env._cache_egg.path[path].metadata set_name_and_version(self, metadata.name, metadata.version) else: metadata = self._get_metadata(path) # Need to be set before caching set_name_and_version(self, metadata.name, metadata.version) if env and env._cache_enabled: env._cache_egg.add(self) super(EggInfoDistribution, self).__init__(metadata, path, env) def _get_metadata(self, path): requires = None def parse_requires_data(data): """Create a list of dependencies from a requires.txt file. *data*: the contents of a setuptools-produced requires.txt file. """ reqs = [] lines = data.splitlines() for line in lines: line = line.strip() if line.startswith('['): logger.warning('Unexpected line: quitting requirement scan: %r', line) break r = parse_requirement(line) if not r: logger.warning('Not recognised as a requirement: %r', line) continue if r.extras: logger.warning('extra requirements in requires.txt are ' 'not supported') if not r.constraints: reqs.append(r.name) else: cons = ', '.join('%s%s' % c for c in r.constraints) reqs.append('%s (%s)' % (r.name, cons)) return reqs def parse_requires_path(req_path): """Create a list of dependencies from a requires.txt file. *req_path*: the path to a setuptools-produced requires.txt file. 
""" reqs = [] try: with codecs.open(req_path, 'r', 'utf-8') as fp: reqs = parse_requires_data(fp.read()) except IOError: pass return reqs tl_path = tl_data = None if path.endswith('.egg'): if os.path.isdir(path): p = os.path.join(path, 'EGG-INFO') meta_path = os.path.join(p, 'PKG-INFO') metadata = Metadata(path=meta_path, scheme='legacy') req_path = os.path.join(p, 'requires.txt') tl_path = os.path.join(p, 'top_level.txt') requires = parse_requires_path(req_path) else: # FIXME handle the case where zipfile is not available zipf = zipimport.zipimporter(path) fileobj = StringIO( zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8')) metadata = Metadata(fileobj=fileobj, scheme='legacy') try: data = zipf.get_data('EGG-INFO/requires.txt') tl_data = zipf.get_data('EGG-INFO/top_level.txt').decode('utf-8') requires = parse_requires_data(data.decode('utf-8')) except IOError: requires = None elif path.endswith('.egg-info'): if os.path.isdir(path): req_path = os.path.join(path, 'requires.txt') requires = parse_requires_path(req_path) path = os.path.join(path, 'PKG-INFO') tl_path = os.path.join(path, 'top_level.txt') metadata = Metadata(path=path, scheme='legacy') else: raise DistlibException('path must end with .egg-info or .egg, ' 'got %r' % path) if requires: metadata.add_requirements(requires) # look for top-level modules in top_level.txt, if present if tl_data is None: if tl_path is not None and os.path.exists(tl_path): with open(tl_path, 'rb') as f: tl_data = f.read().decode('utf-8') if not tl_data: tl_data = [] else: tl_data = tl_data.splitlines() self.modules = tl_data return metadata def __repr__(self): return '<EggInfoDistribution %r %s at %r>' % ( self.name, self.version, self.path) def __str__(self): return "%s %s" % (self.name, self.version) def check_installed_files(self): """ Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. Each entry in the mismatch list will be a tuple consisting of the path, 'exists', 'size' or 'hash' according to what didn't match (existence is checked first, then size, then hash), the expected value and the actual value. """ mismatches = [] record_path = os.path.join(self.path, 'installed-files.txt') if os.path.exists(record_path): for path, _, _ in self.list_installed_files(): if path == record_path: continue if not os.path.exists(path): mismatches.append((path, 'exists', True, False)) return mismatches def list_installed_files(self): """ Iterates over the ``installed-files.txt`` entries and returns a tuple ``(path, hash, size)`` for each line. 
:returns: a list of (path, hash, size) """ def _md5(path): f = open(path, 'rb') try: content = f.read() finally: f.close() return hashlib.md5(content).hexdigest() def _size(path): return os.stat(path).st_size record_path = os.path.join(self.path, 'installed-files.txt') result = [] if os.path.exists(record_path): with codecs.open(record_path, 'r', encoding='utf-8') as f: for line in f: line = line.strip() p = os.path.normpath(os.path.join(self.path, line)) # "./" is present as a marker between installed files # and installation metadata files if not os.path.exists(p): logger.warning('Non-existent file: %s', p) if p.endswith(('.pyc', '.pyo')): continue #otherwise fall through and fail if not os.path.isdir(p): result.append((p, _md5(p), _size(p))) result.append((record_path, None, None)) return result def list_distinfo_files(self, absolute=False): """ Iterates over the ``installed-files.txt`` entries and returns paths for each line if the path is pointing to a file located in the ``.egg-info`` directory or one of its subdirectories. :parameter absolute: If *absolute* is ``True``, each returned path is transformed into a local absolute path. Otherwise the raw value from ``installed-files.txt`` is returned. :type absolute: boolean :returns: iterator of paths """ record_path = os.path.join(self.path, 'installed-files.txt') if os.path.exists(record_path): skip = True with codecs.open(record_path, 'r', encoding='utf-8') as f: for line in f: line = line.strip() if line == './': skip = False continue if not skip: p = os.path.normpath(os.path.join(self.path, line)) if p.startswith(self.path): if absolute: yield p else: yield line def __eq__(self, other): return (isinstance(other, EggInfoDistribution) and self.path == other.path) # See http://docs.python.org/reference/datamodel#object.__hash__ __hash__ = object.__hash__ new_dist_class = InstalledDistribution old_dist_class = EggInfoDistribution class DependencyGraph(object): """ Represents a dependency graph between distributions. The dependency relationships are stored in an ``adjacency_list`` that maps distributions to a list of ``(other, label)`` tuples where ``other`` is a distribution and the edge is labeled with ``label`` (i.e. the version specifier, if such was provided). Also, for more efficient traversal, for every distribution ``x``, a list of predecessors is kept in ``reverse_list[x]``. An edge from distribution ``a`` to distribution ``b`` means that ``a`` depends on ``b``. If any missing dependencies are found, they are stored in ``missing``, which is a dictionary that maps distributions to a list of requirements that were not provided by any other distributions. """ def __init__(self): self.adjacency_list = {} self.reverse_list = {} self.missing = {} def add_distribution(self, distribution): """Add the *distribution* to the graph. :type distribution: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` """ self.adjacency_list[distribution] = [] self.reverse_list[distribution] = [] #self.missing[distribution] = [] def add_edge(self, x, y, label=None): """Add an edge from distribution *x* to distribution *y* with the given *label*. 
:type x: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` :type y: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` :type label: ``str`` or ``None`` """ self.adjacency_list[x].append((y, label)) # multiple edges are allowed, so be careful if x not in self.reverse_list[y]: self.reverse_list[y].append(x) def add_missing(self, distribution, requirement): """ Add a missing *requirement* for the given *distribution*. :type distribution: :class:`distutils2.database.InstalledDistribution` or :class:`distutils2.database.EggInfoDistribution` :type requirement: ``str`` """ logger.debug('%s missing %r', distribution, requirement) self.missing.setdefault(distribution, []).append(requirement) def _repr_dist(self, dist): return '%s %s' % (dist.name, dist.version) def repr_node(self, dist, level=1): """Prints only a subgraph""" output = [self._repr_dist(dist)] for other, label in self.adjacency_list[dist]: dist = self._repr_dist(other) if label is not None: dist = '%s [%s]' % (dist, label) output.append(' ' * level + str(dist)) suboutput = self.repr_node(other, level + 1) subs = suboutput.split('\n') output.extend(subs[1:]) return '\n'.join(output) def to_dot(self, f, skip_disconnected=True): """Writes a DOT output for the graph to the provided file *f*. If *skip_disconnected* is set to ``True``, then all distributions that are not dependent on any other distribution are skipped. :type f: has to support ``file``-like operations :type skip_disconnected: ``bool`` """ disconnected = [] f.write("digraph dependencies {\n") for dist, adjs in self.adjacency_list.items(): if len(adjs) == 0 and not skip_disconnected: disconnected.append(dist) for other, label in adjs: if not label is None: f.write('"%s" -> "%s" [label="%s"]\n' % (dist.name, other.name, label)) else: f.write('"%s" -> "%s"\n' % (dist.name, other.name)) if not skip_disconnected and len(disconnected) > 0: f.write('subgraph disconnected {\n') f.write('label = "Disconnected"\n') f.write('bgcolor = red\n') for dist in disconnected: f.write('"%s"' % dist.name) f.write('\n') f.write('}\n') f.write('}\n') def topological_sort(self): """ Perform a topological sort of the graph. :return: A tuple, the first element of which is a topologically sorted list of distributions, and the second element of which is a list of distributions that cannot be sorted because they have circular dependencies and so form a cycle. """ result = [] # Make a shallow copy of the adjacency list alist = {} for k, v in self.adjacency_list.items(): alist[k] = v[:] while True: # See what we can remove in this run to_remove = [] for k, v in list(alist.items())[:]: if not v: to_remove.append(k) del alist[k] if not to_remove: # What's left in alist (if anything) is a cycle. break # Remove from the adjacency list of others for k, v in alist.items(): alist[k] = [(d, r) for d, r in v if d not in to_remove] logger.debug('Moving to result: %s', ['%s (%s)' % (d.name, d.version) for d in to_remove]) result.extend(to_remove) return result, list(alist.keys()) def __repr__(self): """Representation of the graph""" output = [] for dist, adjs in self.adjacency_list.items(): output.append(self.repr_node(dist)) return '\n'.join(output) def make_graph(dists, scheme='default'): """Makes a dependency graph from the given distributions. 
:parameter dists: a list of distributions :type dists: list of :class:`distutils2.database.InstalledDistribution` and :class:`distutils2.database.EggInfoDistribution` instances :rtype: a :class:`DependencyGraph` instance """ scheme = get_scheme(scheme) graph = DependencyGraph() provided = {} # maps names to lists of (version, dist) tuples # first, build the graph and find out what's provided for dist in dists: graph.add_distribution(dist) for p in dist.provides: name, version = parse_name_and_version(p) logger.debug('Add to provided: %s, %s, %s', name, version, dist) provided.setdefault(name, []).append((version, dist)) # now make the edges for dist in dists: requires = (dist.run_requires | dist.meta_requires | dist.build_requires | dist.dev_requires) for req in requires: try: matcher = scheme.matcher(req) except UnsupportedVersionError: # XXX compat-mode if cannot read the version logger.warning('could not read version %r - using name only', req) name = req.split()[0] matcher = scheme.matcher(name) name = matcher.key # case-insensitive matched = False if name in provided: for version, provider in provided[name]: try: match = matcher.match(version) except UnsupportedVersionError: match = False if match: graph.add_edge(dist, provider, req) matched = True break if not matched: graph.add_missing(dist, req) return graph def get_dependent_dists(dists, dist): """Recursively generate a list of distributions from *dists* that are dependent on *dist*. :param dists: a list of distributions :param dist: a distribution, member of *dists* for which we are interested """ if dist not in dists: raise DistlibException('given distribution %r is not a member ' 'of the list' % dist.name) graph = make_graph(dists) dep = [dist] # dependent distributions todo = graph.reverse_list[dist] # list of nodes we should inspect while todo: d = todo.pop() dep.append(d) for succ in graph.reverse_list[d]: if succ not in dep: todo.append(succ) dep.pop(0) # remove dist from dep, was there to prevent infinite loops return dep def get_required_dists(dists, dist): """Recursively generate a list of distributions from *dists* that are required by *dist*. :param dists: a list of distributions :param dist: a distribution, member of *dists* for which we are interested """ if dist not in dists: raise DistlibException('given distribution %r is not a member ' 'of the list' % dist.name) graph = make_graph(dists) req = [] # required distributions todo = graph.adjacency_list[dist] # list of nodes we should inspect while todo: d = todo.pop()[0] req.append(d) for pred in graph.adjacency_list[d]: if pred not in req: todo.append(pred) return req def make_dist(name, version, **kwargs): """ A convenience method for making a dist given just a name and version. """ summary = kwargs.pop('summary', 'Placeholder for summary') md = Metadata(**kwargs) md.name = name md.version = version md.summary = summary or 'Placeholder for summary' return Distribution(md)
0
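# --- Illustrative usage (not part of the vendored source) ---
# A minimal sketch of driving the DependencyGraph defined above by hand.
# DistStub is a hypothetical stand-in; the real API is normally fed
# InstalledDistribution / EggInfoDistribution objects via make_graph().
# Assumes the module above is importable as pip._vendor.distlib.database.

from pip._vendor.distlib.database import DependencyGraph

class DistStub(object):
    def __init__(self, name, version):
        self.name, self.version = name, version

graph = DependencyGraph()
a, b = DistStub('A', '1.0'), DistStub('B', '2.0')
graph.add_distribution(a)
graph.add_distribution(b)
graph.add_edge(a, b, '>=2.0')              # A depends on B
ordered, cyclic = graph.topological_sort()
# ordered lists leaf dependencies first ([B, A] here); cyclic is empty
# unless circular dependencies prevented a full sort.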
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/__init__.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2019 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import logging

__version__ = '0.3.1'

class DistlibException(Exception):
    pass

try:
    from logging import NullHandler
except ImportError: # pragma: no cover
    class NullHandler(logging.Handler):
        def handle(self, record): pass
        def emit(self, record): pass
        def createLock(self): self.lock = None

logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
0
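# --- Illustrative usage (not part of the vendored source) ---
# Why the NullHandler fallback above matters: the library can log freely
# while staying silent until the *application* configures logging. A sketch
# (the vendored logger is actually named 'pip._vendor.distlib'):

import logging

logging.getLogger('distlib').debug('silent: only NullHandler is attached')
logging.basicConfig(level=logging.DEBUG)   # the application opts in
logging.getLogger('distlib').debug('now emitted via the root handler')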
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/markers.py
# -*- coding: utf-8 -*- # # Copyright (C) 2012-2017 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # """ Parser for the environment markers micro-language defined in PEP 508. """ # Note: In PEP 345, the micro-language was Python compatible, so the ast # module could be used to parse it. However, PEP 508 introduced operators such # as ~= and === which aren't in Python, necessitating a different approach. import os import sys import platform import re from .compat import python_implementation, urlparse, string_types from .util import in_venv, parse_marker __all__ = ['interpret'] def _is_literal(o): if not isinstance(o, string_types) or not o: return False return o[0] in '\'"' class Evaluator(object): """ This class is used to evaluate marker expessions. """ operations = { '==': lambda x, y: x == y, '===': lambda x, y: x == y, '~=': lambda x, y: x == y or x > y, '!=': lambda x, y: x != y, '<': lambda x, y: x < y, '<=': lambda x, y: x == y or x < y, '>': lambda x, y: x > y, '>=': lambda x, y: x == y or x > y, 'and': lambda x, y: x and y, 'or': lambda x, y: x or y, 'in': lambda x, y: x in y, 'not in': lambda x, y: x not in y, } def evaluate(self, expr, context): """ Evaluate a marker expression returned by the :func:`parse_requirement` function in the specified context. """ if isinstance(expr, string_types): if expr[0] in '\'"': result = expr[1:-1] else: if expr not in context: raise SyntaxError('unknown variable: %s' % expr) result = context[expr] else: assert isinstance(expr, dict) op = expr['op'] if op not in self.operations: raise NotImplementedError('op not implemented: %s' % op) elhs = expr['lhs'] erhs = expr['rhs'] if _is_literal(expr['lhs']) and _is_literal(expr['rhs']): raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs)) lhs = self.evaluate(elhs, context) rhs = self.evaluate(erhs, context) result = self.operations[op](lhs, rhs) return result def default_context(): def format_full_version(info): version = '%s.%s.%s' % (info.major, info.minor, info.micro) kind = info.releaselevel if kind != 'final': version += kind[0] + str(info.serial) return version if hasattr(sys, 'implementation'): implementation_version = format_full_version(sys.implementation.version) implementation_name = sys.implementation.name else: implementation_version = '0' implementation_name = '' result = { 'implementation_name': implementation_name, 'implementation_version': implementation_version, 'os_name': os.name, 'platform_machine': platform.machine(), 'platform_python_implementation': platform.python_implementation(), 'platform_release': platform.release(), 'platform_system': platform.system(), 'platform_version': platform.version(), 'platform_in_venv': str(in_venv()), 'python_full_version': platform.python_version(), 'python_version': platform.python_version()[:3], 'sys_platform': sys.platform, } return result DEFAULT_CONTEXT = default_context() del default_context evaluator = Evaluator() def interpret(marker, execution_context=None): """ Interpret a marker and return a result depending on environment. :param marker: The marker to interpret. :type marker: str :param execution_context: The context used for name lookup. 
    :type execution_context: mapping
    """
    try:
        expr, rest = parse_marker(marker)
    except Exception as e:
        raise SyntaxError('Unable to interpret marker syntax: %s: %s' % (marker, e))
    if rest and rest[0] != '#':
        raise SyntaxError('unexpected trailing data in marker: %s: %s' % (marker,
                                                                          rest))
    context = dict(DEFAULT_CONTEXT)
    if execution_context:
        context.update(execution_context)
    return evaluator.evaluate(expr, context)
0
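# --- Illustrative usage (not part of the vendored source) ---
# A sketch of evaluating PEP 508 markers with interpret() from the module
# above (importable here as pip._vendor.distlib.markers). The second call
# shows execution_context supplying a variable ('extra') that is not part
# of DEFAULT_CONTEXT.

from pip._vendor.distlib.markers import interpret

print(interpret('sys_platform == "linux"'))        # bool, platform-dependent
print(interpret('python_version >= "2.7" and extra == "test"',
                {'extra': 'test'}))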
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/resources.py
# -*- coding: utf-8 -*- # # Copyright (C) 2013-2017 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # from __future__ import unicode_literals import bisect import io import logging import os import pkgutil import shutil import sys import types import zipimport from . import DistlibException from .util import cached_property, get_cache_base, path_to_cache_dir, Cache logger = logging.getLogger(__name__) cache = None # created when needed class ResourceCache(Cache): def __init__(self, base=None): if base is None: # Use native string to avoid issues on 2.x: see Python #20140. base = os.path.join(get_cache_base(), str('resource-cache')) super(ResourceCache, self).__init__(base) def is_stale(self, resource, path): """ Is the cache stale for the given resource? :param resource: The :class:`Resource` being cached. :param path: The path of the resource in the cache. :return: True if the cache is stale. """ # Cache invalidation is a hard problem :-) return True def get(self, resource): """ Get a resource into the cache, :param resource: A :class:`Resource` instance. :return: The pathname of the resource in the cache. """ prefix, path = resource.finder.get_cache_info(resource) if prefix is None: result = path else: result = os.path.join(self.base, self.prefix_to_dir(prefix), path) dirname = os.path.dirname(result) if not os.path.isdir(dirname): os.makedirs(dirname) if not os.path.exists(result): stale = True else: stale = self.is_stale(resource, path) if stale: # write the bytes of the resource to the cache location with open(result, 'wb') as f: f.write(resource.bytes) return result class ResourceBase(object): def __init__(self, finder, name): self.finder = finder self.name = name class Resource(ResourceBase): """ A class representing an in-package resource, such as a data file. This is not normally instantiated by user code, but rather by a :class:`ResourceFinder` which manages the resource. """ is_container = False # Backwards compatibility def as_stream(self): """ Get the resource as a stream. This is not a property to make it obvious that it returns a new stream each time. """ return self.finder.get_stream(self) @cached_property def file_path(self): global cache if cache is None: cache = ResourceCache() return cache.get(self) @cached_property def bytes(self): return self.finder.get_bytes(self) @cached_property def size(self): return self.finder.get_size(self) class ResourceContainer(ResourceBase): is_container = True # Backwards compatibility @cached_property def resources(self): return self.finder.get_resources(self) class ResourceFinder(object): """ Resource finder for file system resources. 
""" if sys.platform.startswith('java'): skipped_extensions = ('.pyc', '.pyo', '.class') else: skipped_extensions = ('.pyc', '.pyo') def __init__(self, module): self.module = module self.loader = getattr(module, '__loader__', None) self.base = os.path.dirname(getattr(module, '__file__', '')) def _adjust_path(self, path): return os.path.realpath(path) def _make_path(self, resource_name): # Issue #50: need to preserve type of path on Python 2.x # like os.path._get_sep if isinstance(resource_name, bytes): # should only happen on 2.x sep = b'/' else: sep = '/' parts = resource_name.split(sep) parts.insert(0, self.base) result = os.path.join(*parts) return self._adjust_path(result) def _find(self, path): return os.path.exists(path) def get_cache_info(self, resource): return None, resource.path def find(self, resource_name): path = self._make_path(resource_name) if not self._find(path): result = None else: if self._is_directory(path): result = ResourceContainer(self, resource_name) else: result = Resource(self, resource_name) result.path = path return result def get_stream(self, resource): return open(resource.path, 'rb') def get_bytes(self, resource): with open(resource.path, 'rb') as f: return f.read() def get_size(self, resource): return os.path.getsize(resource.path) def get_resources(self, resource): def allowed(f): return (f != '__pycache__' and not f.endswith(self.skipped_extensions)) return set([f for f in os.listdir(resource.path) if allowed(f)]) def is_container(self, resource): return self._is_directory(resource.path) _is_directory = staticmethod(os.path.isdir) def iterator(self, resource_name): resource = self.find(resource_name) if resource is not None: todo = [resource] while todo: resource = todo.pop(0) yield resource if resource.is_container: rname = resource.name for name in resource.resources: if not rname: new_name = name else: new_name = '/'.join([rname, name]) child = self.find(new_name) if child.is_container: todo.append(child) else: yield child class ZipResourceFinder(ResourceFinder): """ Resource finder for resources in .zip files. 
""" def __init__(self, module): super(ZipResourceFinder, self).__init__(module) archive = self.loader.archive self.prefix_len = 1 + len(archive) # PyPy doesn't have a _files attr on zipimporter, and you can't set one if hasattr(self.loader, '_files'): self._files = self.loader._files else: self._files = zipimport._zip_directory_cache[archive] self.index = sorted(self._files) def _adjust_path(self, path): return path def _find(self, path): path = path[self.prefix_len:] if path in self._files: result = True else: if path and path[-1] != os.sep: path = path + os.sep i = bisect.bisect(self.index, path) try: result = self.index[i].startswith(path) except IndexError: result = False if not result: logger.debug('_find failed: %r %r', path, self.loader.prefix) else: logger.debug('_find worked: %r %r', path, self.loader.prefix) return result def get_cache_info(self, resource): prefix = self.loader.archive path = resource.path[1 + len(prefix):] return prefix, path def get_bytes(self, resource): return self.loader.get_data(resource.path) def get_stream(self, resource): return io.BytesIO(self.get_bytes(resource)) def get_size(self, resource): path = resource.path[self.prefix_len:] return self._files[path][3] def get_resources(self, resource): path = resource.path[self.prefix_len:] if path and path[-1] != os.sep: path += os.sep plen = len(path) result = set() i = bisect.bisect(self.index, path) while i < len(self.index): if not self.index[i].startswith(path): break s = self.index[i][plen:] result.add(s.split(os.sep, 1)[0]) # only immediate children i += 1 return result def _is_directory(self, path): path = path[self.prefix_len:] if path and path[-1] != os.sep: path += os.sep i = bisect.bisect(self.index, path) try: result = self.index[i].startswith(path) except IndexError: result = False return result _finder_registry = { type(None): ResourceFinder, zipimport.zipimporter: ZipResourceFinder } try: # In Python 3.6, _frozen_importlib -> _frozen_importlib_external try: import _frozen_importlib_external as _fi except ImportError: import _frozen_importlib as _fi _finder_registry[_fi.SourceFileLoader] = ResourceFinder _finder_registry[_fi.FileFinder] = ResourceFinder del _fi except (ImportError, AttributeError): pass def register_finder(loader, finder_maker): _finder_registry[type(loader)] = finder_maker _finder_cache = {} def finder(package): """ Return a resource finder for a package. :param package: The name of the package. :return: A :class:`ResourceFinder` instance for the package. """ if package in _finder_cache: result = _finder_cache[package] else: if package not in sys.modules: __import__(package) module = sys.modules[package] path = getattr(module, '__path__', None) if path is None: raise DistlibException('You cannot get a finder for a module, ' 'only for a package') loader = getattr(module, '__loader__', None) finder_maker = _finder_registry.get(type(loader)) if finder_maker is None: raise DistlibException('Unable to locate finder for %r' % package) result = finder_maker(module) _finder_cache[package] = result return result _dummy_module = types.ModuleType(str('__dummy__')) def finder_for_path(path): """ Return a resource finder for a path, which should represent a container. :param path: The path. :return: A :class:`ResourceFinder` instance for the path. 
""" result = None # calls any path hooks, gets importer into cache pkgutil.get_importer(path) loader = sys.path_importer_cache.get(path) finder = _finder_registry.get(type(loader)) if finder: module = _dummy_module module.__file__ = os.path.join(path, '') module.__loader__ = loader result = finder(module) return result
0
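# --- Illustrative usage (not part of the vendored source) ---
# A sketch of reading package data through the finder() API defined above.
# 'mypkg' and 'data/config.json' are hypothetical; the package must be
# importable, and resource names always use '/' separators.

from pip._vendor.distlib.resources import finder

f = finder('mypkg')
resource = f.find('data/config.json')
if resource is not None and not resource.is_container:
    raw = resource.bytes               # whole payload, cached
    with resource.as_stream() as s:    # fresh binary stream per call
        header = s.read(16)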
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/scripts.py
# -*- coding: utf-8 -*- # # Copyright (C) 2013-2015 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # from io import BytesIO import logging import os import re import struct import sys from .compat import sysconfig, detect_encoding, ZipFile from .resources import finder from .util import (FileOperator, get_export_entry, convert_path, get_executable, in_venv) logger = logging.getLogger(__name__) _DEFAULT_MANIFEST = ''' <?xml version="1.0" encoding="UTF-8" standalone="yes"?> <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0"> <assemblyIdentity version="1.0.0.0" processorArchitecture="X86" name="%s" type="win32"/> <!-- Identify the application security requirements. --> <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3"> <security> <requestedPrivileges> <requestedExecutionLevel level="asInvoker" uiAccess="false"/> </requestedPrivileges> </security> </trustInfo> </assembly>'''.strip() # check if Python is called on the first line with this expression FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$') SCRIPT_TEMPLATE = r'''# -*- coding: utf-8 -*- import re import sys from %(module)s import %(import_name)s if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) sys.exit(%(func)s()) ''' def enquote_executable(executable): if ' ' in executable: # make sure we quote only the executable in case of env # for example /usr/bin/env "/dir with spaces/bin/jython" # instead of "/usr/bin/env /dir with spaces/bin/jython" # otherwise whole if executable.startswith('/usr/bin/env '): env, _executable = executable.split(' ', 1) if ' ' in _executable and not _executable.startswith('"'): executable = '%s "%s"' % (env, _executable) else: if not executable.startswith('"'): executable = '"%s"' % executable return executable # Keep the old name around (for now), as there is at least one project using it! _enquote_executable = enquote_executable class ScriptMaker(object): """ A class to copy or create scripts from source scripts or callable specifications. """ script_template = SCRIPT_TEMPLATE executable = None # for shebangs def __init__(self, source_dir, target_dir, add_launchers=True, dry_run=False, fileop=None): self.source_dir = source_dir self.target_dir = target_dir self.add_launchers = add_launchers self.force = False self.clobber = False # It only makes sense to set mode bits on POSIX. self.set_mode = (os.name == 'posix') or (os.name == 'java' and os._name == 'posix') self.variants = set(('', 'X.Y')) self._fileop = fileop or FileOperator(dry_run) self._is_nt = os.name == 'nt' or ( os.name == 'java' and os._name == 'nt') self.version_info = sys.version_info def _get_alternate_executable(self, executable, options): if options.get('gui', False) and self._is_nt: # pragma: no cover dn, fn = os.path.split(executable) fn = fn.replace('python', 'pythonw') executable = os.path.join(dn, fn) return executable if sys.platform.startswith('java'): # pragma: no cover def _is_shell(self, executable): """ Determine if the specified executable is a script (contains a #! line) """ try: with open(executable) as fp: return fp.read(2) == '#!' except (OSError, IOError): logger.warning('Failed to open %s', executable) return False def _fix_jython_executable(self, executable): if self._is_shell(executable): # Workaround for Jython is not needed on Linux systems. 
import java if java.lang.System.getProperty('os.name') == 'Linux': return executable elif executable.lower().endswith('jython.exe'): # Use wrapper exe for Jython on Windows return executable return '/usr/bin/env %s' % executable def _build_shebang(self, executable, post_interp): """ Build a shebang line. In the simple case (on Windows, or a shebang line which is not too long or contains spaces) use a simple formulation for the shebang. Otherwise, use /bin/sh as the executable, with a contrived shebang which allows the script to run either under Python or sh, using suitable quoting. Thanks to Harald Nordgren for his input. See also: http://www.in-ulm.de/~mascheck/various/shebang/#length https://hg.mozilla.org/mozilla-central/file/tip/mach """ if os.name != 'posix': simple_shebang = True else: # Add 3 for '#!' prefix and newline suffix. shebang_length = len(executable) + len(post_interp) + 3 if sys.platform == 'darwin': max_shebang_length = 512 else: max_shebang_length = 127 simple_shebang = ((b' ' not in executable) and (shebang_length <= max_shebang_length)) if simple_shebang: result = b'#!' + executable + post_interp + b'\n' else: result = b'#!/bin/sh\n' result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n' result += b"' '''" return result def _get_shebang(self, encoding, post_interp=b'', options=None): enquote = True if self.executable: executable = self.executable enquote = False # assume this will be taken care of elif not sysconfig.is_python_build(): executable = get_executable() elif in_venv(): # pragma: no cover executable = os.path.join(sysconfig.get_path('scripts'), 'python%s' % sysconfig.get_config_var('EXE')) else: # pragma: no cover executable = os.path.join( sysconfig.get_config_var('BINDIR'), 'python%s%s' % (sysconfig.get_config_var('VERSION'), sysconfig.get_config_var('EXE'))) if options: executable = self._get_alternate_executable(executable, options) if sys.platform.startswith('java'): # pragma: no cover executable = self._fix_jython_executable(executable) # Normalise case for Windows - COMMENTED OUT # executable = os.path.normcase(executable) # N.B. The normalising operation above has been commented out: See # issue #124. Although paths in Windows are generally case-insensitive, # they aren't always. For example, a path containing a ẞ (which is a # LATIN CAPITAL LETTER SHARP S - U+1E9E) is normcased to ß (which is a # LATIN SMALL LETTER SHARP S' - U+00DF). The two are not considered by # Windows as equivalent in path names. # If the user didn't specify an executable, it may be necessary to # cater for executable paths with spaces (not uncommon on Windows) if enquote: executable = enquote_executable(executable) # Issue #51: don't use fsencode, since we later try to # check that the shebang is decodable using utf-8. executable = executable.encode('utf-8') # in case of IronPython, play safe and enable frames support if (sys.platform == 'cli' and '-X:Frames' not in post_interp and '-X:FullFrames' not in post_interp): # pragma: no cover post_interp += b' -X:Frames' shebang = self._build_shebang(executable, post_interp) # Python parser starts to read a script using UTF-8 until # it gets a #coding:xxx cookie. The shebang has to be the # first line of a file, the #coding:xxx cookie cannot be # written before. So the shebang has to be decodable from # UTF-8. 
try: shebang.decode('utf-8') except UnicodeDecodeError: # pragma: no cover raise ValueError( 'The shebang (%r) is not decodable from utf-8' % shebang) # If the script is encoded to a custom encoding (use a # #coding:xxx cookie), the shebang has to be decodable from # the script encoding too. if encoding != 'utf-8': try: shebang.decode(encoding) except UnicodeDecodeError: # pragma: no cover raise ValueError( 'The shebang (%r) is not decodable ' 'from the script encoding (%r)' % (shebang, encoding)) return shebang def _get_script_text(self, entry): return self.script_template % dict(module=entry.prefix, import_name=entry.suffix.split('.')[0], func=entry.suffix) manifest = _DEFAULT_MANIFEST def get_manifest(self, exename): base = os.path.basename(exename) return self.manifest % base def _write_script(self, names, shebang, script_bytes, filenames, ext): use_launcher = self.add_launchers and self._is_nt linesep = os.linesep.encode('utf-8') if not shebang.endswith(linesep): shebang += linesep if not use_launcher: script_bytes = shebang + script_bytes else: # pragma: no cover if ext == 'py': launcher = self._get_launcher('t') else: launcher = self._get_launcher('w') stream = BytesIO() with ZipFile(stream, 'w') as zf: zf.writestr('__main__.py', script_bytes) zip_data = stream.getvalue() script_bytes = launcher + shebang + zip_data for name in names: outname = os.path.join(self.target_dir, name) if use_launcher: # pragma: no cover n, e = os.path.splitext(outname) if e.startswith('.py'): outname = n outname = '%s.exe' % outname try: self._fileop.write_binary_file(outname, script_bytes) except Exception: # Failed writing an executable - it might be in use. logger.warning('Failed to write executable - trying to ' 'use .deleteme logic') dfname = '%s.deleteme' % outname if os.path.exists(dfname): os.remove(dfname) # Not allowed to fail here os.rename(outname, dfname) # nor here self._fileop.write_binary_file(outname, script_bytes) logger.debug('Able to replace executable using ' '.deleteme logic') try: os.remove(dfname) except Exception: pass # still in use - ignore error else: if self._is_nt and not outname.endswith('.' 
+ ext): # pragma: no cover outname = '%s.%s' % (outname, ext) if os.path.exists(outname) and not self.clobber: logger.warning('Skipping existing file %s', outname) continue self._fileop.write_binary_file(outname, script_bytes) if self.set_mode: self._fileop.set_executable_mode([outname]) filenames.append(outname) def _make_script(self, entry, filenames, options=None): post_interp = b'' if options: args = options.get('interpreter_args', []) if args: args = ' %s' % ' '.join(args) post_interp = args.encode('utf-8') shebang = self._get_shebang('utf-8', post_interp, options=options) script = self._get_script_text(entry).encode('utf-8') name = entry.name scriptnames = set() if '' in self.variants: scriptnames.add(name) if 'X' in self.variants: scriptnames.add('%s%s' % (name, self.version_info[0])) if 'X.Y' in self.variants: scriptnames.add('%s-%s.%s' % (name, self.version_info[0], self.version_info[1])) if options and options.get('gui', False): ext = 'pyw' else: ext = 'py' self._write_script(scriptnames, shebang, script, filenames, ext) def _copy_script(self, script, filenames): adjust = False script = os.path.join(self.source_dir, convert_path(script)) outname = os.path.join(self.target_dir, os.path.basename(script)) if not self.force and not self._fileop.newer(script, outname): logger.debug('not copying %s (up-to-date)', script) return # Always open the file, but ignore failures in dry-run mode -- # that way, we'll get accurate feedback if we can read the # script. try: f = open(script, 'rb') except IOError: # pragma: no cover if not self.dry_run: raise f = None else: first_line = f.readline() if not first_line: # pragma: no cover logger.warning('%s: %s is an empty file (skipping)', self.get_command_name(), script) return match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n')) if match: adjust = True post_interp = match.group(1) or b'' if not adjust: if f: f.close() self._fileop.copy_file(script, outname) if self.set_mode: self._fileop.set_executable_mode([outname]) filenames.append(outname) else: logger.info('copying and adjusting %s -> %s', script, self.target_dir) if not self._fileop.dry_run: encoding, lines = detect_encoding(f.readline) f.seek(0) shebang = self._get_shebang(encoding, post_interp) if b'pythonw' in first_line: # pragma: no cover ext = 'pyw' else: ext = 'py' n = os.path.basename(outname) self._write_script([n], shebang, f.read(), filenames, ext) if f: f.close() @property def dry_run(self): return self._fileop.dry_run @dry_run.setter def dry_run(self, value): self._fileop.dry_run = value if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover # Executable launcher support. # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/ def _get_launcher(self, kind): if struct.calcsize('P') == 8: # 64-bit bits = '64' else: bits = '32' name = '%s%s.exe' % (kind, bits) # Issue 31: don't hardcode an absolute package name, but # determine it relative to the current package distlib_package = __name__.rsplit('.', 1)[0] resource = finder(distlib_package).find(name) if not resource: msg = ('Unable to find resource %s in package %s' % (name, distlib_package)) raise ValueError(msg) return resource.bytes # Public API follows def make(self, specification, options=None): """ Make a script. :param specification: The specification, which is either a valid export entry specification (to make a script from a callable) or a filename (to make a script by copying from a source location). 
        :param options: A dictionary of options controlling script
                        generation.
        :return: A list of all absolute pathnames written to.
        """
        filenames = []
        entry = get_export_entry(specification)
        if entry is None:
            self._copy_script(specification, filenames)
        else:
            self._make_script(entry, filenames, options=options)
        return filenames

    def make_multiple(self, specifications, options=None):
        """
        Take a list of specifications and make scripts from them.

        :param specifications: A list of specifications.
        :return: A list of all absolute pathnames written to.
        """
        filenames = []
        for specification in specifications:
            filenames.extend(self.make(specification, options))
        return filenames
0
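# --- Illustrative usage (not part of the vendored source) ---
# A sketch of generating a console script with the ScriptMaker above.
# 'mytool = mypkg.cli:main' is a hypothetical export entry of the
# "name = module:callable" form accepted by make(); on POSIX this writes
# a #!-wrapper script, on Windows a launcher executable.

from pip._vendor.distlib.scripts import ScriptMaker

maker = ScriptMaker(source_dir=None, target_dir='/tmp/bin')
maker.variants = set([''])             # write just 'mytool', no 'mytool-X.Y'
written = maker.make('mytool = mypkg.cli:main')
print(written)                         # absolute paths that were written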
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/wheel.py
# -*- coding: utf-8 -*- # # Copyright (C) 2013-2017 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # from __future__ import unicode_literals import base64 import codecs import datetime import distutils.util from email import message_from_file import hashlib import imp import json import logging import os import posixpath import re import shutil import sys import tempfile import zipfile from . import __version__, DistlibException from .compat import sysconfig, ZipFile, fsdecode, text_type, filter from .database import InstalledDistribution from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME) from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache, cached_property, get_cache_base, read_exports, tempdir) from .version import NormalizedVersion, UnsupportedVersionError logger = logging.getLogger(__name__) cache = None # created when needed if hasattr(sys, 'pypy_version_info'): # pragma: no cover IMP_PREFIX = 'pp' elif sys.platform.startswith('java'): # pragma: no cover IMP_PREFIX = 'jy' elif sys.platform == 'cli': # pragma: no cover IMP_PREFIX = 'ip' else: IMP_PREFIX = 'cp' VER_SUFFIX = sysconfig.get_config_var('py_version_nodot') if not VER_SUFFIX: # pragma: no cover VER_SUFFIX = '%s%s' % sys.version_info[:2] PYVER = 'py' + VER_SUFFIX IMPVER = IMP_PREFIX + VER_SUFFIX ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_') ABI = sysconfig.get_config_var('SOABI') if ABI and ABI.startswith('cpython-'): ABI = ABI.replace('cpython-', 'cp') else: def _derive_abi(): parts = ['cp', VER_SUFFIX] if sysconfig.get_config_var('Py_DEBUG'): parts.append('d') if sysconfig.get_config_var('WITH_PYMALLOC'): parts.append('m') if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4: parts.append('u') return ''.join(parts) ABI = _derive_abi() del _derive_abi FILENAME_RE = re.compile(r''' (?P<nm>[^-]+) -(?P<vn>\d+[^-]*) (-(?P<bn>\d+[^-]*))? -(?P<py>\w+\d+(\.\w+\d+)*) -(?P<bi>\w+) -(?P<ar>\w+(\.\w+)*) \.whl$ ''', re.IGNORECASE | re.VERBOSE) NAME_VERSION_RE = re.compile(r''' (?P<nm>[^-]+) -(?P<vn>\d+[^-]*) (-(?P<bn>\d+[^-]*))?$ ''', re.IGNORECASE | re.VERBOSE) SHEBANG_RE = re.compile(br'\s*#![^\r\n]*') SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$') SHEBANG_PYTHON = b'#!python' SHEBANG_PYTHONW = b'#!pythonw' if os.sep == '/': to_posix = lambda o: o else: to_posix = lambda o: o.replace(os.sep, '/') class Mounter(object): def __init__(self): self.impure_wheels = {} self.libs = {} def add(self, pathname, extensions): self.impure_wheels[pathname] = extensions self.libs.update(extensions) def remove(self, pathname): extensions = self.impure_wheels.pop(pathname) for k, v in extensions: if k in self.libs: del self.libs[k] def find_module(self, fullname, path=None): if fullname in self.libs: result = self else: result = None return result def load_module(self, fullname): if fullname in sys.modules: result = sys.modules[fullname] else: if fullname not in self.libs: raise ImportError('unable to find extension for %s' % fullname) result = imp.load_dynamic(fullname, self.libs[fullname]) result.__loader__ = self parts = fullname.rsplit('.', 1) if len(parts) > 1: result.__package__ = parts[0] return result _hook = Mounter() class Wheel(object): """ Class to build and install from Wheel files (PEP 427). 
""" wheel_version = (1, 1) hash_kind = 'sha256' def __init__(self, filename=None, sign=False, verify=False): """ Initialise an instance using a (valid) filename. """ self.sign = sign self.should_verify = verify self.buildver = '' self.pyver = [PYVER] self.abi = ['none'] self.arch = ['any'] self.dirname = os.getcwd() if filename is None: self.name = 'dummy' self.version = '0.1' self._filename = self.filename else: m = NAME_VERSION_RE.match(filename) if m: info = m.groupdict('') self.name = info['nm'] # Reinstate the local version separator self.version = info['vn'].replace('_', '-') self.buildver = info['bn'] self._filename = self.filename else: dirname, filename = os.path.split(filename) m = FILENAME_RE.match(filename) if not m: raise DistlibException('Invalid name or ' 'filename: %r' % filename) if dirname: self.dirname = os.path.abspath(dirname) self._filename = filename info = m.groupdict('') self.name = info['nm'] self.version = info['vn'] self.buildver = info['bn'] self.pyver = info['py'].split('.') self.abi = info['bi'].split('.') self.arch = info['ar'].split('.') @property def filename(self): """ Build and return a filename from the various components. """ if self.buildver: buildver = '-' + self.buildver else: buildver = '' pyver = '.'.join(self.pyver) abi = '.'.join(self.abi) arch = '.'.join(self.arch) # replace - with _ as a local version separator version = self.version.replace('-', '_') return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver, pyver, abi, arch) @property def exists(self): path = os.path.join(self.dirname, self.filename) return os.path.isfile(path) @property def tags(self): for pyver in self.pyver: for abi in self.abi: for arch in self.arch: yield pyver, abi, arch @cached_property def metadata(self): pathname = os.path.join(self.dirname, self.filename) name_ver = '%s-%s' % (self.name, self.version) info_dir = '%s.dist-info' % name_ver wrapper = codecs.getreader('utf-8') with ZipFile(pathname, 'r') as zf: wheel_metadata = self.get_wheel_metadata(zf) wv = wheel_metadata['Wheel-Version'].split('.', 1) file_version = tuple([int(i) for i in wv]) # if file_version < (1, 1): # fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME, # LEGACY_METADATA_FILENAME] # else: # fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME] fns = [WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME] result = None for fn in fns: try: metadata_filename = posixpath.join(info_dir, fn) with zf.open(metadata_filename) as bf: wf = wrapper(bf) result = Metadata(fileobj=wf) if result: break except KeyError: pass if not result: raise ValueError('Invalid wheel, because metadata is ' 'missing: looked in %s' % ', '.join(fns)) return result def get_wheel_metadata(self, zf): name_ver = '%s-%s' % (self.name, self.version) info_dir = '%s.dist-info' % name_ver metadata_filename = posixpath.join(info_dir, 'WHEEL') with zf.open(metadata_filename) as bf: wf = codecs.getreader('utf-8')(bf) message = message_from_file(wf) return dict(message) @cached_property def info(self): pathname = os.path.join(self.dirname, self.filename) with ZipFile(pathname, 'r') as zf: result = self.get_wheel_metadata(zf) return result def process_shebang(self, data): m = SHEBANG_RE.match(data) if m: end = m.end() shebang, data_after_shebang = data[:end], data[end:] # Preserve any arguments after the interpreter if b'pythonw' in shebang.lower(): shebang_python = SHEBANG_PYTHONW else: shebang_python = SHEBANG_PYTHON m = SHEBANG_DETAIL_RE.match(shebang) if m: args = b' ' + m.groups()[-1] else: args = b'' shebang = shebang_python + args data = 
shebang + data_after_shebang else: cr = data.find(b'\r') lf = data.find(b'\n') if cr < 0 or cr > lf: term = b'\n' else: if data[cr:cr + 2] == b'\r\n': term = b'\r\n' else: term = b'\r' data = SHEBANG_PYTHON + term + data return data def get_hash(self, data, hash_kind=None): if hash_kind is None: hash_kind = self.hash_kind try: hasher = getattr(hashlib, hash_kind) except AttributeError: raise DistlibException('Unsupported hash algorithm: %r' % hash_kind) result = hasher(data).digest() result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii') return hash_kind, result def write_record(self, records, record_path, base): records = list(records) # make a copy, as mutated p = to_posix(os.path.relpath(record_path, base)) records.append((p, '', '')) with CSVWriter(record_path) as writer: for row in records: writer.writerow(row) def write_records(self, info, libdir, archive_paths): records = [] distinfo, info_dir = info hasher = getattr(hashlib, self.hash_kind) for ap, p in archive_paths: with open(p, 'rb') as f: data = f.read() digest = '%s=%s' % self.get_hash(data) size = os.path.getsize(p) records.append((ap, digest, size)) p = os.path.join(distinfo, 'RECORD') self.write_record(records, p, libdir) ap = to_posix(os.path.join(info_dir, 'RECORD')) archive_paths.append((ap, p)) def build_zip(self, pathname, archive_paths): with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf: for ap, p in archive_paths: logger.debug('Wrote %s to %s in wheel', p, ap) zf.write(p, ap) def build(self, paths, tags=None, wheel_version=None): """ Build a wheel from files in specified paths, and use any specified tags when determining the name of the wheel. """ if tags is None: tags = {} libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0] if libkey == 'platlib': is_pure = 'false' default_pyver = [IMPVER] default_abi = [ABI] default_arch = [ARCH] else: is_pure = 'true' default_pyver = [PYVER] default_abi = ['none'] default_arch = ['any'] self.pyver = tags.get('pyver', default_pyver) self.abi = tags.get('abi', default_abi) self.arch = tags.get('arch', default_arch) libdir = paths[libkey] name_ver = '%s-%s' % (self.name, self.version) data_dir = '%s.data' % name_ver info_dir = '%s.dist-info' % name_ver archive_paths = [] # First, stuff which is not in site-packages for key in ('data', 'headers', 'scripts'): if key not in paths: continue path = paths[key] if os.path.isdir(path): for root, dirs, files in os.walk(path): for fn in files: p = fsdecode(os.path.join(root, fn)) rp = os.path.relpath(p, path) ap = to_posix(os.path.join(data_dir, key, rp)) archive_paths.append((ap, p)) if key == 'scripts' and not p.endswith('.exe'): with open(p, 'rb') as f: data = f.read() data = self.process_shebang(data) with open(p, 'wb') as f: f.write(data) # Now, stuff which is in site-packages, other than the # distinfo stuff. path = libdir distinfo = None for root, dirs, files in os.walk(path): if root == path: # At the top level only, save distinfo for later # and skip it for now for i, dn in enumerate(dirs): dn = fsdecode(dn) if dn.endswith('.dist-info'): distinfo = os.path.join(root, dn) del dirs[i] break assert distinfo, '.dist-info directory expected, not found' for fn in files: # comment out next suite to leave .pyc files in if fsdecode(fn).endswith(('.pyc', '.pyo')): continue p = os.path.join(root, fn) rp = to_posix(os.path.relpath(p, path)) archive_paths.append((rp, p)) # Now distinfo. Assumed to be flat, i.e. os.listdir is enough. 
files = os.listdir(distinfo) for fn in files: if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'): p = fsdecode(os.path.join(distinfo, fn)) ap = to_posix(os.path.join(info_dir, fn)) archive_paths.append((ap, p)) wheel_metadata = [ 'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version), 'Generator: distlib %s' % __version__, 'Root-Is-Purelib: %s' % is_pure, ] for pyver, abi, arch in self.tags: wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch)) p = os.path.join(distinfo, 'WHEEL') with open(p, 'w') as f: f.write('\n'.join(wheel_metadata)) ap = to_posix(os.path.join(info_dir, 'WHEEL')) archive_paths.append((ap, p)) # sort the entries by archive path. Not needed by any spec, but it # keeps the archive listing and RECORD tidier than they would otherwise # be. Use the number of path segments to keep directory entries together, # and keep the dist-info stuff at the end. def sorter(t): ap = t[0] n = ap.count('/') if '.dist-info' in ap: n += 10000 return (n, ap) archive_paths = sorted(archive_paths, key=sorter) # Now, at last, RECORD. # Paths in here are archive paths - nothing else makes sense. self.write_records((distinfo, info_dir), libdir, archive_paths) # Now, ready to build the zip file pathname = os.path.join(self.dirname, self.filename) self.build_zip(pathname, archive_paths) return pathname def skip_entry(self, arcname): """ Determine whether an archive entry should be skipped when verifying or installing. """ # The signature file won't be in RECORD, # and we don't currently don't do anything with it # We also skip directories, as they won't be in RECORD # either. See: # # https://github.com/pypa/wheel/issues/294 # https://github.com/pypa/wheel/issues/287 # https://github.com/pypa/wheel/pull/289 # return arcname.endswith(('/', '/RECORD.jws')) def install(self, paths, maker, **kwargs): """ Install a wheel to the specified paths. If kwarg ``warner`` is specified, it should be a callable, which will be called with two tuples indicating the wheel version of this software and the wheel version in the file, if there is a discrepancy in the versions. This can be used to issue any warnings to raise any exceptions. If kwarg ``lib_only`` is True, only the purelib/platlib files are installed, and the headers, scripts, data and dist-info metadata are not written. If kwarg ``bytecode_hashed_invalidation`` is True, written bytecode will try to use file-hash based invalidation (PEP-552) on supported interpreter versions (CPython 2.7+). The return value is a :class:`InstalledDistribution` instance unless ``options.lib_only`` is True, in which case the return value is ``None``. 
""" dry_run = maker.dry_run warner = kwargs.get('warner') lib_only = kwargs.get('lib_only', False) bc_hashed_invalidation = kwargs.get('bytecode_hashed_invalidation', False) pathname = os.path.join(self.dirname, self.filename) name_ver = '%s-%s' % (self.name, self.version) data_dir = '%s.data' % name_ver info_dir = '%s.dist-info' % name_ver metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME) wheel_metadata_name = posixpath.join(info_dir, 'WHEEL') record_name = posixpath.join(info_dir, 'RECORD') wrapper = codecs.getreader('utf-8') with ZipFile(pathname, 'r') as zf: with zf.open(wheel_metadata_name) as bwf: wf = wrapper(bwf) message = message_from_file(wf) wv = message['Wheel-Version'].split('.', 1) file_version = tuple([int(i) for i in wv]) if (file_version != self.wheel_version) and warner: warner(self.wheel_version, file_version) if message['Root-Is-Purelib'] == 'true': libdir = paths['purelib'] else: libdir = paths['platlib'] records = {} with zf.open(record_name) as bf: with CSVReader(stream=bf) as reader: for row in reader: p = row[0] records[p] = row data_pfx = posixpath.join(data_dir, '') info_pfx = posixpath.join(info_dir, '') script_pfx = posixpath.join(data_dir, 'scripts', '') # make a new instance rather than a copy of maker's, # as we mutate it fileop = FileOperator(dry_run=dry_run) fileop.record = True # so we can rollback if needed bc = not sys.dont_write_bytecode # Double negatives. Lovely! outfiles = [] # for RECORD writing # for script copying/shebang processing workdir = tempfile.mkdtemp() # set target dir later # we default add_launchers to False, as the # Python Launcher should be used instead maker.source_dir = workdir maker.target_dir = None try: for zinfo in zf.infolist(): arcname = zinfo.filename if isinstance(arcname, text_type): u_arcname = arcname else: u_arcname = arcname.decode('utf-8') if self.skip_entry(u_arcname): continue row = records[u_arcname] if row[2] and str(zinfo.file_size) != row[2]: raise DistlibException('size mismatch for ' '%s' % u_arcname) if row[1]: kind, value = row[1].split('=', 1) with zf.open(arcname) as bf: data = bf.read() _, digest = self.get_hash(data, kind) if digest != value: raise DistlibException('digest mismatch for ' '%s' % arcname) if lib_only and u_arcname.startswith((info_pfx, data_pfx)): logger.debug('lib_only: skipping %s', u_arcname) continue is_script = (u_arcname.startswith(script_pfx) and not u_arcname.endswith('.exe')) if u_arcname.startswith(data_pfx): _, where, rp = u_arcname.split('/', 2) outfile = os.path.join(paths[where], convert_path(rp)) else: # meant for site-packages. 
if u_arcname in (wheel_metadata_name, record_name): continue outfile = os.path.join(libdir, convert_path(u_arcname)) if not is_script: with zf.open(arcname) as bf: fileop.copy_stream(bf, outfile) outfiles.append(outfile) # Double check the digest of the written file if not dry_run and row[1]: with open(outfile, 'rb') as bf: data = bf.read() _, newdigest = self.get_hash(data, kind) if newdigest != digest: raise DistlibException('digest mismatch ' 'on write for ' '%s' % outfile) if bc and outfile.endswith('.py'): try: pyc = fileop.byte_compile(outfile, hashed_invalidation=bc_hashed_invalidation) outfiles.append(pyc) except Exception: # Don't give up if byte-compilation fails, # but log it and perhaps warn the user logger.warning('Byte-compilation failed', exc_info=True) else: fn = os.path.basename(convert_path(arcname)) workname = os.path.join(workdir, fn) with zf.open(arcname) as bf: fileop.copy_stream(bf, workname) dn, fn = os.path.split(outfile) maker.target_dir = dn filenames = maker.make(fn) fileop.set_executable_mode(filenames) outfiles.extend(filenames) if lib_only: logger.debug('lib_only: returning None') dist = None else: # Generate scripts # Try to get pydist.json so we can see if there are # any commands to generate. If this fails (e.g. because # of a legacy wheel), log a warning but don't give up. commands = None file_version = self.info['Wheel-Version'] if file_version == '1.0': # Use legacy info ep = posixpath.join(info_dir, 'entry_points.txt') try: with zf.open(ep) as bwf: epdata = read_exports(bwf) commands = {} for key in ('console', 'gui'): k = '%s_scripts' % key if k in epdata: commands['wrap_%s' % key] = d = {} for v in epdata[k].values(): s = '%s:%s' % (v.prefix, v.suffix) if v.flags: s += ' [%s]' % ','.join(v.flags) d[v.name] = s except Exception: logger.warning('Unable to read legacy script ' 'metadata, so cannot generate ' 'scripts') else: try: with zf.open(metadata_name) as bwf: wf = wrapper(bwf) commands = json.load(wf).get('extensions') if commands: commands = commands.get('python.commands') except Exception: logger.warning('Unable to read JSON metadata, so ' 'cannot generate scripts') if commands: console_scripts = commands.get('wrap_console', {}) gui_scripts = commands.get('wrap_gui', {}) if console_scripts or gui_scripts: script_dir = paths.get('scripts', '') if not os.path.isdir(script_dir): raise ValueError('Valid script path not ' 'specified') maker.target_dir = script_dir for k, v in console_scripts.items(): script = '%s = %s' % (k, v) filenames = maker.make(script) fileop.set_executable_mode(filenames) if gui_scripts: options = {'gui': True } for k, v in gui_scripts.items(): script = '%s = %s' % (k, v) filenames = maker.make(script, options) fileop.set_executable_mode(filenames) p = os.path.join(libdir, info_dir) dist = InstalledDistribution(p) # Write SHARED paths = dict(paths) # don't change passed in dict del paths['purelib'] del paths['platlib'] paths['lib'] = libdir p = dist.write_shared_locations(paths, dry_run) if p: outfiles.append(p) # Write RECORD dist.write_installed_files(outfiles, paths['prefix'], dry_run) return dist except Exception: # pragma: no cover logger.exception('installation failed.') fileop.rollback() raise finally: shutil.rmtree(workdir) def _get_dylib_cache(self): global cache if cache is None: # Use native string to avoid issues on 2.x: see Python #20140. 
base = os.path.join(get_cache_base(), str('dylib-cache'), '%s.%s' % sys.version_info[:2]) cache = Cache(base) return cache def _get_extensions(self): pathname = os.path.join(self.dirname, self.filename) name_ver = '%s-%s' % (self.name, self.version) info_dir = '%s.dist-info' % name_ver arcname = posixpath.join(info_dir, 'EXTENSIONS') wrapper = codecs.getreader('utf-8') result = [] with ZipFile(pathname, 'r') as zf: try: with zf.open(arcname) as bf: wf = wrapper(bf) extensions = json.load(wf) cache = self._get_dylib_cache() prefix = cache.prefix_to_dir(pathname) cache_base = os.path.join(cache.base, prefix) if not os.path.isdir(cache_base): os.makedirs(cache_base) for name, relpath in extensions.items(): dest = os.path.join(cache_base, convert_path(relpath)) if not os.path.exists(dest): extract = True else: file_time = os.stat(dest).st_mtime file_time = datetime.datetime.fromtimestamp(file_time) info = zf.getinfo(relpath) wheel_time = datetime.datetime(*info.date_time) extract = wheel_time > file_time if extract: zf.extract(relpath, cache_base) result.append((name, dest)) except KeyError: pass return result def is_compatible(self): """ Determine if a wheel is compatible with the running system. """ return is_compatible(self) def is_mountable(self): """ Determine if a wheel is asserted as mountable by its metadata. """ return True # for now - metadata details TBD def mount(self, append=False): pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) if not self.is_compatible(): msg = 'Wheel %s not compatible with this Python.' % pathname raise DistlibException(msg) if not self.is_mountable(): msg = 'Wheel %s is marked as not mountable.' % pathname raise DistlibException(msg) if pathname in sys.path: logger.debug('%s already in path', pathname) else: if append: sys.path.append(pathname) else: sys.path.insert(0, pathname) extensions = self._get_extensions() if extensions: if _hook not in sys.meta_path: sys.meta_path.append(_hook) _hook.add(pathname, extensions) def unmount(self): pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) if pathname not in sys.path: logger.debug('%s not in path', pathname) else: sys.path.remove(pathname) if pathname in _hook.impure_wheels: _hook.remove(pathname) if not _hook.impure_wheels: if _hook in sys.meta_path: sys.meta_path.remove(_hook) def verify(self): pathname = os.path.join(self.dirname, self.filename) name_ver = '%s-%s' % (self.name, self.version) data_dir = '%s.data' % name_ver info_dir = '%s.dist-info' % name_ver metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME) wheel_metadata_name = posixpath.join(info_dir, 'WHEEL') record_name = posixpath.join(info_dir, 'RECORD') wrapper = codecs.getreader('utf-8') with ZipFile(pathname, 'r') as zf: with zf.open(wheel_metadata_name) as bwf: wf = wrapper(bwf) message = message_from_file(wf) wv = message['Wheel-Version'].split('.', 1) file_version = tuple([int(i) for i in wv]) # TODO version verification records = {} with zf.open(record_name) as bf: with CSVReader(stream=bf) as reader: for row in reader: p = row[0] records[p] = row for zinfo in zf.infolist(): arcname = zinfo.filename if isinstance(arcname, text_type): u_arcname = arcname else: u_arcname = arcname.decode('utf-8') # See issue #115: some wheels have .. in their entries, but # in the filename ... e.g. __main__..py ! So the check is # updated to look for .. in the directory portions p = u_arcname.split('/') if '..' 
in p: raise DistlibException('invalid entry in ' 'wheel: %r' % u_arcname) if self.skip_entry(u_arcname): continue row = records[u_arcname] if row[2] and str(zinfo.file_size) != row[2]: raise DistlibException('size mismatch for ' '%s' % u_arcname) if row[1]: kind, value = row[1].split('=', 1) with zf.open(arcname) as bf: data = bf.read() _, digest = self.get_hash(data, kind) if digest != value: raise DistlibException('digest mismatch for ' '%s' % arcname) def update(self, modifier, dest_dir=None, **kwargs): """ Update the contents of a wheel in a generic way. The modifier should be a callable which expects a dictionary argument: its keys are archive-entry paths, and its values are absolute filesystem paths where the contents the corresponding archive entries can be found. The modifier is free to change the contents of the files pointed to, add new entries and remove entries, before returning. This method will extract the entire contents of the wheel to a temporary location, call the modifier, and then use the passed (and possibly updated) dictionary to write a new wheel. If ``dest_dir`` is specified, the new wheel is written there -- otherwise, the original wheel is overwritten. The modifier should return True if it updated the wheel, else False. This method returns the same value the modifier returns. """ def get_version(path_map, info_dir): version = path = None key = '%s/%s' % (info_dir, LEGACY_METADATA_FILENAME) if key not in path_map: key = '%s/PKG-INFO' % info_dir if key in path_map: path = path_map[key] version = Metadata(path=path).version return version, path def update_version(version, path): updated = None try: v = NormalizedVersion(version) i = version.find('-') if i < 0: updated = '%s+1' % version else: parts = [int(s) for s in version[i + 1:].split('.')] parts[-1] += 1 updated = '%s+%s' % (version[:i], '.'.join(str(i) for i in parts)) except UnsupportedVersionError: logger.debug('Cannot update non-compliant (PEP-440) ' 'version %r', version) if updated: md = Metadata(path=path) md.version = updated legacy = path.endswith(LEGACY_METADATA_FILENAME) md.write(path=path, legacy=legacy) logger.debug('Version updated from %r to %r', version, updated) pathname = os.path.join(self.dirname, self.filename) name_ver = '%s-%s' % (self.name, self.version) info_dir = '%s.dist-info' % name_ver record_name = posixpath.join(info_dir, 'RECORD') with tempdir() as workdir: with ZipFile(pathname, 'r') as zf: path_map = {} for zinfo in zf.infolist(): arcname = zinfo.filename if isinstance(arcname, text_type): u_arcname = arcname else: u_arcname = arcname.decode('utf-8') if u_arcname == record_name: continue if '..' in u_arcname: raise DistlibException('invalid entry in ' 'wheel: %r' % u_arcname) zf.extract(zinfo, workdir) path = os.path.join(workdir, convert_path(u_arcname)) path_map[u_arcname] = path # Remember the version. original_version, _ = get_version(path_map, info_dir) # Files extracted. Call the modifier. modified = modifier(path_map, **kwargs) if modified: # Something changed - need to build a new wheel. current_version, path = get_version(path_map, info_dir) if current_version and (current_version == original_version): # Add or update local version to signify changes. update_version(current_version, path) # Decide where the new wheel goes. 
if dest_dir is None: fd, newpath = tempfile.mkstemp(suffix='.whl', prefix='wheel-update-', dir=workdir) os.close(fd) else: if not os.path.isdir(dest_dir): raise DistlibException('Not a directory: %r' % dest_dir) newpath = os.path.join(dest_dir, self.filename) archive_paths = list(path_map.items()) distinfo = os.path.join(workdir, info_dir) info = distinfo, info_dir self.write_records(info, workdir, archive_paths) self.build_zip(newpath, archive_paths) if dest_dir is None: shutil.copyfile(newpath, pathname) return modified def compatible_tags(): """ Return (pyver, abi, arch) tuples compatible with this Python. """ versions = [VER_SUFFIX] major = VER_SUFFIX[0] for minor in range(sys.version_info[1] - 1, - 1, -1): versions.append(''.join([major, str(minor)])) abis = [] for suffix, _, _ in imp.get_suffixes(): if suffix.startswith('.abi'): abis.append(suffix.split('.', 2)[1]) abis.sort() if ABI != 'none': abis.insert(0, ABI) abis.append('none') result = [] arches = [ARCH] if sys.platform == 'darwin': m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH) if m: name, major, minor, arch = m.groups() minor = int(minor) matches = [arch] if arch in ('i386', 'ppc'): matches.append('fat') if arch in ('i386', 'ppc', 'x86_64'): matches.append('fat3') if arch in ('ppc64', 'x86_64'): matches.append('fat64') if arch in ('i386', 'x86_64'): matches.append('intel') if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'): matches.append('universal') while minor >= 0: for match in matches: s = '%s_%s_%s_%s' % (name, major, minor, match) if s != ARCH: # already there arches.append(s) minor -= 1 # Most specific - our Python version, ABI and arch for abi in abis: for arch in arches: result.append((''.join((IMP_PREFIX, versions[0])), abi, arch)) # where no ABI / arch dependency, but IMP_PREFIX dependency for i, version in enumerate(versions): result.append((''.join((IMP_PREFIX, version)), 'none', 'any')) if i == 0: result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any')) # no IMP_PREFIX, ABI or arch dependency for i, version in enumerate(versions): result.append((''.join(('py', version)), 'none', 'any')) if i == 0: result.append((''.join(('py', version[0])), 'none', 'any')) return set(result) COMPATIBLE_TAGS = compatible_tags() del compatible_tags def is_compatible(wheel, tags=None): if not isinstance(wheel, Wheel): wheel = Wheel(wheel) # assume it's a filename result = False if tags is None: tags = COMPATIBLE_TAGS for ver, abi, arch in tags: if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch: result = True break return result
0
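The verify/mount flow implemented in wheel.py above is easiest to see end to end. A minimal sketch follows, assuming a wheel with the hypothetical filename below exists in the current directory; Wheel() parses the name, version and tags straight from the filename, and mount() performs the sys.path insertion shown earlier.

from distlib.wheel import Wheel

w = Wheel('demo_pkg-1.0-py3-none-any.whl')  # hypothetical file; tags parsed from the name
print(w.name, w.version, w.pyver, w.abi, w.arch)

if w.is_compatible():   # (pyver, abi, arch) checked against COMPATIBLE_TAGS
    w.verify()          # size and digest checks against RECORD
    w.mount()           # prepends the wheel to sys.path; extensions get cached
    # ... import modules provided by the wheel here ...
    w.unmount()         # removes it from sys.path again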
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py
# -*- coding: utf-8 -*- # # Copyright (C) 2012 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # """Utility functions for copying and archiving files and directory trees. XXX The functions here don't copy the resource fork or other metadata on Mac. """ import os import sys import stat from os.path import abspath import fnmatch try: from collections.abc import Callable except ImportError: from collections import Callable import errno from . import tarfile try: import bz2 _BZ2_SUPPORTED = True except ImportError: _BZ2_SUPPORTED = False try: from pwd import getpwnam except ImportError: getpwnam = None try: from grp import getgrnam except ImportError: getgrnam = None __all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", "copytree", "move", "rmtree", "Error", "SpecialFileError", "ExecError", "make_archive", "get_archive_formats", "register_archive_format", "unregister_archive_format", "get_unpack_formats", "register_unpack_format", "unregister_unpack_format", "unpack_archive", "ignore_patterns"] class Error(EnvironmentError): pass class SpecialFileError(EnvironmentError): """Raised when trying to do a kind of operation (e.g. copying) which is not supported on a special file (e.g. a named pipe)""" class ExecError(EnvironmentError): """Raised when a command could not be executed""" class ReadError(EnvironmentError): """Raised when an archive cannot be read""" class RegistryError(Exception): """Raised when a registry operation with the archiving and unpacking registries fails""" try: WindowsError except NameError: WindowsError = None def copyfileobj(fsrc, fdst, length=16*1024): """copy data from file-like object fsrc to file-like object fdst""" while 1: buf = fsrc.read(length) if not buf: break fdst.write(buf) def _samefile(src, dst): # Macintosh, Unix. if hasattr(os.path, 'samefile'): try: return os.path.samefile(src, dst) except OSError: return False # All other platforms: check for same pathname. return (os.path.normcase(os.path.abspath(src)) == os.path.normcase(os.path.abspath(dst))) def copyfile(src, dst): """Copy data from src to dst""" if _samefile(src, dst): raise Error("`%s` and `%s` are the same file" % (src, dst)) for fn in [src, dst]: try: st = os.stat(fn) except OSError: # File most likely does not exist pass else: # XXX What about other special files? (sockets, devices...) if stat.S_ISFIFO(st.st_mode): raise SpecialFileError("`%s` is a named pipe" % fn) with open(src, 'rb') as fsrc: with open(dst, 'wb') as fdst: copyfileobj(fsrc, fdst) def copymode(src, dst): """Copy mode bits from src to dst""" if hasattr(os, 'chmod'): st = os.stat(src) mode = stat.S_IMODE(st.st_mode) os.chmod(dst, mode) def copystat(src, dst): """Copy all stat info (mode bits, atime, mtime, flags) from src to dst""" st = os.stat(src) mode = stat.S_IMODE(st.st_mode) if hasattr(os, 'utime'): os.utime(dst, (st.st_atime, st.st_mtime)) if hasattr(os, 'chmod'): os.chmod(dst, mode) if hasattr(os, 'chflags') and hasattr(st, 'st_flags'): try: os.chflags(dst, st.st_flags) except OSError as why: if (not hasattr(errno, 'EOPNOTSUPP') or why.errno != errno.EOPNOTSUPP): raise def copy(src, dst): """Copy data and mode bits ("cp src dst"). The destination may be a directory. """ if os.path.isdir(dst): dst = os.path.join(dst, os.path.basename(src)) copyfile(src, dst) copymode(src, dst) def copy2(src, dst): """Copy data and all stat info ("cp -p src dst"). The destination may be a directory. 
""" if os.path.isdir(dst): dst = os.path.join(dst, os.path.basename(src)) copyfile(src, dst) copystat(src, dst) def ignore_patterns(*patterns): """Function that can be used as copytree() ignore parameter. Patterns is a sequence of glob-style patterns that are used to exclude files""" def _ignore_patterns(path, names): ignored_names = [] for pattern in patterns: ignored_names.extend(fnmatch.filter(names, pattern)) return set(ignored_names) return _ignore_patterns def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, ignore_dangling_symlinks=False): """Recursively copy a directory tree. The destination directory must not already exist. If exception(s) occur, an Error is raised with a list of reasons. If the optional symlinks flag is true, symbolic links in the source tree result in symbolic links in the destination tree; if it is false, the contents of the files pointed to by symbolic links are copied. If the file pointed by the symlink doesn't exist, an exception will be added in the list of errors raised in an Error exception at the end of the copy process. You can set the optional ignore_dangling_symlinks flag to true if you want to silence this exception. Notice that this has no effect on platforms that don't support os.symlink. The optional ignore argument is a callable. If given, it is called with the `src` parameter, which is the directory being visited by copytree(), and `names` which is the list of `src` contents, as returned by os.listdir(): callable(src, names) -> ignored_names Since copytree() is called recursively, the callable will be called once for each directory that is copied. It returns a list of names relative to the `src` directory that should not be copied. The optional copy_function argument is a callable that will be used to copy each file. It will be called with the source path and the destination path as arguments. By default, copy2() is used, but any function that supports the same signature (like copy()) can be used. """ names = os.listdir(src) if ignore is not None: ignored_names = ignore(src, names) else: ignored_names = set() os.makedirs(dst) errors = [] for name in names: if name in ignored_names: continue srcname = os.path.join(src, name) dstname = os.path.join(dst, name) try: if os.path.islink(srcname): linkto = os.readlink(srcname) if symlinks: os.symlink(linkto, dstname) else: # ignore dangling symlink if the flag is on if not os.path.exists(linkto) and ignore_dangling_symlinks: continue # otherwise let the copy occurs. copy2 will raise an error copy_function(srcname, dstname) elif os.path.isdir(srcname): copytree(srcname, dstname, symlinks, ignore, copy_function) else: # Will raise a SpecialFileError for unsupported file types copy_function(srcname, dstname) # catch the Error from the recursive copytree so that we can # continue with other files except Error as err: errors.extend(err.args[0]) except EnvironmentError as why: errors.append((srcname, dstname, str(why))) try: copystat(src, dst) except OSError as why: if WindowsError is not None and isinstance(why, WindowsError): # Copying file access times may fail on Windows pass else: errors.extend((src, dst, str(why))) if errors: raise Error(errors) def rmtree(path, ignore_errors=False, onerror=None): """Recursively delete a directory tree. 
If ignore_errors is set, errors are ignored; otherwise, if onerror is set, it is called to handle the error with arguments (func, path, exc_info) where func is os.listdir, os.remove, or os.rmdir; path is the argument to that function that caused it to fail; and exc_info is a tuple returned by sys.exc_info(). If ignore_errors is false and onerror is None, an exception is raised. """ if ignore_errors: def onerror(*args): pass elif onerror is None: def onerror(*args): raise try: if os.path.islink(path): # symlinks to directories are forbidden, see bug #1669 raise OSError("Cannot call rmtree on a symbolic link") except OSError: onerror(os.path.islink, path, sys.exc_info()) # can't continue even if onerror hook returns return names = [] try: names = os.listdir(path) except os.error: onerror(os.listdir, path, sys.exc_info()) for name in names: fullname = os.path.join(path, name) try: mode = os.lstat(fullname).st_mode except os.error: mode = 0 if stat.S_ISDIR(mode): rmtree(fullname, ignore_errors, onerror) else: try: os.remove(fullname) except os.error: onerror(os.remove, fullname, sys.exc_info()) try: os.rmdir(path) except os.error: onerror(os.rmdir, path, sys.exc_info()) def _basename(path): # A basename() variant which first strips the trailing slash, if present. # Thus we always get the last component of the path, even for directories. return os.path.basename(path.rstrip(os.path.sep)) def move(src, dst): """Recursively move a file or directory to another location. This is similar to the Unix "mv" command. If the destination is a directory or a symlink to a directory, the source is moved inside the directory. The destination path must not already exist. If the destination already exists but is not a directory, it may be overwritten depending on os.rename() semantics. If the destination is on our current filesystem, then rename() is used. Otherwise, src is copied to the destination and then removed. A lot more could be done here... A look at a mv.c shows a lot of the issues this implementation glosses over. """ real_dst = dst if os.path.isdir(dst): if _samefile(src, dst): # We might be on a case insensitive filesystem, # perform the rename anyway. os.rename(src, dst) return real_dst = os.path.join(dst, _basename(src)) if os.path.exists(real_dst): raise Error("Destination path '%s' already exists" % real_dst) try: os.rename(src, real_dst) except OSError: if os.path.isdir(src): if _destinsrc(src, dst): raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst)) copytree(src, real_dst, symlinks=True) rmtree(src) else: copy2(src, real_dst) os.unlink(src) def _destinsrc(src, dst): src = abspath(src) dst = abspath(dst) if not src.endswith(os.path.sep): src += os.path.sep if not dst.endswith(os.path.sep): dst += os.path.sep return dst.startswith(src) def _get_gid(name): """Returns a gid, given a group name.""" if getgrnam is None or name is None: return None try: result = getgrnam(name) except KeyError: result = None if result is not None: return result[2] return None def _get_uid(name): """Returns an uid, given a user name.""" if getpwnam is None or name is None: return None try: result = getpwnam(name) except KeyError: result = None if result is not None: return result[2] return None def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, owner=None, group=None, logger=None): """Create a (possibly compressed) tar file from all the files under 'base_dir'. 'compress' must be "gzip" (the default), "bzip2", or None. 
'owner' and 'group' can be used to define an owner and a group for the archive that is being built. If not provided, the current owner and group will be used. The output tar file will be named 'base_name' + ".tar", possibly plus the appropriate compression extension (".gz", or ".bz2"). Returns the output filename. """ tar_compression = {'gzip': 'gz', None: ''} compress_ext = {'gzip': '.gz'} if _BZ2_SUPPORTED: tar_compression['bzip2'] = 'bz2' compress_ext['bzip2'] = '.bz2' # flags for compression program, each element of list will be an argument if compress is not None and compress not in compress_ext: raise ValueError("bad value for 'compress', or compression format not " "supported : {0}".format(compress)) archive_name = base_name + '.tar' + compress_ext.get(compress, '') archive_dir = os.path.dirname(archive_name) if not os.path.exists(archive_dir): if logger is not None: logger.info("creating %s", archive_dir) if not dry_run: os.makedirs(archive_dir) # creating the tarball if logger is not None: logger.info('Creating tar archive') uid = _get_uid(owner) gid = _get_gid(group) def _set_uid_gid(tarinfo): if gid is not None: tarinfo.gid = gid tarinfo.gname = group if uid is not None: tarinfo.uid = uid tarinfo.uname = owner return tarinfo if not dry_run: tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress]) try: tar.add(base_dir, filter=_set_uid_gid) finally: tar.close() return archive_name def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False): # XXX see if we want to keep an external call here if verbose: zipoptions = "-r" else: zipoptions = "-rq" from distutils.errors import DistutilsExecError from distutils.spawn import spawn try: spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run) except DistutilsExecError: # XXX really should distinguish between "couldn't find # external 'zip' command" and "zip failed". raise ExecError("unable to create zip file '%s': " "could neither import the 'zipfile' module nor " "find a standalone zip utility" % zip_filename) def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None): """Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_name' + ".zip". Uses either the "zipfile" Python module (if available) or the InfoZIP "zip" utility (if installed and found on the default search path). If neither tool is available, raises ExecError. Returns the name of the output zip file. """ zip_filename = base_name + ".zip" archive_dir = os.path.dirname(base_name) if not os.path.exists(archive_dir): if logger is not None: logger.info("creating %s", archive_dir) if not dry_run: os.makedirs(archive_dir) # If zipfile module is not available, try spawning an external 'zip' # command.
try: import zipfile except ImportError: zipfile = None if zipfile is None: _call_external_zip(base_dir, zip_filename, verbose, dry_run) else: if logger is not None: logger.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) if not dry_run: zip = zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED) for dirpath, dirnames, filenames in os.walk(base_dir): for name in filenames: path = os.path.normpath(os.path.join(dirpath, name)) if os.path.isfile(path): zip.write(path, path) if logger is not None: logger.info("adding '%s'", path) zip.close() return zip_filename _ARCHIVE_FORMATS = { 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"), 'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"), 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"), 'zip': (_make_zipfile, [], "ZIP file"), } if _BZ2_SUPPORTED: _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file") def get_archive_formats(): """Returns a list of supported formats for archiving and unarchiving. Each element of the returned sequence is a tuple (name, description) """ formats = [(name, registry[2]) for name, registry in _ARCHIVE_FORMATS.items()] formats.sort() return formats def register_archive_format(name, function, extra_args=None, description=''): """Registers an archive format. name is the name of the format. function is the callable that will be used to create archives. If provided, extra_args is a sequence of (name, value) tuples that will be passed as arguments to the callable. description can be provided to describe the format, and will be returned by the get_archive_formats() function. """ if extra_args is None: extra_args = [] if not isinstance(function, Callable): raise TypeError('The %s object is not callable' % function) if not isinstance(extra_args, (tuple, list)): raise TypeError('extra_args needs to be a sequence') for element in extra_args: if not isinstance(element, (tuple, list)) or len(element) !=2: raise TypeError('extra_args elements are : (arg_name, value)') _ARCHIVE_FORMATS[name] = (function, extra_args, description) def unregister_archive_format(name): del _ARCHIVE_FORMATS[name] def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, dry_run=0, owner=None, group=None, logger=None): """Create an archive file (eg. zip or tar). 'base_name' is the name of the file to create, minus any format-specific extension; 'format' is the archive format: one of "zip", "tar", "bztar" or "gztar". 'root_dir' is a directory that will be the root directory of the archive; ie. we typically chdir into 'root_dir' before creating the archive. 'base_dir' is the directory where we start archiving from; ie. 'base_dir' will be the common prefix of all files and directories in the archive. 'root_dir' and 'base_dir' both default to the current directory. Returns the name of the archive file. 'owner' and 'group' are used when creating a tar archive. By default, uses the current owner and group. 
""" save_cwd = os.getcwd() if root_dir is not None: if logger is not None: logger.debug("changing into '%s'", root_dir) base_name = os.path.abspath(base_name) if not dry_run: os.chdir(root_dir) if base_dir is None: base_dir = os.curdir kwargs = {'dry_run': dry_run, 'logger': logger} try: format_info = _ARCHIVE_FORMATS[format] except KeyError: raise ValueError("unknown archive format '%s'" % format) func = format_info[0] for arg, val in format_info[1]: kwargs[arg] = val if format != 'zip': kwargs['owner'] = owner kwargs['group'] = group try: filename = func(base_name, base_dir, **kwargs) finally: if root_dir is not None: if logger is not None: logger.debug("changing back to '%s'", save_cwd) os.chdir(save_cwd) return filename def get_unpack_formats(): """Returns a list of supported formats for unpacking. Each element of the returned sequence is a tuple (name, extensions, description) """ formats = [(name, info[0], info[3]) for name, info in _UNPACK_FORMATS.items()] formats.sort() return formats def _check_unpack_options(extensions, function, extra_args): """Checks what gets registered as an unpacker.""" # first make sure no other unpacker is registered for this extension existing_extensions = {} for name, info in _UNPACK_FORMATS.items(): for ext in info[0]: existing_extensions[ext] = name for extension in extensions: if extension in existing_extensions: msg = '%s is already registered for "%s"' raise RegistryError(msg % (extension, existing_extensions[extension])) if not isinstance(function, Callable): raise TypeError('The registered function must be a callable') def register_unpack_format(name, extensions, function, extra_args=None, description=''): """Registers an unpack format. `name` is the name of the format. `extensions` is a list of extensions corresponding to the format. `function` is the callable that will be used to unpack archives. The callable will receive archives to unpack. If it's unable to handle an archive, it needs to raise a ReadError exception. If provided, `extra_args` is a sequence of (name, value) tuples that will be passed as arguments to the callable. description can be provided to describe the format, and will be returned by the get_unpack_formats() function. """ if extra_args is None: extra_args = [] _check_unpack_options(extensions, function, extra_args) _UNPACK_FORMATS[name] = extensions, function, extra_args, description def unregister_unpack_format(name): """Removes the pack format from the registry.""" del _UNPACK_FORMATS[name] def _ensure_directory(path): """Ensure that the parent directory of `path` exists""" dirname = os.path.dirname(path) if not os.path.isdir(dirname): os.makedirs(dirname) def _unpack_zipfile(filename, extract_dir): """Unpack zip `filename` to `extract_dir` """ try: import zipfile except ImportError: raise ReadError('zlib not supported, cannot unpack this archive.') if not zipfile.is_zipfile(filename): raise ReadError("%s is not a zip file" % filename) zip = zipfile.ZipFile(filename) try: for info in zip.infolist(): name = info.filename # don't extract absolute paths or ones with .. in them if name.startswith('/') or '..' 
in name: continue target = os.path.join(extract_dir, *name.split('/')) if not target: continue _ensure_directory(target) if not name.endswith('/'): # file data = zip.read(info.filename) f = open(target, 'wb') try: f.write(data) finally: f.close() del data finally: zip.close() def _unpack_tarfile(filename, extract_dir): """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` """ try: tarobj = tarfile.open(filename) except tarfile.TarError: raise ReadError( "%s is not a compressed or uncompressed tar file" % filename) try: tarobj.extractall(extract_dir) finally: tarobj.close() _UNPACK_FORMATS = { 'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"), 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file") } if _BZ2_SUPPORTED: _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [], "bzip2'ed tar-file") def _find_unpack_format(filename): for name, info in _UNPACK_FORMATS.items(): for extension in info[0]: if filename.endswith(extension): return name return None def unpack_archive(filename, extract_dir=None, format=None): """Unpack an archive. `filename` is the name of the archive. `extract_dir` is the name of the target directory, where the archive is unpacked. If not provided, the current working directory is used. `format` is the archive format: one of "zip", "tar", or "gztar". Or any other registered format. If not provided, unpack_archive will use the filename extension and see if an unpacker was registered for that extension. In case none is found, a ValueError is raised. """ if extract_dir is None: extract_dir = os.getcwd() if format is not None: try: format_info = _UNPACK_FORMATS[format] except KeyError: raise ValueError("Unknown unpack format '{0}'".format(format)) func = format_info[1] func(filename, extract_dir, **dict(format_info[2])) else: # we need to look at the registered unpackers supported extensions format = _find_unpack_format(filename) if format is None: raise ReadError("Unknown archive format '{0}'".format(filename)) func = _UNPACK_FORMATS[format][1] kwargs = dict(_UNPACK_FORMATS[format][2]) func(filename, extract_dir, **kwargs)
0
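For orientation, a small usage sketch of the archiving helpers defined in the shutil backport above. The directory names are hypothetical ('project/' must exist for the calls to succeed), and the vendored import path is just one plausible way to reach this copy of the module.

from pip._vendor.distlib._backport import shutil as bshutil

# Copy a tree, skipping compiled files; ignore_patterns() builds the callable.
bshutil.copytree('project', 'project_copy',
                 ignore=bshutil.ignore_patterns('*.pyc', 'tmp*'))

# Create project.tar.gz from the 'project' directory under the cwd ...
archive = bshutil.make_archive('project', 'gztar', root_dir='.',
                               base_dir='project')

# ... and unpack it; with no format given, the extension drives the lookup.
bshutil.unpack_archive(archive, extract_dir='restored')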
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/_backport/misc.py
# -*- coding: utf-8 -*- # # Copyright (C) 2012 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # """Backports for individual classes and functions.""" import os import sys __all__ = ['cache_from_source', 'callable', 'fsencode'] try: from imp import cache_from_source except ImportError: def cache_from_source(py_file, debug=__debug__): ext = debug and 'c' or 'o' return py_file + ext try: callable = callable except NameError: from collections import Callable def callable(obj): return isinstance(obj, Callable) try: fsencode = os.fsencode except AttributeError: def fsencode(filename): if isinstance(filename, bytes): return filename elif isinstance(filename, str): return filename.encode(sys.getfilesystemencoding()) else: raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
0
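A quick illustration of the fallbacks in misc.py above; exact results depend on the interpreter, since on modern Pythons the stdlib objects are re-exported (cache_from_source, for instance, may return a __pycache__ path rather than the simple suffix the fallback produces).

from pip._vendor.distlib._backport.misc import (cache_from_source, callable,
                                                fsencode)

print(cache_from_source('pkg/mod.py'))  # e.g. 'pkg/mod.pyc' from the fallback
print(callable(len), callable(42))      # True False
print(fsencode('data.txt'))             # b'data.txt' (filesystem encoding)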
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/_backport/__init__.py
"""Modules copied from Python 3 standard libraries, for internal use only. Individual classes and functions are found in d2._backport.misc. Intended usage is to always import things missing from 3.1 from that module: the built-in/stdlib objects will be used if found. """
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg
[posix_prefix] # Configuration directories. Some of these come straight out of the # configure script. They are for implementing the other variables, not to # be used directly in [resource_locations]. confdir = /etc datadir = /usr/share libdir = /usr/lib statedir = /var # User resource directory local = ~/.local/{distribution.name} stdlib = {base}/lib/python{py_version_short} platstdlib = {platbase}/lib/python{py_version_short} purelib = {base}/lib/python{py_version_short}/site-packages platlib = {platbase}/lib/python{py_version_short}/site-packages include = {base}/include/python{py_version_short}{abiflags} platinclude = {platbase}/include/python{py_version_short}{abiflags} data = {base} [posix_home] stdlib = {base}/lib/python platstdlib = {base}/lib/python purelib = {base}/lib/python platlib = {base}/lib/python include = {base}/include/python platinclude = {base}/include/python scripts = {base}/bin data = {base} [nt] stdlib = {base}/Lib platstdlib = {base}/Lib purelib = {base}/Lib/site-packages platlib = {base}/Lib/site-packages include = {base}/Include platinclude = {base}/Include scripts = {base}/Scripts data = {base} [os2] stdlib = {base}/Lib platstdlib = {base}/Lib purelib = {base}/Lib/site-packages platlib = {base}/Lib/site-packages include = {base}/Include platinclude = {base}/Include scripts = {base}/Scripts data = {base} [os2_home] stdlib = {userbase}/lib/python{py_version_short} platstdlib = {userbase}/lib/python{py_version_short} purelib = {userbase}/lib/python{py_version_short}/site-packages platlib = {userbase}/lib/python{py_version_short}/site-packages include = {userbase}/include/python{py_version_short} scripts = {userbase}/bin data = {userbase} [nt_user] stdlib = {userbase}/Python{py_version_nodot} platstdlib = {userbase}/Python{py_version_nodot} purelib = {userbase}/Python{py_version_nodot}/site-packages platlib = {userbase}/Python{py_version_nodot}/site-packages include = {userbase}/Python{py_version_nodot}/Include scripts = {userbase}/Scripts data = {userbase} [posix_user] stdlib = {userbase}/lib/python{py_version_short} platstdlib = {userbase}/lib/python{py_version_short} purelib = {userbase}/lib/python{py_version_short}/site-packages platlib = {userbase}/lib/python{py_version_short}/site-packages include = {userbase}/include/python{py_version_short} scripts = {userbase}/bin data = {userbase} [osx_framework_user] stdlib = {userbase}/lib/python platstdlib = {userbase}/lib/python purelib = {userbase}/lib/python/site-packages platlib = {userbase}/lib/python/site-packages include = {userbase}/include scripts = {userbase}/bin data = {userbase}
0
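The values in sysconfig.cfg above are templates rather than literal paths: brace fields such as {base} and {py_version_short} are substituted by the accompanying sysconfig backport. A toy stand-in for that expansion, with hypothetical variable values:

# Simplified; the real backport also resolves variables that reference
# other variables across sections.
template = '{base}/lib/python{py_version_short}/site-packages'
variables = {'base': '/usr', 'py_version_short': '3.9'}
print(template.format(**variables))  # /usr/lib/python3.9/site-packages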
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/_backport/tarfile.py
#------------------------------------------------------------------- # tarfile.py #------------------------------------------------------------------- # Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de> # All rights reserved. # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following # conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # from __future__ import print_function """Read from and write to tar format archives. """ __version__ = "$Revision$" version = "0.9.0" __author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" __date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $" __cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $" __credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." #--------- # Imports #--------- import sys import os import stat import errno import time import struct import copy import re try: import grp, pwd except ImportError: grp = pwd = None # os.symlink on Windows prior to 6.0 raises NotImplementedError symlink_exception = (AttributeError, NotImplementedError) try: # WindowsError (1314) will be raised if the caller does not hold the # SeCreateSymbolicLinkPrivilege privilege symlink_exception += (WindowsError,) except NameError: pass # from tarfile import * __all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"] if sys.version_info[0] < 3: import __builtin__ as builtins else: import builtins _open = builtins.open # Since 'open' is TarFile.open #--------------------------------------------------------- # tar constants #--------------------------------------------------------- NUL = b"\0" # the null character BLOCKSIZE = 512 # length of processing blocks RECORDSIZE = BLOCKSIZE * 20 # length of records GNU_MAGIC = b"ustar \0" # magic gnu tar string POSIX_MAGIC = b"ustar\x0000" # magic posix tar string LENGTH_NAME = 100 # maximum length of a filename LENGTH_LINK = 100 # maximum length of a linkname LENGTH_PREFIX = 155 # maximum length of the prefix field REGTYPE = b"0" # regular file AREGTYPE = b"\0" # regular file LNKTYPE = b"1" # link (inside tarfile) SYMTYPE = b"2" # symbolic link CHRTYPE = b"3" # character special device BLKTYPE = b"4" # block special device DIRTYPE = b"5" # directory FIFOTYPE = b"6" # fifo special device CONTTYPE = b"7" # contiguous file GNUTYPE_LONGNAME = b"L" # GNU tar longname GNUTYPE_LONGLINK = b"K" # GNU tar longlink GNUTYPE_SPARSE = b"S" # GNU tar sparse file XHDTYPE = b"x" # POSIX.1-2001 extended header XGLTYPE = b"g" # POSIX.1-2001 global header SOLARIS_XHDTYPE = b"X" # Solaris extended header USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format GNU_FORMAT 
= 1 # GNU tar format PAX_FORMAT = 2 # POSIX.1-2001 (pax) format DEFAULT_FORMAT = GNU_FORMAT #--------------------------------------------------------- # tarfile constants #--------------------------------------------------------- # File types that tarfile supports: SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, CONTTYPE, CHRTYPE, BLKTYPE, GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, GNUTYPE_SPARSE) # File types that will be treated as a regular file. REGULAR_TYPES = (REGTYPE, AREGTYPE, CONTTYPE, GNUTYPE_SPARSE) # File types that are part of the GNU tar format. GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, GNUTYPE_SPARSE) # Fields from a pax header that override a TarInfo attribute. PAX_FIELDS = ("path", "linkpath", "size", "mtime", "uid", "gid", "uname", "gname") # Fields from a pax header that are affected by hdrcharset. PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname")) # Fields in a pax header that are numbers, all other fields # are treated as strings. PAX_NUMBER_FIELDS = { "atime": float, "ctime": float, "mtime": float, "uid": int, "gid": int, "size": int } #--------------------------------------------------------- # Bits used in the mode field, values in octal. #--------------------------------------------------------- S_IFLNK = 0o120000 # symbolic link S_IFREG = 0o100000 # regular file S_IFBLK = 0o060000 # block device S_IFDIR = 0o040000 # directory S_IFCHR = 0o020000 # character device S_IFIFO = 0o010000 # fifo TSUID = 0o4000 # set UID on execution TSGID = 0o2000 # set GID on execution TSVTX = 0o1000 # reserved TUREAD = 0o400 # read by owner TUWRITE = 0o200 # write by owner TUEXEC = 0o100 # execute/search by owner TGREAD = 0o040 # read by group TGWRITE = 0o020 # write by group TGEXEC = 0o010 # execute/search by group TOREAD = 0o004 # read by other TOWRITE = 0o002 # write by other TOEXEC = 0o001 # execute/search by other #--------------------------------------------------------- # initialization #--------------------------------------------------------- if os.name in ("nt", "ce"): ENCODING = "utf-8" else: ENCODING = sys.getfilesystemencoding() #--------------------------------------------------------- # Some useful functions #--------------------------------------------------------- def stn(s, length, encoding, errors): """Convert a string to a null-terminated bytes object. """ s = s.encode(encoding, errors) return s[:length] + (length - len(s)) * NUL def nts(s, encoding, errors): """Convert a null-terminated bytes object to a string. """ p = s.find(b"\0") if p != -1: s = s[:p] return s.decode(encoding, errors) def nti(s): """Convert a number field to a python number. """ # There are two possible encodings for a number field, see # itn() below. if s[0] != chr(0o200): try: n = int(nts(s, "ascii", "strict") or "0", 8) except ValueError: raise InvalidHeaderError("invalid header") else: n = 0 for i in range(len(s) - 1): n <<= 8 n += ord(s[i + 1]) return n def itn(n, digits=8, format=DEFAULT_FORMAT): """Convert a python number to a number field. """ # POSIX 1003.1-1988 requires numbers to be encoded as a string of # octal digits followed by a null-byte, this allows values up to # (8**(digits-1))-1. GNU tar allows storing numbers greater than # that if necessary. A leading 0o200 byte indicates this particular # encoding, the following digits-1 bytes are a big-endian # representation. This allows values up to (256**(digits-1))-1. 
if 0 <= n < 8 ** (digits - 1): s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL else: if format != GNU_FORMAT or n >= 256 ** (digits - 1): raise ValueError("overflow in number field") if n < 0: # XXX We mimic GNU tar's behaviour with negative numbers, # this could raise OverflowError. n = struct.unpack("L", struct.pack("l", n))[0] s = bytearray() for i in range(digits - 1): s.insert(0, n & 0o377) n >>= 8 s.insert(0, 0o200) return s def calc_chksums(buf): """Calculate the checksum for a member's header by summing up all characters except for the chksum field which is treated as if it was filled with spaces. According to the GNU tar sources, some tars (Sun and NeXT) calculate chksum with signed char, which will be different if there are chars in the buffer with the high bit set. So we calculate two checksums, unsigned and signed. """ unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) return unsigned_chksum, signed_chksum def copyfileobj(src, dst, length=None): """Copy length bytes from fileobj src to fileobj dst. If length is None, copy the entire content. """ if length == 0: return if length is None: while True: buf = src.read(16*1024) if not buf: break dst.write(buf) return BUFSIZE = 16 * 1024 blocks, remainder = divmod(length, BUFSIZE) for b in range(blocks): buf = src.read(BUFSIZE) if len(buf) < BUFSIZE: raise IOError("end of file reached") dst.write(buf) if remainder != 0: buf = src.read(remainder) if len(buf) < remainder: raise IOError("end of file reached") dst.write(buf) return filemode_table = ( ((S_IFLNK, "l"), (S_IFREG, "-"), (S_IFBLK, "b"), (S_IFDIR, "d"), (S_IFCHR, "c"), (S_IFIFO, "p")), ((TUREAD, "r"),), ((TUWRITE, "w"),), ((TUEXEC|TSUID, "s"), (TSUID, "S"), (TUEXEC, "x")), ((TGREAD, "r"),), ((TGWRITE, "w"),), ((TGEXEC|TSGID, "s"), (TSGID, "S"), (TGEXEC, "x")), ((TOREAD, "r"),), ((TOWRITE, "w"),), ((TOEXEC|TSVTX, "t"), (TSVTX, "T"), (TOEXEC, "x")) ) def filemode(mode): """Convert a file's mode to a string of the form -rwxrwxrwx. Used by TarFile.list() """ perm = [] for table in filemode_table: for bit, char in table: if mode & bit == bit: perm.append(char) break else: perm.append("-") return "".join(perm) class TarError(Exception): """Base exception.""" pass class ExtractError(TarError): """General exception for extract errors.""" pass class ReadError(TarError): """Exception for unreadable tar archives.""" pass class CompressionError(TarError): """Exception for unavailable compression methods.""" pass class StreamError(TarError): """Exception for unsupported operations on stream-like TarFiles.""" pass class HeaderError(TarError): """Base exception for header errors.""" pass class EmptyHeaderError(HeaderError): """Exception for empty headers.""" pass class TruncatedHeaderError(HeaderError): """Exception for truncated headers.""" pass class EOFHeaderError(HeaderError): """Exception for end of file headers.""" pass class InvalidHeaderError(HeaderError): """Exception for invalid headers.""" pass class SubsequentHeaderError(HeaderError): """Exception for missing and invalid extended headers.""" pass #--------------------------- # internal stream interface #--------------------------- class _LowLevelFile(object): """Low-level file object. Supports reading and writing. It is used instead of a regular file object for streaming access. 
""" def __init__(self, name, mode): mode = { "r": os.O_RDONLY, "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, }[mode] if hasattr(os, "O_BINARY"): mode |= os.O_BINARY self.fd = os.open(name, mode, 0o666) def close(self): os.close(self.fd) def read(self, size): return os.read(self.fd, size) def write(self, s): os.write(self.fd, s) class _Stream(object): """Class that serves as an adapter between TarFile and a stream-like object. The stream-like object only needs to have a read() or write() method and is accessed blockwise. Use of gzip or bzip2 compression is possible. A stream-like object could be for example: sys.stdin, sys.stdout, a socket, a tape device etc. _Stream is intended to be used only internally. """ def __init__(self, name, mode, comptype, fileobj, bufsize): """Construct a _Stream object. """ self._extfileobj = True if fileobj is None: fileobj = _LowLevelFile(name, mode) self._extfileobj = False if comptype == '*': # Enable transparent compression detection for the # stream interface fileobj = _StreamProxy(fileobj) comptype = fileobj.getcomptype() self.name = name or "" self.mode = mode self.comptype = comptype self.fileobj = fileobj self.bufsize = bufsize self.buf = b"" self.pos = 0 self.closed = False try: if comptype == "gz": try: import zlib except ImportError: raise CompressionError("zlib module is not available") self.zlib = zlib self.crc = zlib.crc32(b"") if mode == "r": self._init_read_gz() else: self._init_write_gz() if comptype == "bz2": try: import bz2 except ImportError: raise CompressionError("bz2 module is not available") if mode == "r": self.dbuf = b"" self.cmp = bz2.BZ2Decompressor() else: self.cmp = bz2.BZ2Compressor() except: if not self._extfileobj: self.fileobj.close() self.closed = True raise def __del__(self): if hasattr(self, "closed") and not self.closed: self.close() def _init_write_gz(self): """Initialize for writing with gzip compression. """ self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, -self.zlib.MAX_WBITS, self.zlib.DEF_MEM_LEVEL, 0) timestamp = struct.pack("<L", int(time.time())) self.__write(b"\037\213\010\010" + timestamp + b"\002\377") if self.name.endswith(".gz"): self.name = self.name[:-3] # RFC1952 says we must use ISO-8859-1 for the FNAME field. self.__write(self.name.encode("iso-8859-1", "replace") + NUL) def write(self, s): """Write string s to the stream. """ if self.comptype == "gz": self.crc = self.zlib.crc32(s, self.crc) self.pos += len(s) if self.comptype != "tar": s = self.cmp.compress(s) self.__write(s) def __write(self, s): """Write string s to the stream if a whole new block is ready to be written. """ self.buf += s while len(self.buf) > self.bufsize: self.fileobj.write(self.buf[:self.bufsize]) self.buf = self.buf[self.bufsize:] def close(self): """Close the _Stream object. No operation should be done on it afterwards. """ if self.closed: return if self.mode == "w" and self.comptype != "tar": self.buf += self.cmp.flush() if self.mode == "w" and self.buf: self.fileobj.write(self.buf) self.buf = b"" if self.comptype == "gz": # The native zlib crc is an unsigned 32-bit integer, but # the Python wrapper implicitly casts that to a signed C # long. So, on a 32-bit box self.crc may "look negative", # while the same crc on a 64-bit box may "look positive". # To avoid irksome warnings from the `struct` module, force # it to look positive on all boxes. 
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff)) self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF)) if not self._extfileobj: self.fileobj.close() self.closed = True def _init_read_gz(self): """Initialize for reading a gzip compressed fileobj. """ self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS) self.dbuf = b"" # taken from gzip.GzipFile with some alterations if self.__read(2) != b"\037\213": raise ReadError("not a gzip file") if self.__read(1) != b"\010": raise CompressionError("unsupported compression method") flag = ord(self.__read(1)) self.__read(6) if flag & 4: xlen = ord(self.__read(1)) + 256 * ord(self.__read(1)) self.read(xlen) if flag & 8: while True: s = self.__read(1) if not s or s == NUL: break if flag & 16: while True: s = self.__read(1) if not s or s == NUL: break if flag & 2: self.__read(2) def tell(self): """Return the stream's file pointer position. """ return self.pos def seek(self, pos=0): """Set the stream's file pointer to pos. Negative seeking is forbidden. """ if pos - self.pos >= 0: blocks, remainder = divmod(pos - self.pos, self.bufsize) for i in range(blocks): self.read(self.bufsize) self.read(remainder) else: raise StreamError("seeking backwards is not allowed") return self.pos def read(self, size=None): """Return the next size number of bytes from the stream. If size is not defined, return all bytes of the stream up to EOF. """ if size is None: t = [] while True: buf = self._read(self.bufsize) if not buf: break t.append(buf) buf = "".join(t) else: buf = self._read(size) self.pos += len(buf) return buf def _read(self, size): """Return size bytes from the stream. """ if self.comptype == "tar": return self.__read(size) c = len(self.dbuf) while c < size: buf = self.__read(self.bufsize) if not buf: break try: buf = self.cmp.decompress(buf) except IOError: raise ReadError("invalid compressed data") self.dbuf += buf c += len(buf) buf = self.dbuf[:size] self.dbuf = self.dbuf[size:] return buf def __read(self, size): """Return size bytes from stream. If internal buffer is empty, read another block from the stream. """ c = len(self.buf) while c < size: buf = self.fileobj.read(self.bufsize) if not buf: break self.buf += buf c += len(buf) buf = self.buf[:size] self.buf = self.buf[size:] return buf # class _Stream class _StreamProxy(object): """Small proxy class that enables transparent compression detection for the Stream interface (mode 'r|*'). """ def __init__(self, fileobj): self.fileobj = fileobj self.buf = self.fileobj.read(BLOCKSIZE) def read(self, size): self.read = self.fileobj.read return self.buf def getcomptype(self): if self.buf.startswith(b"\037\213\010"): return "gz" if self.buf.startswith(b"BZh91"): return "bz2" return "tar" def close(self): self.fileobj.close() # class StreamProxy class _BZ2Proxy(object): """Small proxy class that enables external file object support for "r:bz2" and "w:bz2" modes. This is actually a workaround for a limitation in bz2 module's BZ2File class which (unlike gzip.GzipFile) has no support for a file object argument. 
""" blocksize = 16 * 1024 def __init__(self, fileobj, mode): self.fileobj = fileobj self.mode = mode self.name = getattr(self.fileobj, "name", None) self.init() def init(self): import bz2 self.pos = 0 if self.mode == "r": self.bz2obj = bz2.BZ2Decompressor() self.fileobj.seek(0) self.buf = b"" else: self.bz2obj = bz2.BZ2Compressor() def read(self, size): x = len(self.buf) while x < size: raw = self.fileobj.read(self.blocksize) if not raw: break data = self.bz2obj.decompress(raw) self.buf += data x += len(data) buf = self.buf[:size] self.buf = self.buf[size:] self.pos += len(buf) return buf def seek(self, pos): if pos < self.pos: self.init() self.read(pos - self.pos) def tell(self): return self.pos def write(self, data): self.pos += len(data) raw = self.bz2obj.compress(data) self.fileobj.write(raw) def close(self): if self.mode == "w": raw = self.bz2obj.flush() self.fileobj.write(raw) # class _BZ2Proxy #------------------------ # Extraction file object #------------------------ class _FileInFile(object): """A thin wrapper around an existing file object that provides a part of its data as an individual file object. """ def __init__(self, fileobj, offset, size, blockinfo=None): self.fileobj = fileobj self.offset = offset self.size = size self.position = 0 if blockinfo is None: blockinfo = [(0, size)] # Construct a map with data and zero blocks. self.map_index = 0 self.map = [] lastpos = 0 realpos = self.offset for offset, size in blockinfo: if offset > lastpos: self.map.append((False, lastpos, offset, None)) self.map.append((True, offset, offset + size, realpos)) realpos += size lastpos = offset + size if lastpos < self.size: self.map.append((False, lastpos, self.size, None)) def seekable(self): if not hasattr(self.fileobj, "seekable"): # XXX gzip.GzipFile and bz2.BZ2File return True return self.fileobj.seekable() def tell(self): """Return the current file position. """ return self.position def seek(self, position): """Seek to a position in the file. """ self.position = position def read(self, size=None): """Read data from the file. """ if size is None: size = self.size - self.position else: size = min(size, self.size - self.position) buf = b"" while size > 0: while True: data, start, stop, offset = self.map[self.map_index] if start <= self.position < stop: break else: self.map_index += 1 if self.map_index == len(self.map): self.map_index = 0 length = min(size, stop - self.position) if data: self.fileobj.seek(offset + (self.position - start)) buf += self.fileobj.read(length) else: buf += NUL * length size -= length self.position += length return buf #class _FileInFile class ExFileObject(object): """File-like object for reading an archive member. Is returned by TarFile.extractfile(). """ blocksize = 1024 def __init__(self, tarfile, tarinfo): self.fileobj = _FileInFile(tarfile.fileobj, tarinfo.offset_data, tarinfo.size, tarinfo.sparse) self.name = tarinfo.name self.mode = "r" self.closed = False self.size = tarinfo.size self.position = 0 self.buffer = b"" def readable(self): return True def writable(self): return False def seekable(self): return self.fileobj.seekable() def read(self, size=None): """Read at most size bytes from the file. If size is not present or None, read all data until EOF is reached. 
""" if self.closed: raise ValueError("I/O operation on closed file") buf = b"" if self.buffer: if size is None: buf = self.buffer self.buffer = b"" else: buf = self.buffer[:size] self.buffer = self.buffer[size:] if size is None: buf += self.fileobj.read() else: buf += self.fileobj.read(size - len(buf)) self.position += len(buf) return buf # XXX TextIOWrapper uses the read1() method. read1 = read def readline(self, size=-1): """Read one entire line from the file. If size is present and non-negative, return a string with at most that size, which may be an incomplete line. """ if self.closed: raise ValueError("I/O operation on closed file") pos = self.buffer.find(b"\n") + 1 if pos == 0: # no newline found. while True: buf = self.fileobj.read(self.blocksize) self.buffer += buf if not buf or b"\n" in buf: pos = self.buffer.find(b"\n") + 1 if pos == 0: # no newline found. pos = len(self.buffer) break if size != -1: pos = min(size, pos) buf = self.buffer[:pos] self.buffer = self.buffer[pos:] self.position += len(buf) return buf def readlines(self): """Return a list with all remaining lines. """ result = [] while True: line = self.readline() if not line: break result.append(line) return result def tell(self): """Return the current file position. """ if self.closed: raise ValueError("I/O operation on closed file") return self.position def seek(self, pos, whence=os.SEEK_SET): """Seek to a position in the file. """ if self.closed: raise ValueError("I/O operation on closed file") if whence == os.SEEK_SET: self.position = min(max(pos, 0), self.size) elif whence == os.SEEK_CUR: if pos < 0: self.position = max(self.position + pos, 0) else: self.position = min(self.position + pos, self.size) elif whence == os.SEEK_END: self.position = max(min(self.size + pos, self.size), 0) else: raise ValueError("Invalid argument") self.buffer = b"" self.fileobj.seek(self.position) def close(self): """Close the file object. """ self.closed = True def __iter__(self): """Get an iterator over the file's lines. """ while True: line = self.readline() if not line: break yield line #class ExFileObject #------------------ # Exported Classes #------------------ class TarInfo(object): """Informational class which holds the details about an archive member given by a tar header block. TarInfo objects are returned by TarFile.getmember(), TarFile.getmembers() and TarFile.gettarinfo() and are usually created internally. """ __slots__ = ("name", "mode", "uid", "gid", "size", "mtime", "chksum", "type", "linkname", "uname", "gname", "devmajor", "devminor", "offset", "offset_data", "pax_headers", "sparse", "tarfile", "_sparse_structs", "_link_target") def __init__(self, name=""): """Construct a TarInfo object. name is the optional name of the member. """ self.name = name # member name self.mode = 0o644 # file permissions self.uid = 0 # user id self.gid = 0 # group id self.size = 0 # file size self.mtime = 0 # modification time self.chksum = 0 # header checksum self.type = REGTYPE # member type self.linkname = "" # link name self.uname = "" # user name self.gname = "" # group name self.devmajor = 0 # device major number self.devminor = 0 # device minor number self.offset = 0 # the tar header starts here self.offset_data = 0 # the file's data starts here self.sparse = None # sparse member information self.pax_headers = {} # pax header information # In pax headers the "name" and "linkname" field are called # "path" and "linkpath". 
def _getpath(self): return self.name def _setpath(self, name): self.name = name path = property(_getpath, _setpath) def _getlinkpath(self): return self.linkname def _setlinkpath(self, linkname): self.linkname = linkname linkpath = property(_getlinkpath, _setlinkpath) def __repr__(self): return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) def get_info(self): """Return the TarInfo's attributes as a dictionary. """ info = { "name": self.name, "mode": self.mode & 0o7777, "uid": self.uid, "gid": self.gid, "size": self.size, "mtime": self.mtime, "chksum": self.chksum, "type": self.type, "linkname": self.linkname, "uname": self.uname, "gname": self.gname, "devmajor": self.devmajor, "devminor": self.devminor } if info["type"] == DIRTYPE and not info["name"].endswith("/"): info["name"] += "/" return info def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): """Return a tar header as a string of 512 byte blocks. """ info = self.get_info() if format == USTAR_FORMAT: return self.create_ustar_header(info, encoding, errors) elif format == GNU_FORMAT: return self.create_gnu_header(info, encoding, errors) elif format == PAX_FORMAT: return self.create_pax_header(info, encoding) else: raise ValueError("invalid format") def create_ustar_header(self, info, encoding, errors): """Return the object as a ustar header block. """ info["magic"] = POSIX_MAGIC if len(info["linkname"]) > LENGTH_LINK: raise ValueError("linkname is too long") if len(info["name"]) > LENGTH_NAME: info["prefix"], info["name"] = self._posix_split_name(info["name"]) return self._create_header(info, USTAR_FORMAT, encoding, errors) def create_gnu_header(self, info, encoding, errors): """Return the object as a GNU header block sequence. """ info["magic"] = GNU_MAGIC buf = b"" if len(info["linkname"]) > LENGTH_LINK: buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) if len(info["name"]) > LENGTH_NAME: buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) return buf + self._create_header(info, GNU_FORMAT, encoding, errors) def create_pax_header(self, info, encoding): """Return the object as a ustar header block. If it cannot be represented this way, prepend a pax extended header sequence with supplement information. """ info["magic"] = POSIX_MAGIC pax_headers = self.pax_headers.copy() # Test string fields for values that exceed the field length or cannot # be represented in ASCII encoding. for name, hname, length in ( ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), ("uname", "uname", 32), ("gname", "gname", 32)): if hname in pax_headers: # The pax header has priority. continue # Try to encode the string as ASCII. try: info[name].encode("ascii", "strict") except UnicodeEncodeError: pax_headers[hname] = info[name] continue if len(info[name]) > length: pax_headers[hname] = info[name] # Test number fields for values that exceed the field limit or values # that like to be stored as float. for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): if name in pax_headers: # The pax header has priority. Avoid overflow. info[name] = 0 continue val = info[name] if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): pax_headers[name] = str(val) info[name] = 0 # Create a pax extended header if necessary. 
if pax_headers: buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) else: buf = b"" return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") @classmethod def create_pax_global_header(cls, pax_headers): """Return the object as a pax global header block sequence. """ return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8") def _posix_split_name(self, name): """Split a name longer than 100 chars into a prefix and a name part. """ prefix = name[:LENGTH_PREFIX + 1] while prefix and prefix[-1] != "/": prefix = prefix[:-1] name = name[len(prefix):] prefix = prefix[:-1] if not prefix or len(name) > LENGTH_NAME: raise ValueError("name is too long") return prefix, name @staticmethod def _create_header(info, format, encoding, errors): """Return a header block. info is a dictionary with file information, format must be one of the *_FORMAT constants. """ parts = [ stn(info.get("name", ""), 100, encoding, errors), itn(info.get("mode", 0) & 0o7777, 8, format), itn(info.get("uid", 0), 8, format), itn(info.get("gid", 0), 8, format), itn(info.get("size", 0), 12, format), itn(info.get("mtime", 0), 12, format), b" ", # checksum field info.get("type", REGTYPE), stn(info.get("linkname", ""), 100, encoding, errors), info.get("magic", POSIX_MAGIC), stn(info.get("uname", ""), 32, encoding, errors), stn(info.get("gname", ""), 32, encoding, errors), itn(info.get("devmajor", 0), 8, format), itn(info.get("devminor", 0), 8, format), stn(info.get("prefix", ""), 155, encoding, errors) ] buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) chksum = calc_chksums(buf[-BLOCKSIZE:])[0] buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:] return buf @staticmethod def _create_payload(payload): """Return the string payload filled with zero bytes up to the next 512 byte border. """ blocks, remainder = divmod(len(payload), BLOCKSIZE) if remainder > 0: payload += (BLOCKSIZE - remainder) * NUL return payload @classmethod def _create_gnu_long_header(cls, name, type, encoding, errors): """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence for name. """ name = name.encode(encoding, errors) + NUL info = {} info["name"] = "././@LongLink" info["type"] = type info["size"] = len(name) info["magic"] = GNU_MAGIC # create extended header + name blocks. return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ cls._create_payload(name) @classmethod def _create_pax_generic_header(cls, pax_headers, type, encoding): """Return a POSIX.1-2008 extended or global header sequence that contains a list of keyword, value pairs. The values must be strings. """ # Check if one of the fields contains surrogate characters and thereby # forces hdrcharset=BINARY, see _proc_pax() for more information. binary = False for keyword, value in pax_headers.items(): try: value.encode("utf8", "strict") except UnicodeEncodeError: binary = True break records = b"" if binary: # Put the hdrcharset field at the beginning of the header. records += b"21 hdrcharset=BINARY\n" for keyword, value in pax_headers.items(): keyword = keyword.encode("utf8") if binary: # Try to restore the original byte representation of `value'. # Needless to say, that the encoding must match the string. 
value = value.encode(encoding, "surrogateescape") else: value = value.encode("utf8") l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' n = p = 0 while True: n = l + len(str(p)) if n == p: break p = n records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" # We use a hardcoded "././@PaxHeader" name like star does # instead of the one that POSIX recommends. info = {} info["name"] = "././@PaxHeader" info["type"] = type info["size"] = len(records) info["magic"] = POSIX_MAGIC # Create pax header + record blocks. return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ cls._create_payload(records) @classmethod def frombuf(cls, buf, encoding, errors): """Construct a TarInfo object from a 512 byte bytes object. """ if len(buf) == 0: raise EmptyHeaderError("empty header") if len(buf) != BLOCKSIZE: raise TruncatedHeaderError("truncated header") if buf.count(NUL) == BLOCKSIZE: raise EOFHeaderError("end of file header") chksum = nti(buf[148:156]) if chksum not in calc_chksums(buf): raise InvalidHeaderError("bad checksum") obj = cls() obj.name = nts(buf[0:100], encoding, errors) obj.mode = nti(buf[100:108]) obj.uid = nti(buf[108:116]) obj.gid = nti(buf[116:124]) obj.size = nti(buf[124:136]) obj.mtime = nti(buf[136:148]) obj.chksum = chksum obj.type = buf[156:157] obj.linkname = nts(buf[157:257], encoding, errors) obj.uname = nts(buf[265:297], encoding, errors) obj.gname = nts(buf[297:329], encoding, errors) obj.devmajor = nti(buf[329:337]) obj.devminor = nti(buf[337:345]) prefix = nts(buf[345:500], encoding, errors) # Old V7 tar format represents a directory as a regular # file with a trailing slash. if obj.type == AREGTYPE and obj.name.endswith("/"): obj.type = DIRTYPE # The old GNU sparse format occupies some of the unused # space in the buffer for up to 4 sparse structures. # Save the them for later processing in _proc_sparse(). if obj.type == GNUTYPE_SPARSE: pos = 386 structs = [] for i in range(4): try: offset = nti(buf[pos:pos + 12]) numbytes = nti(buf[pos + 12:pos + 24]) except ValueError: break structs.append((offset, numbytes)) pos += 24 isextended = bool(buf[482]) origsize = nti(buf[483:495]) obj._sparse_structs = (structs, isextended, origsize) # Remove redundant slashes from directories. if obj.isdir(): obj.name = obj.name.rstrip("/") # Reconstruct a ustar longname. if prefix and obj.type not in GNU_TYPES: obj.name = prefix + "/" + obj.name return obj @classmethod def fromtarfile(cls, tarfile): """Return the next TarInfo object from TarFile object tarfile. """ buf = tarfile.fileobj.read(BLOCKSIZE) obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors) obj.offset = tarfile.fileobj.tell() - BLOCKSIZE return obj._proc_member(tarfile) #-------------------------------------------------------------------------- # The following are methods that are called depending on the type of a # member. The entry point is _proc_member() which can be overridden in a # subclass to add custom _proc_*() methods. A _proc_*() method MUST # implement the following # operations: # 1. Set self.offset_data to the position where the data blocks begin, # if there is data that follows. # 2. Set tarfile.offset to the position where the next member's header will # begin. # 3. Return self or another valid TarInfo object. def _proc_member(self, tarfile): """Choose the right processing method depending on the type and call it. 
""" if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): return self._proc_gnulong(tarfile) elif self.type == GNUTYPE_SPARSE: return self._proc_sparse(tarfile) elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): return self._proc_pax(tarfile) else: return self._proc_builtin(tarfile) def _proc_builtin(self, tarfile): """Process a builtin type or an unknown type which will be treated as a regular file. """ self.offset_data = tarfile.fileobj.tell() offset = self.offset_data if self.isreg() or self.type not in SUPPORTED_TYPES: # Skip the following data blocks. offset += self._block(self.size) tarfile.offset = offset # Patch the TarInfo object with saved global # header information. self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) return self def _proc_gnulong(self, tarfile): """Process the blocks that hold a GNU longname or longlink member. """ buf = tarfile.fileobj.read(self._block(self.size)) # Fetch the next header and process it. try: next = self.fromtarfile(tarfile) except HeaderError: raise SubsequentHeaderError("missing or bad subsequent header") # Patch the TarInfo object from the next header with # the longname information. next.offset = self.offset if self.type == GNUTYPE_LONGNAME: next.name = nts(buf, tarfile.encoding, tarfile.errors) elif self.type == GNUTYPE_LONGLINK: next.linkname = nts(buf, tarfile.encoding, tarfile.errors) return next def _proc_sparse(self, tarfile): """Process a GNU sparse header plus extra headers. """ # We already collected some sparse structures in frombuf(). structs, isextended, origsize = self._sparse_structs del self._sparse_structs # Collect sparse structures from extended header blocks. while isextended: buf = tarfile.fileobj.read(BLOCKSIZE) pos = 0 for i in range(21): try: offset = nti(buf[pos:pos + 12]) numbytes = nti(buf[pos + 12:pos + 24]) except ValueError: break if offset and numbytes: structs.append((offset, numbytes)) pos += 24 isextended = bool(buf[504]) self.sparse = structs self.offset_data = tarfile.fileobj.tell() tarfile.offset = self.offset_data + self._block(self.size) self.size = origsize return self def _proc_pax(self, tarfile): """Process an extended or global header as described in POSIX.1-2008. """ # Read the header information. buf = tarfile.fileobj.read(self._block(self.size)) # A pax header stores supplemental information for either # the following file (extended) or all following files # (global). if self.type == XGLTYPE: pax_headers = tarfile.pax_headers else: pax_headers = tarfile.pax_headers.copy() # Check if the pax header contains a hdrcharset field. This tells us # the encoding of the path, linkpath, uname and gname fields. Normally, # these fields are UTF-8 encoded but since POSIX.1-2008 tar # implementations are allowed to store them as raw binary strings if # the translation to UTF-8 fails. match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf) if match is not None: pax_headers["hdrcharset"] = match.group(1).decode("utf8") # For the time being, we don't care about anything other than "BINARY". # The only other value that is currently allowed by the standard is # "ISO-IR 10646 2000 UTF-8" in other words UTF-8. hdrcharset = pax_headers.get("hdrcharset") if hdrcharset == "BINARY": encoding = tarfile.encoding else: encoding = "utf8" # Parse pax header information. A record looks like that: # "%d %s=%s\n" % (length, keyword, value). length is the size # of the complete record including the length field itself and # the newline. keyword and value are both UTF-8 encoded strings. 
regex = re.compile(br"(\d+) ([^=]+)=") pos = 0 while True: match = regex.match(buf, pos) if not match: break length, keyword = match.groups() length = int(length) value = buf[match.end(2) + 1:match.start(1) + length - 1] # Normally, we could just use "utf8" as the encoding and "strict" # as the error handler, but we better not take the risk. For # example, GNU tar <= 1.23 is known to store filenames it cannot # translate to UTF-8 as raw strings (unfortunately without a # hdrcharset=BINARY header). # We first try the strict standard encoding, and if that fails we # fall back on the user's encoding and error handler. keyword = self._decode_pax_field(keyword, "utf8", "utf8", tarfile.errors) if keyword in PAX_NAME_FIELDS: value = self._decode_pax_field(value, encoding, tarfile.encoding, tarfile.errors) else: value = self._decode_pax_field(value, "utf8", "utf8", tarfile.errors) pax_headers[keyword] = value pos += length # Fetch the next header. try: next = self.fromtarfile(tarfile) except HeaderError: raise SubsequentHeaderError("missing or bad subsequent header") # Process GNU sparse information. if "GNU.sparse.map" in pax_headers: # GNU extended sparse format version 0.1. self._proc_gnusparse_01(next, pax_headers) elif "GNU.sparse.size" in pax_headers: # GNU extended sparse format version 0.0. self._proc_gnusparse_00(next, pax_headers, buf) elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": # GNU extended sparse format version 1.0. self._proc_gnusparse_10(next, pax_headers, tarfile) if self.type in (XHDTYPE, SOLARIS_XHDTYPE): # Patch the TarInfo object with the extended header info. next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) next.offset = self.offset if "size" in pax_headers: # If the extended header replaces the size field, # we need to recalculate the offset where the next # header starts. offset = next.offset_data if next.isreg() or next.type not in SUPPORTED_TYPES: offset += next._block(next.size) tarfile.offset = offset return next def _proc_gnusparse_00(self, next, pax_headers, buf): """Process a GNU tar extended sparse header, version 0.0. """ offsets = [] for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf): offsets.append(int(match.group(1))) numbytes = [] for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf): numbytes.append(int(match.group(1))) next.sparse = list(zip(offsets, numbytes)) def _proc_gnusparse_01(self, next, pax_headers): """Process a GNU tar extended sparse header, version 0.1. """ sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] next.sparse = list(zip(sparse[::2], sparse[1::2])) def _proc_gnusparse_10(self, next, pax_headers, tarfile): """Process a GNU tar extended sparse header, version 1.0. """ fields = None sparse = [] buf = tarfile.fileobj.read(BLOCKSIZE) fields, buf = buf.split(b"\n", 1) fields = int(fields) while len(sparse) < fields * 2: if b"\n" not in buf: buf += tarfile.fileobj.read(BLOCKSIZE) number, buf = buf.split(b"\n", 1) sparse.append(int(number)) next.offset_data = tarfile.fileobj.tell() next.sparse = list(zip(sparse[::2], sparse[1::2])) def _apply_pax_info(self, pax_headers, encoding, errors): """Replace fields with supplemental information from a previous pax extended or global header. 
""" for keyword, value in pax_headers.items(): if keyword == "GNU.sparse.name": setattr(self, "path", value) elif keyword == "GNU.sparse.size": setattr(self, "size", int(value)) elif keyword == "GNU.sparse.realsize": setattr(self, "size", int(value)) elif keyword in PAX_FIELDS: if keyword in PAX_NUMBER_FIELDS: try: value = PAX_NUMBER_FIELDS[keyword](value) except ValueError: value = 0 if keyword == "path": value = value.rstrip("/") setattr(self, keyword, value) self.pax_headers = pax_headers.copy() def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): """Decode a single field from a pax record. """ try: return value.decode(encoding, "strict") except UnicodeDecodeError: return value.decode(fallback_encoding, fallback_errors) def _block(self, count): """Round up a byte count by BLOCKSIZE and return it, e.g. _block(834) => 1024. """ blocks, remainder = divmod(count, BLOCKSIZE) if remainder: blocks += 1 return blocks * BLOCKSIZE def isreg(self): return self.type in REGULAR_TYPES def isfile(self): return self.isreg() def isdir(self): return self.type == DIRTYPE def issym(self): return self.type == SYMTYPE def islnk(self): return self.type == LNKTYPE def ischr(self): return self.type == CHRTYPE def isblk(self): return self.type == BLKTYPE def isfifo(self): return self.type == FIFOTYPE def issparse(self): return self.sparse is not None def isdev(self): return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) # class TarInfo class TarFile(object): """The TarFile Class provides an interface to tar archives. """ debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) dereference = False # If true, add content of linked file to the # tar file, else the link. ignore_zeros = False # If true, skips empty or invalid blocks and # continues processing. errorlevel = 1 # If 0, fatal errors only appear in debug # messages (if debug >= 0). If > 0, errors # are passed to the caller as exceptions. format = DEFAULT_FORMAT # The format to use when creating an archive. encoding = ENCODING # Encoding for 8-bit character strings. errors = None # Error handler for unicode conversion. tarinfo = TarInfo # The default TarInfo class to use. fileobject = ExFileObject # The default ExFileObject class to use. def __init__(self, name=None, mode="r", fileobj=None, format=None, tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None): """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to read from an existing archive, 'a' to append data to an existing file or 'w' to create a new file overwriting an existing one. `mode' defaults to 'r'. If `fileobj' is given, it is used for reading or writing data. If it can be determined, `mode' is overridden by `fileobj's mode. `fileobj' is not closed, when TarFile is closed. """ if len(mode) > 1 or mode not in "raw": raise ValueError("mode must be 'r', 'a' or 'w'") self.mode = mode self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode] if not fileobj: if self.mode == "a" and not os.path.exists(name): # Create nonexistent files in append mode. self.mode = "w" self._mode = "wb" fileobj = bltn_open(name, self._mode) self._extfileobj = False else: if name is None and hasattr(fileobj, "name"): name = fileobj.name if hasattr(fileobj, "mode"): self._mode = fileobj.mode self._extfileobj = True self.name = os.path.abspath(name) if name else None self.fileobj = fileobj # Init attributes. 
if format is not None: self.format = format if tarinfo is not None: self.tarinfo = tarinfo if dereference is not None: self.dereference = dereference if ignore_zeros is not None: self.ignore_zeros = ignore_zeros if encoding is not None: self.encoding = encoding self.errors = errors if pax_headers is not None and self.format == PAX_FORMAT: self.pax_headers = pax_headers else: self.pax_headers = {} if debug is not None: self.debug = debug if errorlevel is not None: self.errorlevel = errorlevel # Init datastructures. self.closed = False self.members = [] # list of members as TarInfo objects self._loaded = False # flag if all members have been read self.offset = self.fileobj.tell() # current position in the archive file self.inodes = {} # dictionary caching the inodes of # archive members already added try: if self.mode == "r": self.firstmember = None self.firstmember = self.next() if self.mode == "a": # Move to the end of the archive, # before the first empty block. while True: self.fileobj.seek(self.offset) try: tarinfo = self.tarinfo.fromtarfile(self) self.members.append(tarinfo) except EOFHeaderError: self.fileobj.seek(self.offset) break except HeaderError as e: raise ReadError(str(e)) if self.mode in "aw": self._loaded = True if self.pax_headers: buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) self.fileobj.write(buf) self.offset += len(buf) except: if not self._extfileobj: self.fileobj.close() self.closed = True raise #-------------------------------------------------------------------------- # Below are the classmethods which act as alternate constructors to the # TarFile class. The open() method is the only one that is needed for # public use; it is the "super"-constructor and is able to select an # adequate "sub"-constructor for a particular compression using the mapping # from OPEN_METH. # # This concept allows one to subclass TarFile without losing the comfort of # the super-constructor. A sub-constructor is registered and made available # by adding it to the mapping in OPEN_METH. @classmethod def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): """Open a tar archive for reading, writing or appending. Return an appropriate TarFile class. mode: 'r' or 'r:*' open for reading with transparent compression 'r:' open for reading exclusively uncompressed 'r:gz' open for reading with gzip compression 'r:bz2' open for reading with bzip2 compression 'a' or 'a:' open for appending, creating the file if necessary 'w' or 'w:' open for writing without compression 'w:gz' open for writing with gzip compression 'w:bz2' open for writing with bzip2 compression 'r|*' open a stream of tar blocks with transparent compression 'r|' open an uncompressed stream of tar blocks for reading 'r|gz' open a gzip compressed stream of tar blocks 'r|bz2' open a bzip2 compressed stream of tar blocks 'w|' open an uncompressed stream for writing 'w|gz' open a gzip compressed stream for writing 'w|bz2' open a bzip2 compressed stream for writing """ if not name and not fileobj: raise ValueError("nothing to open") if mode in ("r", "r:*"): # Find out which *open() is appropriate for opening the file. 
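            # (That is, try each registered constructor in turn - taropen,
            # gzopen, bz2open, see OPEN_METH below - moving on to the next
            # one whenever ReadError or CompressionError is raised.)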
for comptype in cls.OPEN_METH: func = getattr(cls, cls.OPEN_METH[comptype]) if fileobj is not None: saved_pos = fileobj.tell() try: return func(name, "r", fileobj, **kwargs) except (ReadError, CompressionError) as e: if fileobj is not None: fileobj.seek(saved_pos) continue raise ReadError("file could not be opened successfully") elif ":" in mode: filemode, comptype = mode.split(":", 1) filemode = filemode or "r" comptype = comptype or "tar" # Select the *open() function according to # given compression. if comptype in cls.OPEN_METH: func = getattr(cls, cls.OPEN_METH[comptype]) else: raise CompressionError("unknown compression type %r" % comptype) return func(name, filemode, fileobj, **kwargs) elif "|" in mode: filemode, comptype = mode.split("|", 1) filemode = filemode or "r" comptype = comptype or "tar" if filemode not in "rw": raise ValueError("mode must be 'r' or 'w'") stream = _Stream(name, filemode, comptype, fileobj, bufsize) try: t = cls(name, filemode, stream, **kwargs) except: stream.close() raise t._extfileobj = False return t elif mode in "aw": return cls.taropen(name, mode, fileobj, **kwargs) raise ValueError("undiscernible mode") @classmethod def taropen(cls, name, mode="r", fileobj=None, **kwargs): """Open uncompressed tar archive name for reading or writing. """ if len(mode) > 1 or mode not in "raw": raise ValueError("mode must be 'r', 'a' or 'w'") return cls(name, mode, fileobj, **kwargs) @classmethod def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): """Open gzip compressed tar archive name for reading or writing. Appending is not allowed. """ if len(mode) > 1 or mode not in "rw": raise ValueError("mode must be 'r' or 'w'") try: import gzip gzip.GzipFile except (ImportError, AttributeError): raise CompressionError("gzip module is not available") extfileobj = fileobj is not None try: fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj) t = cls.taropen(name, mode, fileobj, **kwargs) except IOError: if not extfileobj and fileobj is not None: fileobj.close() if fileobj is None: raise raise ReadError("not a gzip file") except: if not extfileobj and fileobj is not None: fileobj.close() raise t._extfileobj = extfileobj return t @classmethod def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): """Open bzip2 compressed tar archive name for reading or writing. Appending is not allowed. """ if len(mode) > 1 or mode not in "rw": raise ValueError("mode must be 'r' or 'w'.") try: import bz2 except ImportError: raise CompressionError("bz2 module is not available") if fileobj is not None: fileobj = _BZ2Proxy(fileobj, mode) else: fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel) try: t = cls.taropen(name, mode, fileobj, **kwargs) except (IOError, EOFError): fileobj.close() raise ReadError("not a bzip2 file") t._extfileobj = False return t # All *open() methods are registered here. OPEN_METH = { "tar": "taropen", # uncompressed tar "gz": "gzopen", # gzip compressed tar "bz2": "bz2open" # bzip2 compressed tar } #-------------------------------------------------------------------------- # The public methods which TarFile provides: def close(self): """Close the TarFile. In write-mode, two finishing zero blocks are appended to the archive. 
""" if self.closed: return if self.mode in "aw": self.fileobj.write(NUL * (BLOCKSIZE * 2)) self.offset += (BLOCKSIZE * 2) # fill up the end with zero-blocks # (like option -b20 for tar does) blocks, remainder = divmod(self.offset, RECORDSIZE) if remainder > 0: self.fileobj.write(NUL * (RECORDSIZE - remainder)) if not self._extfileobj: self.fileobj.close() self.closed = True def getmember(self, name): """Return a TarInfo object for member `name'. If `name' can not be found in the archive, KeyError is raised. If a member occurs more than once in the archive, its last occurrence is assumed to be the most up-to-date version. """ tarinfo = self._getmember(name) if tarinfo is None: raise KeyError("filename %r not found" % name) return tarinfo def getmembers(self): """Return the members of the archive as a list of TarInfo objects. The list has the same order as the members in the archive. """ self._check() if not self._loaded: # if we want to obtain a list of self._load() # all members, we first have to # scan the whole archive. return self.members def getnames(self): """Return the members of the archive as a list of their names. It has the same order as the list returned by getmembers(). """ return [tarinfo.name for tarinfo in self.getmembers()] def gettarinfo(self, name=None, arcname=None, fileobj=None): """Create a TarInfo object for either the file `name' or the file object `fileobj' (using os.fstat on its file descriptor). You can modify some of the TarInfo's attributes before you add it using addfile(). If given, `arcname' specifies an alternative name for the file in the archive. """ self._check("aw") # When fileobj is given, replace name by # fileobj's real name. if fileobj is not None: name = fileobj.name # Building the name of the member in the archive. # Backward slashes are converted to forward slashes, # Absolute paths are turned to relative paths. if arcname is None: arcname = name drv, arcname = os.path.splitdrive(arcname) arcname = arcname.replace(os.sep, "/") arcname = arcname.lstrip("/") # Now, fill the TarInfo object with # information specific for the file. tarinfo = self.tarinfo() tarinfo.tarfile = self # Use os.stat or os.lstat, depending on platform # and if symlinks shall be resolved. if fileobj is None: if hasattr(os, "lstat") and not self.dereference: statres = os.lstat(name) else: statres = os.stat(name) else: statres = os.fstat(fileobj.fileno()) linkname = "" stmd = statres.st_mode if stat.S_ISREG(stmd): inode = (statres.st_ino, statres.st_dev) if not self.dereference and statres.st_nlink > 1 and \ inode in self.inodes and arcname != self.inodes[inode]: # Is it a hardlink to an already # archived file? type = LNKTYPE linkname = self.inodes[inode] else: # The inode is added only if its valid. # For win32 it is always 0. type = REGTYPE if inode[0]: self.inodes[inode] = arcname elif stat.S_ISDIR(stmd): type = DIRTYPE elif stat.S_ISFIFO(stmd): type = FIFOTYPE elif stat.S_ISLNK(stmd): type = SYMTYPE linkname = os.readlink(name) elif stat.S_ISCHR(stmd): type = CHRTYPE elif stat.S_ISBLK(stmd): type = BLKTYPE else: return None # Fill the TarInfo object with all # information we can get. 
tarinfo.name = arcname tarinfo.mode = stmd tarinfo.uid = statres.st_uid tarinfo.gid = statres.st_gid if type == REGTYPE: tarinfo.size = statres.st_size else: tarinfo.size = 0 tarinfo.mtime = statres.st_mtime tarinfo.type = type tarinfo.linkname = linkname if pwd: try: tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0] except KeyError: pass if grp: try: tarinfo.gname = grp.getgrgid(tarinfo.gid)[0] except KeyError: pass if type in (CHRTYPE, BLKTYPE): if hasattr(os, "major") and hasattr(os, "minor"): tarinfo.devmajor = os.major(statres.st_rdev) tarinfo.devminor = os.minor(statres.st_rdev) return tarinfo def list(self, verbose=True): """Print a table of contents to sys.stdout. If `verbose' is False, only the names of the members are printed. If it is True, an `ls -l'-like output is produced. """ self._check() for tarinfo in self: if verbose: print(filemode(tarinfo.mode), end=' ') print("%s/%s" % (tarinfo.uname or tarinfo.uid, tarinfo.gname or tarinfo.gid), end=' ') if tarinfo.ischr() or tarinfo.isblk(): print("%10s" % ("%d,%d" \ % (tarinfo.devmajor, tarinfo.devminor)), end=' ') else: print("%10d" % tarinfo.size, end=' ') print("%d-%02d-%02d %02d:%02d:%02d" \ % time.localtime(tarinfo.mtime)[:6], end=' ') print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ') if verbose: if tarinfo.issym(): print("->", tarinfo.linkname, end=' ') if tarinfo.islnk(): print("link to", tarinfo.linkname, end=' ') print() def add(self, name, arcname=None, recursive=True, exclude=None, filter=None): """Add the file `name' to the archive. `name' may be any type of file (directory, fifo, symbolic link, etc.). If given, `arcname' specifies an alternative name for the file in the archive. Directories are added recursively by default. This can be avoided by setting `recursive' to False. `exclude' is a function that should return True for each filename to be excluded. `filter' is a function that expects a TarInfo object argument and returns the changed TarInfo object, if it returns None the TarInfo object will be excluded from the archive. """ self._check("aw") if arcname is None: arcname = name # Exclude pathnames. if exclude is not None: import warnings warnings.warn("use the filter argument instead", DeprecationWarning, 2) if exclude(name): self._dbg(2, "tarfile: Excluded %r" % name) return # Skip if somebody tries to archive the archive... if self.name is not None and os.path.abspath(name) == self.name: self._dbg(2, "tarfile: Skipped %r" % name) return self._dbg(1, name) # Create a TarInfo object from the file. tarinfo = self.gettarinfo(name, arcname) if tarinfo is None: self._dbg(1, "tarfile: Unsupported type %r" % name) return # Change or exclude the TarInfo object. if filter is not None: tarinfo = filter(tarinfo) if tarinfo is None: self._dbg(2, "tarfile: Excluded %r" % name) return # Append the tar header and data to the archive. if tarinfo.isreg(): f = bltn_open(name, "rb") self.addfile(tarinfo, f) f.close() elif tarinfo.isdir(): self.addfile(tarinfo) if recursive: for f in os.listdir(name): self.add(os.path.join(name, f), os.path.join(arcname, f), recursive, exclude, filter=filter) else: self.addfile(tarinfo) def addfile(self, tarinfo, fileobj=None): """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is given, tarinfo.size bytes are read from it and added to the archive. You can create TarInfo objects using gettarinfo(). On Windows platforms, `fileobj' should always be opened with mode 'rb' to avoid irritation about the file size. 
""" self._check("aw") tarinfo = copy.copy(tarinfo) buf = tarinfo.tobuf(self.format, self.encoding, self.errors) self.fileobj.write(buf) self.offset += len(buf) # If there's data to follow, append it. if fileobj is not None: copyfileobj(fileobj, self.fileobj, tarinfo.size) blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) if remainder > 0: self.fileobj.write(NUL * (BLOCKSIZE - remainder)) blocks += 1 self.offset += blocks * BLOCKSIZE self.members.append(tarinfo) def extractall(self, path=".", members=None): """Extract all members from the archive to the current working directory and set owner, modification time and permissions on directories afterwards. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by getmembers(). """ directories = [] if members is None: members = self for tarinfo in members: if tarinfo.isdir(): # Extract directories with a safe mode. directories.append(tarinfo) tarinfo = copy.copy(tarinfo) tarinfo.mode = 0o700 # Do not set_attrs directories, as we will do that further down self.extract(tarinfo, path, set_attrs=not tarinfo.isdir()) # Reverse sort directories. directories.sort(key=lambda a: a.name) directories.reverse() # Set correct owner, mtime and filemode on directories. for tarinfo in directories: dirpath = os.path.join(path, tarinfo.name) try: self.chown(tarinfo, dirpath) self.utime(tarinfo, dirpath) self.chmod(tarinfo, dirpath) except ExtractError as e: if self.errorlevel > 1: raise else: self._dbg(1, "tarfile: %s" % e) def extract(self, member, path="", set_attrs=True): """Extract a member from the archive to the current working directory, using its full name. Its file information is extracted as accurately as possible. `member' may be a filename or a TarInfo object. You can specify a different directory using `path'. File attributes (owner, mtime, mode) are set unless `set_attrs' is False. """ self._check("r") if isinstance(member, str): tarinfo = self.getmember(member) else: tarinfo = member # Prepare the link target for makelink(). if tarinfo.islnk(): tarinfo._link_target = os.path.join(path, tarinfo.linkname) try: self._extract_member(tarinfo, os.path.join(path, tarinfo.name), set_attrs=set_attrs) except EnvironmentError as e: if self.errorlevel > 0: raise else: if e.filename is None: self._dbg(1, "tarfile: %s" % e.strerror) else: self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) except ExtractError as e: if self.errorlevel > 1: raise else: self._dbg(1, "tarfile: %s" % e) def extractfile(self, member): """Extract a member from the archive as a file object. `member' may be a filename or a TarInfo object. If `member' is a regular file, a file-like object is returned. If `member' is a link, a file-like object is constructed from the link's target. If `member' is none of the above, None is returned. The file-like object is read-only and provides the following methods: read(), readline(), readlines(), seek() and tell() """ self._check("r") if isinstance(member, str): tarinfo = self.getmember(member) else: tarinfo = member if tarinfo.isreg(): return self.fileobject(self, tarinfo) elif tarinfo.type not in SUPPORTED_TYPES: # If a member's type is unknown, it is treated as a # regular file. return self.fileobject(self, tarinfo) elif tarinfo.islnk() or tarinfo.issym(): if isinstance(self.fileobj, _Stream): # A small but ugly workaround for the case that someone tries # to extract a (sym)link as a file-object from a non-seekable # stream of tar blocks. 
raise StreamError("cannot extract (sym)link as file object") else: # A (sym)link's file object is its target's file object. return self.extractfile(self._find_link_target(tarinfo)) else: # If there's no data associated with the member (directory, chrdev, # blkdev, etc.), return None instead of a file object. return None def _extract_member(self, tarinfo, targetpath, set_attrs=True): """Extract the TarInfo object tarinfo to a physical file called targetpath. """ # Fetch the TarInfo object for the given name # and build the destination pathname, replacing # forward slashes to platform specific separators. targetpath = targetpath.rstrip("/") targetpath = targetpath.replace("/", os.sep) # Create all upper directories. upperdirs = os.path.dirname(targetpath) if upperdirs and not os.path.exists(upperdirs): # Create directories that are not part of the archive with # default permissions. os.makedirs(upperdirs) if tarinfo.islnk() or tarinfo.issym(): self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) else: self._dbg(1, tarinfo.name) if tarinfo.isreg(): self.makefile(tarinfo, targetpath) elif tarinfo.isdir(): self.makedir(tarinfo, targetpath) elif tarinfo.isfifo(): self.makefifo(tarinfo, targetpath) elif tarinfo.ischr() or tarinfo.isblk(): self.makedev(tarinfo, targetpath) elif tarinfo.islnk() or tarinfo.issym(): self.makelink(tarinfo, targetpath) elif tarinfo.type not in SUPPORTED_TYPES: self.makeunknown(tarinfo, targetpath) else: self.makefile(tarinfo, targetpath) if set_attrs: self.chown(tarinfo, targetpath) if not tarinfo.issym(): self.chmod(tarinfo, targetpath) self.utime(tarinfo, targetpath) #-------------------------------------------------------------------------- # Below are the different file methods. They are called via # _extract_member() when extract() is called. They can be replaced in a # subclass to implement other functionality. def makedir(self, tarinfo, targetpath): """Make a directory called targetpath. """ try: # Use a safe mode for the directory, the real mode is set # later in _extract_member(). os.mkdir(targetpath, 0o700) except EnvironmentError as e: if e.errno != errno.EEXIST: raise def makefile(self, tarinfo, targetpath): """Make a file called targetpath. """ source = self.fileobj source.seek(tarinfo.offset_data) target = bltn_open(targetpath, "wb") if tarinfo.sparse is not None: for offset, size in tarinfo.sparse: target.seek(offset) copyfileobj(source, target, size) else: copyfileobj(source, target, tarinfo.size) target.seek(tarinfo.size) target.truncate() target.close() def makeunknown(self, tarinfo, targetpath): """Make a file from a TarInfo object with an unknown type at targetpath. """ self.makefile(tarinfo, targetpath) self._dbg(1, "tarfile: Unknown file type %r, " \ "extracted as regular file." % tarinfo.type) def makefifo(self, tarinfo, targetpath): """Make a fifo called targetpath. """ if hasattr(os, "mkfifo"): os.mkfifo(targetpath) else: raise ExtractError("fifo not supported by system") def makedev(self, tarinfo, targetpath): """Make a character or block device called targetpath. """ if not hasattr(os, "mknod") or not hasattr(os, "makedev"): raise ExtractError("special devices not supported by system") mode = tarinfo.mode if tarinfo.isblk(): mode |= stat.S_IFBLK else: mode |= stat.S_IFCHR os.mknod(targetpath, mode, os.makedev(tarinfo.devmajor, tarinfo.devminor)) def makelink(self, tarinfo, targetpath): """Make a (symbolic) link called targetpath. 
If it cannot be created (platform limitation), we try to make a copy of the referenced file instead of a link. """ try: # For systems that support symbolic and hard links. if tarinfo.issym(): os.symlink(tarinfo.linkname, targetpath) else: # See extract(). if os.path.exists(tarinfo._link_target): os.link(tarinfo._link_target, targetpath) else: self._extract_member(self._find_link_target(tarinfo), targetpath) except symlink_exception: if tarinfo.issym(): linkpath = os.path.join(os.path.dirname(tarinfo.name), tarinfo.linkname) else: linkpath = tarinfo.linkname else: try: self._extract_member(self._find_link_target(tarinfo), targetpath) except KeyError: raise ExtractError("unable to resolve link inside archive") def chown(self, tarinfo, targetpath): """Set owner of targetpath according to tarinfo. """ if pwd and hasattr(os, "geteuid") and os.geteuid() == 0: # We have to be root to do so. try: g = grp.getgrnam(tarinfo.gname)[2] except KeyError: g = tarinfo.gid try: u = pwd.getpwnam(tarinfo.uname)[2] except KeyError: u = tarinfo.uid try: if tarinfo.issym() and hasattr(os, "lchown"): os.lchown(targetpath, u, g) else: if sys.platform != "os2emx": os.chown(targetpath, u, g) except EnvironmentError as e: raise ExtractError("could not change owner") def chmod(self, tarinfo, targetpath): """Set file permissions of targetpath according to tarinfo. """ if hasattr(os, 'chmod'): try: os.chmod(targetpath, tarinfo.mode) except EnvironmentError as e: raise ExtractError("could not change mode") def utime(self, tarinfo, targetpath): """Set modification time of targetpath according to tarinfo. """ if not hasattr(os, 'utime'): return try: os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime)) except EnvironmentError as e: raise ExtractError("could not change modification time") #-------------------------------------------------------------------------- def next(self): """Return the next member of the archive as a TarInfo object, when TarFile is opened for reading. Return None if there is no more available. """ self._check("ra") if self.firstmember is not None: m = self.firstmember self.firstmember = None return m # Read the next block. self.fileobj.seek(self.offset) tarinfo = None while True: try: tarinfo = self.tarinfo.fromtarfile(self) except EOFHeaderError as e: if self.ignore_zeros: self._dbg(2, "0x%X: %s" % (self.offset, e)) self.offset += BLOCKSIZE continue except InvalidHeaderError as e: if self.ignore_zeros: self._dbg(2, "0x%X: %s" % (self.offset, e)) self.offset += BLOCKSIZE continue elif self.offset == 0: raise ReadError(str(e)) except EmptyHeaderError: if self.offset == 0: raise ReadError("empty file") except TruncatedHeaderError as e: if self.offset == 0: raise ReadError(str(e)) except SubsequentHeaderError as e: raise ReadError(str(e)) break if tarinfo is not None: self.members.append(tarinfo) else: self._loaded = True return tarinfo #-------------------------------------------------------------------------- # Little helper methods: def _getmember(self, name, tarinfo=None, normalize=False): """Find an archive member by name from bottom to top. If tarinfo is given, it is used as the starting point. """ # Ensure that all members have been loaded. members = self.getmembers() # Limit the member search list up to tarinfo. 
if tarinfo is not None: members = members[:members.index(tarinfo)] if normalize: name = os.path.normpath(name) for member in reversed(members): if normalize: member_name = os.path.normpath(member.name) else: member_name = member.name if name == member_name: return member def _load(self): """Read through the entire archive file and look for readable members. """ while True: tarinfo = self.next() if tarinfo is None: break self._loaded = True def _check(self, mode=None): """Check if TarFile is still open, and if the operation's mode corresponds to TarFile's mode. """ if self.closed: raise IOError("%s is closed" % self.__class__.__name__) if mode is not None and self.mode not in mode: raise IOError("bad operation for mode %r" % self.mode) def _find_link_target(self, tarinfo): """Find the target member of a symlink or hardlink member in the archive. """ if tarinfo.issym(): # Always search the entire archive. linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname limit = None else: # Search the archive before the link, because a hard link is # just a reference to an already archived file. linkname = tarinfo.linkname limit = tarinfo member = self._getmember(linkname, tarinfo=limit, normalize=True) if member is None: raise KeyError("linkname %r not found" % linkname) return member def __iter__(self): """Provide an iterator object. """ if self._loaded: return iter(self.members) else: return TarIter(self) def _dbg(self, level, msg): """Write debugging output to sys.stderr. """ if level <= self.debug: print(msg, file=sys.stderr) def __enter__(self): self._check() return self def __exit__(self, type, value, traceback): if type is None: self.close() else: # An exception occurred. We must not call close() because # it would try to write end-of-archive blocks and padding. if not self._extfileobj: self.fileobj.close() self.closed = True # class TarFile class TarIter(object): """Iterator Class. for tarinfo in TarFile(...): suite... """ def __init__(self, tarfile): """Construct a TarIter object. """ self.tarfile = tarfile self.index = 0 def __iter__(self): """Return iterator object. """ return self def __next__(self): """Return the next item using TarFile's next() method. When all members have been read, set TarFile as _loaded. """ # Fix for SF #1100429: Under rare circumstances it can # happen that getmembers() is called during iteration, # which will cause TarIter to stop prematurely. if not self.tarfile._loaded: tarinfo = self.tarfile.next() if not tarinfo: self.tarfile._loaded = True raise StopIteration else: try: tarinfo = self.tarfile.members[self.index] except IndexError: raise StopIteration self.index += 1 return tarinfo next = __next__ # for Python 2.x #-------------------- # exported functions #-------------------- def is_tarfile(name): """Return True if name points to a tar archive that we are able to handle, else return False. """ try: t = open(name) t.close() return True except TarError: return False bltn_open = open open = TarFile.open
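# ---------------------------------------------------------------------------
# Round-trip sketch (not part of the vendored module; added for illustration
# and guarded so it only runs when this file is executed directly). It uses
# only names defined above: TarFile.open, TarInfo and addfile().
if __name__ == '__main__':
    import io

    _buf = io.BytesIO()
    _data = b"hello world\n"
    with TarFile.open(fileobj=_buf, mode="w") as _tar:
        _info = TarInfo(name="hello.txt")
        _info.size = len(_data)
        _tar.addfile(_info, io.BytesIO(_data))

    _buf.seek(0)
    with TarFile.open(fileobj=_buf, mode="r") as _tar:
        print(_tar.getnames())   # -> ['hello.txt']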
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/distlib/_backport/sysconfig.py
# -*- coding: utf-8 -*- # # Copyright (C) 2012 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # """Access to Python's configuration information.""" import codecs import os import re import sys from os.path import pardir, realpath try: import configparser except ImportError: import ConfigParser as configparser __all__ = [ 'get_config_h_filename', 'get_config_var', 'get_config_vars', 'get_makefile_filename', 'get_path', 'get_path_names', 'get_paths', 'get_platform', 'get_python_version', 'get_scheme_names', 'parse_config_h', ] def _safe_realpath(path): try: return realpath(path) except OSError: return path if sys.executable: _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable)) else: # sys.executable can be empty if argv[0] has been changed and Python is # unable to retrieve the real program name _PROJECT_BASE = _safe_realpath(os.getcwd()) if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower(): _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir)) # PC/VS7.1 if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower(): _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) # PC/AMD64 if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower(): _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) def is_python_build(): for fn in ("Setup.dist", "Setup.local"): if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)): return True return False _PYTHON_BUILD = is_python_build() _cfg_read = False def _ensure_cfg_read(): global _cfg_read if not _cfg_read: from ..resources import finder backport_package = __name__.rsplit('.', 1)[0] _finder = finder(backport_package) _cfgfile = _finder.find('sysconfig.cfg') assert _cfgfile, 'sysconfig.cfg exists' with _cfgfile.as_stream() as s: _SCHEMES.readfp(s) if _PYTHON_BUILD: for scheme in ('posix_prefix', 'posix_home'): _SCHEMES.set(scheme, 'include', '{srcdir}/Include') _SCHEMES.set(scheme, 'platinclude', '{projectbase}/.') _cfg_read = True _SCHEMES = configparser.RawConfigParser() _VAR_REPL = re.compile(r'\{([^{]*?)\}') def _expand_globals(config): _ensure_cfg_read() if config.has_section('globals'): globals = config.items('globals') else: globals = tuple() sections = config.sections() for section in sections: if section == 'globals': continue for option, value in globals: if config.has_option(section, option): continue config.set(section, option, value) config.remove_section('globals') # now expanding local variables defined in the cfg file # for section in config.sections(): variables = dict(config.items(section)) def _replacer(matchobj): name = matchobj.group(1) if name in variables: return variables[name] return matchobj.group(0) for option, value in config.items(section): config.set(section, option, _VAR_REPL.sub(_replacer, value)) #_expand_globals(_SCHEMES) _PY_VERSION = '%s.%s.%s' % sys.version_info[:3] _PY_VERSION_SHORT = '%s.%s' % sys.version_info[:2] _PY_VERSION_SHORT_NO_DOT = '%s%s' % sys.version_info[:2] _PREFIX = os.path.normpath(sys.prefix) _EXEC_PREFIX = os.path.normpath(sys.exec_prefix) _CONFIG_VARS = None _USER_BASE = None def _subst_vars(path, local_vars): """In the string `path`, replace tokens like {some.thing} with the corresponding value from the map `local_vars`. If there is no corresponding value, leave the token unchanged. 
""" def _replacer(matchobj): name = matchobj.group(1) if name in local_vars: return local_vars[name] elif name in os.environ: return os.environ[name] return matchobj.group(0) return _VAR_REPL.sub(_replacer, path) def _extend_dict(target_dict, other_dict): target_keys = target_dict.keys() for key, value in other_dict.items(): if key in target_keys: continue target_dict[key] = value def _expand_vars(scheme, vars): res = {} if vars is None: vars = {} _extend_dict(vars, get_config_vars()) for key, value in _SCHEMES.items(scheme): if os.name in ('posix', 'nt'): value = os.path.expanduser(value) res[key] = os.path.normpath(_subst_vars(value, vars)) return res def format_value(value, vars): def _replacer(matchobj): name = matchobj.group(1) if name in vars: return vars[name] return matchobj.group(0) return _VAR_REPL.sub(_replacer, value) def _get_default_scheme(): if os.name == 'posix': # the default scheme for posix is posix_prefix return 'posix_prefix' return os.name def _getuserbase(): env_base = os.environ.get("PYTHONUSERBASE", None) def joinuser(*args): return os.path.expanduser(os.path.join(*args)) # what about 'os2emx', 'riscos' ? if os.name == "nt": base = os.environ.get("APPDATA") or "~" if env_base: return env_base else: return joinuser(base, "Python") if sys.platform == "darwin": framework = get_config_var("PYTHONFRAMEWORK") if framework: if env_base: return env_base else: return joinuser("~", "Library", framework, "%d.%d" % sys.version_info[:2]) if env_base: return env_base else: return joinuser("~", ".local") def _parse_makefile(filename, vars=None): """Parse a Makefile-style file. A dictionary containing name/value pairs is returned. If an optional dictionary is passed in as the second argument, it is used instead of a new dictionary. """ # Regexes needed for parsing Makefile (and similar syntaxes, # like old-style Setup files). _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") if vars is None: vars = {} done = {} notdone = {} with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f: lines = f.readlines() for line in lines: if line.startswith('#') or line.strip() == '': continue m = _variable_rx.match(line) if m: n, v = m.group(1, 2) v = v.strip() # `$$' is a literal `$' in make tmpv = v.replace('$$', '') if "$" in tmpv: notdone[n] = v else: try: v = int(v) except ValueError: # insert literal `$' done[n] = v.replace('$$', '$') else: done[n] = v # do variable interpolation here variables = list(notdone.keys()) # Variables with a 'PY_' prefix in the makefile. These need to # be made available without that prefix through sysconfig. # Special care is needed to ensure that variable expansion works, even # if the expansion uses the name without a prefix. 
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') while len(variables) > 0: for name in tuple(variables): value = notdone[name] m = _findvar1_rx.search(value) or _findvar2_rx.search(value) if m is not None: n = m.group(1) found = True if n in done: item = str(done[n]) elif n in notdone: # get it on a subsequent round found = False elif n in os.environ: # do it like make: fall back to environment item = os.environ[n] elif n in renamed_variables: if (name.startswith('PY_') and name[3:] in renamed_variables): item = "" elif 'PY_' + n in notdone: found = False else: item = str(done['PY_' + n]) else: done[n] = item = "" if found: after = value[m.end():] value = value[:m.start()] + item + after if "$" in after: notdone[name] = value else: try: value = int(value) except ValueError: done[name] = value.strip() else: done[name] = value variables.remove(name) if (name.startswith('PY_') and name[3:] in renamed_variables): name = name[3:] if name not in done: done[name] = value else: # bogus variable reference (e.g. "prefix=$/opt/python"); # just drop it since we can't deal done[name] = value variables.remove(name) # strip spurious spaces for k, v in done.items(): if isinstance(v, str): done[k] = v.strip() # save the results in the global dictionary vars.update(done) return vars def get_makefile_filename(): """Return the path of the Makefile.""" if _PYTHON_BUILD: return os.path.join(_PROJECT_BASE, "Makefile") if hasattr(sys, 'abiflags'): config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags) else: config_dir_name = 'config' return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile') def _init_posix(vars): """Initialize the module as appropriate for POSIX systems.""" # load the installed Makefile: makefile = get_makefile_filename() try: _parse_makefile(makefile, vars) except IOError as e: msg = "invalid Python installation: unable to open %s" % makefile if hasattr(e, "strerror"): msg = msg + " (%s)" % e.strerror raise IOError(msg) # load the installed pyconfig.h: config_h = get_config_h_filename() try: with open(config_h) as f: parse_config_h(f, vars) except IOError as e: msg = "invalid Python installation: unable to open %s" % config_h if hasattr(e, "strerror"): msg = msg + " (%s)" % e.strerror raise IOError(msg) # On AIX, there are wrong paths to the linker scripts in the Makefile # -- these paths are relative to the Python source, but when installed # the scripts are in another directory. if _PYTHON_BUILD: vars['LDSHARED'] = vars['BLDSHARED'] def _init_non_posix(vars): """Initialize the module as appropriate for NT""" # set basic install directories vars['LIBDEST'] = get_path('stdlib') vars['BINLIBDEST'] = get_path('platstdlib') vars['INCLUDEPY'] = get_path('include') vars['SO'] = '.pyd' vars['EXE'] = '.exe' vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) # # public APIs # def parse_config_h(fp, vars=None): """Parse a config.h-style file. A dictionary containing name/value pairs is returned. If an optional dictionary is passed in as the second argument, it is used instead of a new dictionary. 
""" if vars is None: vars = {} define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") while True: line = fp.readline() if not line: break m = define_rx.match(line) if m: n, v = m.group(1, 2) try: v = int(v) except ValueError: pass vars[n] = v else: m = undef_rx.match(line) if m: vars[m.group(1)] = 0 return vars def get_config_h_filename(): """Return the path of pyconfig.h.""" if _PYTHON_BUILD: if os.name == "nt": inc_dir = os.path.join(_PROJECT_BASE, "PC") else: inc_dir = _PROJECT_BASE else: inc_dir = get_path('platinclude') return os.path.join(inc_dir, 'pyconfig.h') def get_scheme_names(): """Return a tuple containing the schemes names.""" return tuple(sorted(_SCHEMES.sections())) def get_path_names(): """Return a tuple containing the paths names.""" # xxx see if we want a static list return _SCHEMES.options('posix_prefix') def get_paths(scheme=_get_default_scheme(), vars=None, expand=True): """Return a mapping containing an install scheme. ``scheme`` is the install scheme name. If not provided, it will return the default scheme for the current platform. """ _ensure_cfg_read() if expand: return _expand_vars(scheme, vars) else: return dict(_SCHEMES.items(scheme)) def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True): """Return a path corresponding to the scheme. ``scheme`` is the install scheme name. """ return get_paths(scheme, vars, expand)[name] def get_config_vars(*args): """With no arguments, return a dictionary of all configuration variables relevant for the current platform. On Unix, this means every variable defined in Python's installed Makefile; On Windows and Mac OS it's a much smaller set. With arguments, return a list of values that result from looking up each argument in the configuration variable dictionary. """ global _CONFIG_VARS if _CONFIG_VARS is None: _CONFIG_VARS = {} # Normalized versions of prefix and exec_prefix are handy to have; # in fact, these are the standard versions used most places in the # distutils2 module. _CONFIG_VARS['prefix'] = _PREFIX _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX _CONFIG_VARS['py_version'] = _PY_VERSION _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2] _CONFIG_VARS['base'] = _PREFIX _CONFIG_VARS['platbase'] = _EXEC_PREFIX _CONFIG_VARS['projectbase'] = _PROJECT_BASE try: _CONFIG_VARS['abiflags'] = sys.abiflags except AttributeError: # sys.abiflags may not be defined on all platforms. _CONFIG_VARS['abiflags'] = '' if os.name in ('nt', 'os2'): _init_non_posix(_CONFIG_VARS) if os.name == 'posix': _init_posix(_CONFIG_VARS) # Setting 'userbase' is done below the call to the # init function to enable using 'get_config_var' in # the init-function. if sys.version >= '2.6': _CONFIG_VARS['userbase'] = _getuserbase() if 'srcdir' not in _CONFIG_VARS: _CONFIG_VARS['srcdir'] = _PROJECT_BASE else: _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir']) # Convert srcdir into an absolute path if it appears necessary. # Normally it is relative to the build directory. However, during # testing, for example, we might be running a non-installed python # from a different directory. if _PYTHON_BUILD and os.name == "posix": base = _PROJECT_BASE try: cwd = os.getcwd() except OSError: cwd = None if (not os.path.isabs(_CONFIG_VARS['srcdir']) and base != cwd): # srcdir is relative and we are not in the same directory # as the executable. 
                # Assume executable is in the build directory and make
                # srcdir absolute.
                srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
                _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)

        if sys.platform == 'darwin':
            kernel_version = os.uname()[2]  # Kernel version (8.4.3)
            major_version = int(kernel_version.split('.')[0])

            if major_version < 8:
                # On Mac OS X before 10.4, check if -arch and -isysroot
                # are in CFLAGS or LDFLAGS and remove them if they are.
                # This is needed when building extensions on a 10.3 system
                # using a universal build of python.
                for key in ('LDFLAGS', 'BASECFLAGS',
                        # a number of derived variables. These need to be
                        # patched up as well.
                        'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                    flags = _CONFIG_VARS[key]
                    flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
                    flags = re.sub('-isysroot [^ \t]*', ' ', flags)
                    _CONFIG_VARS[key] = flags

            else:
                # Allow the user to override the architecture flags using
                # an environment variable.
                # NOTE: This name was introduced by Apple in OSX 10.5 and
                # is used by several scripting languages distributed with
                # that OS release.
                if 'ARCHFLAGS' in os.environ:
                    arch = os.environ['ARCHFLAGS']
                    for key in ('LDFLAGS', 'BASECFLAGS',
                            # a number of derived variables. These need to be
                            # patched up as well.
                            'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                        flags = _CONFIG_VARS[key]
                        flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
                        flags = flags + ' ' + arch
                        _CONFIG_VARS[key] = flags

                # If we're on OSX 10.5 or later and the user tries to
                # compile an extension using an SDK that is not present
                # on the current machine it is better to not use an SDK
                # than to fail.
                #
                # The major usecase for this is users using a Python.org
                # binary installer on OSX 10.6: that installer uses
                # the 10.4u SDK, but that SDK is not installed by default
                # when you install Xcode.
                #
                CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
                m = re.search(r'-isysroot\s+(\S+)', CFLAGS)
                if m is not None:
                    sdk = m.group(1)
                    if not os.path.exists(sdk):
                        for key in ('LDFLAGS', 'BASECFLAGS',
                                # a number of derived variables. These need to be
                                # patched up as well.
                                'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                            flags = _CONFIG_VARS[key]
                            flags = re.sub(r'-isysroot\s+\S+(\s|$)', ' ', flags)
                            _CONFIG_VARS[key] = flags

    if args:
        vals = []
        for name in args:
            vals.append(_CONFIG_VARS.get(name))
        return vals
    else:
        return _CONFIG_VARS


def get_config_var(name):
    """Return the value of a single variable using the dictionary returned
    by 'get_config_vars()'.

    Equivalent to get_config_vars().get(name)
    """
    return get_config_vars().get(name)


def get_platform():
    """Return a string that identifies the current platform.

    This is used mainly to distinguish platform-specific build directories
    and platform-specific built distributions. Typically includes the OS
    name and version and the architecture (as supplied by 'os.uname()'),
    although the exact information included depends on the OS; eg. for IRIX
    the architecture isn't particularly important (IRIX only runs on SGI
    hardware), but for Linux the kernel version isn't particularly
    important.

    Examples of returned values:
       linux-i586
       linux-alpha (?)
       solaris-2.6-sun4u
       irix-5.3
       irix64-6.2

    Windows will return one of:
       win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
       win-ia64 (64bit Windows on Itanium)
       win32 (all others - specifically, sys.platform is returned)

    For other non-POSIX platforms, currently just returns 'sys.platform'.
    """
    if os.name == 'nt':
        # sniff sys.version for architecture.
        prefix = " bit ("
        i = sys.version.find(prefix)
        if i == -1:
            return sys.platform
        j = sys.version.find(")", i)
        look = sys.version[i+len(prefix):j].lower()
        if look == 'amd64':
            return 'win-amd64'
        if look == 'itanium':
            return 'win-ia64'
        return sys.platform

    if os.name != "posix" or not hasattr(os, 'uname'):
        # XXX what about the architecture? NT is Intel or Alpha,
        # Mac OS is M68k or PPC, etc.
        return sys.platform

    # Try to distinguish various flavours of Unix
    osname, host, release, version, machine = os.uname()

    # Convert the OS name to lowercase, remove '/' characters
    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
    osname = osname.lower().replace('/', '')
    machine = machine.replace(' ', '_')
    machine = machine.replace('/', '-')

    if osname[:5] == "linux":
        # At least on Linux/Intel, 'machine' is the processor --
        # i386, etc.
        # XXX what about Alpha, SPARC, etc?
        return "%s-%s" % (osname, machine)
    elif osname[:5] == "sunos":
        if release[0] >= "5":
            # SunOS 5 == Solaris 2
            osname = "solaris"
            release = "%d.%s" % (int(release[0]) - 3, release[2:])
        # fall through to standard osname-release-machine representation
    elif osname[:4] == "irix":
        # could be "irix64"!
        return "%s-%s" % (osname, release)
    elif osname[:3] == "aix":
        return "%s-%s.%s" % (osname, version, release)
    elif osname[:6] == "cygwin":
        osname = "cygwin"
        rel_re = re.compile(r'[\d.]+')
        m = rel_re.match(release)
        if m:
            release = m.group()
    elif osname[:6] == "darwin":
        #
        # For our purposes, we'll assume that the system version from
        # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
        # to. This makes the compatibility story a bit more sane because the
        # machine is going to compile and link as if it were
        # MACOSX_DEPLOYMENT_TARGET.
        cfgvars = get_config_vars()
        macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')

        if True:
            # Always calculate the release of the running machine,
            # needed to determine if we can build fat binaries or not.
            macrelease = macver
            # Get the system version. Reading this plist is a documented
            # way to get the system version (see the documentation for
            # the Gestalt Manager)
            try:
                f = open('/System/Library/CoreServices/SystemVersion.plist')
            except IOError:
                # We're on a plain darwin box, fall back to the default
                # behaviour.
                pass
            else:
                try:
                    m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
                                  r'<string>(.*?)</string>', f.read())
                finally:
                    f.close()
                if m is not None:
                    macrelease = '.'.join(m.group(1).split('.')[:2])
                # else: fall back to the default behaviour

        if not macver:
            macver = macrelease

        if macver:
            release = macver
            osname = "macosx"

            if ((macrelease + '.') >= '10.4.' and
                '-arch' in get_config_vars().get('CFLAGS', '').strip()):
                # The universal build will build fat binaries, but not on
                # systems before 10.4
                #
                # Try to detect 4-way universal builds, those have machine-type
                # 'universal' instead of 'fat'.
                machine = 'fat'
                cflags = get_config_vars().get('CFLAGS')

                archs = re.findall(r'-arch\s+(\S+)', cflags)
                archs = tuple(sorted(set(archs)))

                if len(archs) == 1:
                    machine = archs[0]
                elif archs == ('i386', 'ppc'):
                    machine = 'fat'
                elif archs == ('i386', 'x86_64'):
                    machine = 'intel'
                elif archs == ('i386', 'ppc', 'x86_64'):
                    machine = 'fat3'
                elif archs == ('ppc64', 'x86_64'):
                    machine = 'fat64'
                elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                    machine = 'universal'
                else:
                    raise ValueError(
                        "Don't know machine value for archs=%r" % (archs,))

            elif machine == 'i386':
                # On OSX the machine type returned by uname is always the
                # 32-bit variant, even if the executable architecture is
                # the 64-bit variant
                if sys.maxsize >= 2**32:
                    machine = 'x86_64'

            elif machine in ('PowerPC', 'Power_Macintosh'):
                # Pick a sane name for the PPC architecture.
                # See 'i386' case
                if sys.maxsize >= 2**32:
                    machine = 'ppc64'
                else:
                    machine = 'ppc'

    return "%s-%s-%s" % (osname, release, machine)


def get_python_version():
    return _PY_VERSION_SHORT


def _print_dict(title, data):
    for index, (key, value) in enumerate(sorted(data.items())):
        if index == 0:
            print('%s: ' % (title))
        print('\t%s = "%s"' % (key, value))


def _main():
    """Display all information sysconfig contains."""
    print('Platform: "%s"' % get_platform())
    print('Python version: "%s"' % get_python_version())
    print('Current installation scheme: "%s"' % _get_default_scheme())
    print()
    _print_dict('Paths', get_paths())
    print()
    _print_dict('Variables', get_config_vars())


if __name__ == '__main__':
    _main()
0
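For context, a minimal sketch of how the platform/config API above is typically queried; it uses the standard-library sysconfig module, which this vendored copy mirrors, and the printed values naturally differ per machine:

import sysconfig

print(sysconfig.get_platform())            # e.g. 'win-amd64' or 'linux-x86_64'
print(sysconfig.get_python_version())      # e.g. '3.8'
print(sysconfig.get_config_var('prefix'))  # single-variable lookup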
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/colorama/__init__.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
from .initialise import init, deinit, reinit, colorama_text
from .ansi import Fore, Back, Style, Cursor
from .ansitowin32 import AnsiToWin32

__version__ = '0.4.3'
0
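A minimal usage sketch of the public API exported above, assuming the package is importable under its usual top-level name rather than the vendored pip._vendor.colorama path:

from colorama import init, deinit, Fore, Style

init(autoreset=True)            # wrap stdout/stderr; reset style after each write
print(Fore.RED + 'in red')      # color is reset automatically
print(Style.BRIGHT + 'bright')
deinit()                        # restore the original streams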
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/colorama/win32.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. # from winbase.h STDOUT = -11 STDERR = -12 try: import ctypes from ctypes import LibraryLoader windll = LibraryLoader(ctypes.WinDLL) from ctypes import wintypes except (AttributeError, ImportError): windll = None SetConsoleTextAttribute = lambda *_: None winapi_test = lambda *_: None else: from ctypes import byref, Structure, c_char, POINTER COORD = wintypes._COORD class CONSOLE_SCREEN_BUFFER_INFO(Structure): """struct in wincon.h.""" _fields_ = [ ("dwSize", COORD), ("dwCursorPosition", COORD), ("wAttributes", wintypes.WORD), ("srWindow", wintypes.SMALL_RECT), ("dwMaximumWindowSize", COORD), ] def __str__(self): return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % ( self.dwSize.Y, self.dwSize.X , self.dwCursorPosition.Y, self.dwCursorPosition.X , self.wAttributes , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X ) _GetStdHandle = windll.kernel32.GetStdHandle _GetStdHandle.argtypes = [ wintypes.DWORD, ] _GetStdHandle.restype = wintypes.HANDLE _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo _GetConsoleScreenBufferInfo.argtypes = [ wintypes.HANDLE, POINTER(CONSOLE_SCREEN_BUFFER_INFO), ] _GetConsoleScreenBufferInfo.restype = wintypes.BOOL _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute _SetConsoleTextAttribute.argtypes = [ wintypes.HANDLE, wintypes.WORD, ] _SetConsoleTextAttribute.restype = wintypes.BOOL _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition _SetConsoleCursorPosition.argtypes = [ wintypes.HANDLE, COORD, ] _SetConsoleCursorPosition.restype = wintypes.BOOL _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA _FillConsoleOutputCharacterA.argtypes = [ wintypes.HANDLE, c_char, wintypes.DWORD, COORD, POINTER(wintypes.DWORD), ] _FillConsoleOutputCharacterA.restype = wintypes.BOOL _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute _FillConsoleOutputAttribute.argtypes = [ wintypes.HANDLE, wintypes.WORD, wintypes.DWORD, COORD, POINTER(wintypes.DWORD), ] _FillConsoleOutputAttribute.restype = wintypes.BOOL _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW _SetConsoleTitleW.argtypes = [ wintypes.LPCWSTR ] _SetConsoleTitleW.restype = wintypes.BOOL def _winapi_test(handle): csbi = CONSOLE_SCREEN_BUFFER_INFO() success = _GetConsoleScreenBufferInfo( handle, byref(csbi)) return bool(success) def winapi_test(): return any(_winapi_test(h) for h in (_GetStdHandle(STDOUT), _GetStdHandle(STDERR))) def GetConsoleScreenBufferInfo(stream_id=STDOUT): handle = _GetStdHandle(stream_id) csbi = CONSOLE_SCREEN_BUFFER_INFO() success = _GetConsoleScreenBufferInfo( handle, byref(csbi)) return csbi def SetConsoleTextAttribute(stream_id, attrs): handle = _GetStdHandle(stream_id) return _SetConsoleTextAttribute(handle, attrs) def SetConsoleCursorPosition(stream_id, position, adjust=True): position = COORD(*position) # If the position is out of range, do nothing. if position.Y <= 0 or position.X <= 0: return # Adjust for Windows' SetConsoleCursorPosition: # 1. being 0-based, while ANSI is 1-based. # 2. expecting (x,y), while ANSI uses (y,x). 
adjusted_position = COORD(position.Y - 1, position.X - 1) if adjust: # Adjust for viewport's scroll position sr = GetConsoleScreenBufferInfo(STDOUT).srWindow adjusted_position.Y += sr.Top adjusted_position.X += sr.Left # Resume normal processing handle = _GetStdHandle(stream_id) return _SetConsoleCursorPosition(handle, adjusted_position) def FillConsoleOutputCharacter(stream_id, char, length, start): handle = _GetStdHandle(stream_id) char = c_char(char.encode()) length = wintypes.DWORD(length) num_written = wintypes.DWORD(0) # Note that this is hard-coded for ANSI (vs wide) bytes. success = _FillConsoleOutputCharacterA( handle, char, length, start, byref(num_written)) return num_written.value def FillConsoleOutputAttribute(stream_id, attr, length, start): ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )''' handle = _GetStdHandle(stream_id) attribute = wintypes.WORD(attr) length = wintypes.DWORD(length) num_written = wintypes.DWORD(0) # Note that this is hard-coded for ANSI (vs wide) bytes. return _FillConsoleOutputAttribute( handle, attribute, length, start, byref(num_written)) def SetConsoleTitle(title): return _SetConsoleTitleW(title)
0
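A hedged sketch of exercising these wrappers; it is only meaningful on Windows with a real console attached (elsewhere windll is None and only no-op stubs exist):

from colorama import win32  # assumes the top-level package name

if win32.winapi_test():
    csbi = win32.GetConsoleScreenBufferInfo(win32.STDOUT)
    print('buffer size: %dx%d' % (csbi.dwSize.X, csbi.dwSize.Y))
    print('cursor at: (%d, %d)' % (csbi.dwCursorPosition.X, csbi.dwCursorPosition.Y))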
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/colorama/ansitowin32.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. import re import sys import os from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style from .winterm import WinTerm, WinColor, WinStyle from .win32 import windll, winapi_test winterm = None if windll is not None: winterm = WinTerm() class StreamWrapper(object): ''' Wraps a stream (such as stdout), acting as a transparent proxy for all attribute access apart from method 'write()', which is delegated to our Converter instance. ''' def __init__(self, wrapped, converter): # double-underscore everything to prevent clashes with names of # attributes on the wrapped stream object. self.__wrapped = wrapped self.__convertor = converter def __getattr__(self, name): return getattr(self.__wrapped, name) def __enter__(self, *args, **kwargs): # special method lookup bypasses __getattr__/__getattribute__, see # https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit # thus, contextlib magic methods are not proxied via __getattr__ return self.__wrapped.__enter__(*args, **kwargs) def __exit__(self, *args, **kwargs): return self.__wrapped.__exit__(*args, **kwargs) def write(self, text): self.__convertor.write(text) def isatty(self): stream = self.__wrapped if 'PYCHARM_HOSTED' in os.environ: if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__): return True try: stream_isatty = stream.isatty except AttributeError: return False else: return stream_isatty() @property def closed(self): stream = self.__wrapped try: return stream.closed except AttributeError: return True class AnsiToWin32(object): ''' Implements a 'write()' method which, on Windows, will strip ANSI character sequences from the text, and if outputting to a tty, will convert them into win32 function calls. ''' ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer ANSI_OSC_RE = re.compile('\001?\033\\]((?:.|;)*?)(\x07)\002?') # Operating System Command def __init__(self, wrapped, convert=None, strip=None, autoreset=False): # The wrapped stream (normally sys.stdout or sys.stderr) self.wrapped = wrapped # should we reset colors to defaults after every .write() self.autoreset = autoreset # create the proxy wrapping our output stream self.stream = StreamWrapper(wrapped, self) on_windows = os.name == 'nt' # We test if the WinAPI works, because even if we are on Windows # we may be using a terminal that doesn't support the WinAPI # (e.g. Cygwin Terminal). In this case it's up to the terminal # to support the ANSI codes. conversion_supported = on_windows and winapi_test() # should we strip ANSI sequences from our output? if strip is None: strip = conversion_supported or (not self.stream.closed and not self.stream.isatty()) self.strip = strip # should we should convert ANSI sequences into win32 calls? if convert is None: convert = conversion_supported and not self.stream.closed and self.stream.isatty() self.convert = convert # dict of ansi codes to win32 functions and parameters self.win32_calls = self.get_win32_calls() # are we wrapping stderr? self.on_stderr = self.wrapped is sys.stderr def should_wrap(self): ''' True if this class is actually needed. If false, then the output stream will not be affected, nor will win32 calls be issued, so wrapping stdout is not actually required. 
This will generally be False on non-Windows platforms, unless optional functionality like autoreset has been requested using kwargs to init() ''' return self.convert or self.strip or self.autoreset def get_win32_calls(self): if self.convert and winterm: return { AnsiStyle.RESET_ALL: (winterm.reset_all, ), AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT), AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL), AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL), AnsiFore.BLACK: (winterm.fore, WinColor.BLACK), AnsiFore.RED: (winterm.fore, WinColor.RED), AnsiFore.GREEN: (winterm.fore, WinColor.GREEN), AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW), AnsiFore.BLUE: (winterm.fore, WinColor.BLUE), AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA), AnsiFore.CYAN: (winterm.fore, WinColor.CYAN), AnsiFore.WHITE: (winterm.fore, WinColor.GREY), AnsiFore.RESET: (winterm.fore, ), AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True), AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True), AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True), AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True), AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True), AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True), AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True), AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True), AnsiBack.BLACK: (winterm.back, WinColor.BLACK), AnsiBack.RED: (winterm.back, WinColor.RED), AnsiBack.GREEN: (winterm.back, WinColor.GREEN), AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW), AnsiBack.BLUE: (winterm.back, WinColor.BLUE), AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA), AnsiBack.CYAN: (winterm.back, WinColor.CYAN), AnsiBack.WHITE: (winterm.back, WinColor.GREY), AnsiBack.RESET: (winterm.back, ), AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True), AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True), AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True), AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True), AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True), AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True), AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True), AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True), } return dict() def write(self, text): if self.strip or self.convert: self.write_and_convert(text) else: self.wrapped.write(text) self.wrapped.flush() if self.autoreset: self.reset_all() def reset_all(self): if self.convert: self.call_win32('m', (0,)) elif not self.strip and not self.stream.closed: self.wrapped.write(Style.RESET_ALL) def write_and_convert(self, text): ''' Write the given text to our wrapped stream, stripping any ANSI sequences from the text, and optionally converting them into win32 calls. 
''' cursor = 0 text = self.convert_osc(text) for match in self.ANSI_CSI_RE.finditer(text): start, end = match.span() self.write_plain_text(text, cursor, start) self.convert_ansi(*match.groups()) cursor = end self.write_plain_text(text, cursor, len(text)) def write_plain_text(self, text, start, end): if start < end: self.wrapped.write(text[start:end]) self.wrapped.flush() def convert_ansi(self, paramstring, command): if self.convert: params = self.extract_params(command, paramstring) self.call_win32(command, params) def extract_params(self, command, paramstring): if command in 'Hf': params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';')) while len(params) < 2: # defaults: params = params + (1,) else: params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0) if len(params) == 0: # defaults: if command in 'JKm': params = (0,) elif command in 'ABCD': params = (1,) return params def call_win32(self, command, params): if command == 'm': for param in params: if param in self.win32_calls: func_args = self.win32_calls[param] func = func_args[0] args = func_args[1:] kwargs = dict(on_stderr=self.on_stderr) func(*args, **kwargs) elif command in 'J': winterm.erase_screen(params[0], on_stderr=self.on_stderr) elif command in 'K': winterm.erase_line(params[0], on_stderr=self.on_stderr) elif command in 'Hf': # cursor position - absolute winterm.set_cursor_position(params, on_stderr=self.on_stderr) elif command in 'ABCD': # cursor position - relative n = params[0] # A - up, B - down, C - forward, D - back x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command] winterm.cursor_adjust(x, y, on_stderr=self.on_stderr) def convert_osc(self, text): for match in self.ANSI_OSC_RE.finditer(text): start, end = match.span() text = text[:start] + text[end:] paramstring, command = match.groups() if command in '\x07': # \x07 = BEL params = paramstring.split(";") # 0 - change title and icon (we will only change title) # 1 - change icon (we don't support this) # 2 - change title if params[0] in '02': winterm.set_title(params[1]) return text
0
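A small sketch of using AnsiToWin32 directly, much as the wrap_stream() helper does; names assume the top-level colorama package:

import sys
from colorama.ansitowin32 import AnsiToWin32

wrapper = AnsiToWin32(sys.stdout)
stream = wrapper.stream if wrapper.should_wrap() else sys.stdout
# On an ANSI-capable terminal this passes through; on a legacy Windows
# console the escape sequence becomes SetConsoleTextAttribute calls.
stream.write('\033[31mpossibly red\033[0m\n')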
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/colorama/ansi.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes for printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''

CSI = '\033['
OSC = '\033]'
BEL = '\007'


def code_to_chars(code):
    return CSI + str(code) + 'm'

def set_title(title):
    return OSC + '2;' + title + BEL

def clear_screen(mode=2):
    return CSI + str(mode) + 'J'

def clear_line(mode=2):
    return CSI + str(mode) + 'K'


class AnsiCodes(object):
    def __init__(self):
        # the subclasses declare class attributes which are numbers.
        # Upon instantiation we define instance attributes, which are the same
        # as the class attributes but wrapped with the ANSI escape sequence
        for name in dir(self):
            if not name.startswith('_'):
                value = getattr(self, name)
                setattr(self, name, code_to_chars(value))


class AnsiCursor(object):
    def UP(self, n=1):
        return CSI + str(n) + 'A'
    def DOWN(self, n=1):
        return CSI + str(n) + 'B'
    def FORWARD(self, n=1):
        return CSI + str(n) + 'C'
    def BACK(self, n=1):
        return CSI + str(n) + 'D'
    def POS(self, x=1, y=1):
        return CSI + str(y) + ';' + str(x) + 'H'


class AnsiFore(AnsiCodes):
    BLACK           = 30
    RED             = 31
    GREEN           = 32
    YELLOW          = 33
    BLUE            = 34
    MAGENTA         = 35
    CYAN            = 36
    WHITE           = 37
    RESET           = 39

    # These are fairly well supported, but not part of the standard.
    LIGHTBLACK_EX   = 90
    LIGHTRED_EX     = 91
    LIGHTGREEN_EX   = 92
    LIGHTYELLOW_EX  = 93
    LIGHTBLUE_EX    = 94
    LIGHTMAGENTA_EX = 95
    LIGHTCYAN_EX    = 96
    LIGHTWHITE_EX   = 97


class AnsiBack(AnsiCodes):
    BLACK           = 40
    RED             = 41
    GREEN           = 42
    YELLOW          = 43
    BLUE            = 44
    MAGENTA         = 45
    CYAN            = 46
    WHITE           = 47
    RESET           = 49

    # These are fairly well supported, but not part of the standard.
    LIGHTBLACK_EX   = 100
    LIGHTRED_EX     = 101
    LIGHTGREEN_EX   = 102
    LIGHTYELLOW_EX  = 103
    LIGHTBLUE_EX    = 104
    LIGHTMAGENTA_EX = 105
    LIGHTCYAN_EX    = 106
    LIGHTWHITE_EX   = 107


class AnsiStyle(AnsiCodes):
    BRIGHT    = 1
    DIM       = 2
    NORMAL    = 22
    RESET_ALL = 0

Fore   = AnsiFore()
Back   = AnsiBack()
Style  = AnsiStyle()
Cursor = AnsiCursor()
0
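The escape sequences generated above can be checked directly; a few concrete values, noting that Cursor.POS takes (x, y) but emits the row-first CSI form:

from colorama.ansi import code_to_chars, Fore, Cursor  # assumes the top-level name

assert code_to_chars(31) == '\033[31m'  # CSI + code + 'm'
assert Fore.RED == '\033[31m'           # instance attribute is the wrapped code
assert Cursor.POS(5, 3) == '\033[3;5H'  # x=5, y=3 -> ESC[row;colH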
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/colorama/winterm.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. from . import win32 # from wincon.h class WinColor(object): BLACK = 0 BLUE = 1 GREEN = 2 CYAN = 3 RED = 4 MAGENTA = 5 YELLOW = 6 GREY = 7 # from wincon.h class WinStyle(object): NORMAL = 0x00 # dim text, dim background BRIGHT = 0x08 # bright text, dim background BRIGHT_BACKGROUND = 0x80 # dim text, bright background class WinTerm(object): def __init__(self): self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes self.set_attrs(self._default) self._default_fore = self._fore self._default_back = self._back self._default_style = self._style # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style. # So that LIGHT_EX colors and BRIGHT style do not clobber each other, # we track them separately, since LIGHT_EX is overwritten by Fore/Back # and BRIGHT is overwritten by Style codes. self._light = 0 def get_attrs(self): return self._fore + self._back * 16 + (self._style | self._light) def set_attrs(self, value): self._fore = value & 7 self._back = (value >> 4) & 7 self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND) def reset_all(self, on_stderr=None): self.set_attrs(self._default) self.set_console(attrs=self._default) self._light = 0 def fore(self, fore=None, light=False, on_stderr=False): if fore is None: fore = self._default_fore self._fore = fore # Emulate LIGHT_EX with BRIGHT Style if light: self._light |= WinStyle.BRIGHT else: self._light &= ~WinStyle.BRIGHT self.set_console(on_stderr=on_stderr) def back(self, back=None, light=False, on_stderr=False): if back is None: back = self._default_back self._back = back # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style if light: self._light |= WinStyle.BRIGHT_BACKGROUND else: self._light &= ~WinStyle.BRIGHT_BACKGROUND self.set_console(on_stderr=on_stderr) def style(self, style=None, on_stderr=False): if style is None: style = self._default_style self._style = style self.set_console(on_stderr=on_stderr) def set_console(self, attrs=None, on_stderr=False): if attrs is None: attrs = self.get_attrs() handle = win32.STDOUT if on_stderr: handle = win32.STDERR win32.SetConsoleTextAttribute(handle, attrs) def get_position(self, handle): position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition # Because Windows coordinates are 0-based, # and win32.SetConsoleCursorPosition expects 1-based. position.X += 1 position.Y += 1 return position def set_cursor_position(self, position=None, on_stderr=False): if position is None: # I'm not currently tracking the position, so there is no default. # position = self.get_position() return handle = win32.STDOUT if on_stderr: handle = win32.STDERR win32.SetConsoleCursorPosition(handle, position) def cursor_adjust(self, x, y, on_stderr=False): handle = win32.STDOUT if on_stderr: handle = win32.STDERR position = self.get_position(handle) adjusted_position = (position.Y + y, position.X + x) win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False) def erase_screen(self, mode=0, on_stderr=False): # 0 should clear from the cursor to the end of the screen. # 1 should clear from the cursor to the beginning of the screen. 
# 2 should clear the entire screen, and move cursor to (1,1) handle = win32.STDOUT if on_stderr: handle = win32.STDERR csbi = win32.GetConsoleScreenBufferInfo(handle) # get the number of character cells in the current buffer cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y # get number of character cells before current cursor position cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X if mode == 0: from_coord = csbi.dwCursorPosition cells_to_erase = cells_in_screen - cells_before_cursor elif mode == 1: from_coord = win32.COORD(0, 0) cells_to_erase = cells_before_cursor elif mode == 2: from_coord = win32.COORD(0, 0) cells_to_erase = cells_in_screen else: # invalid mode return # fill the entire screen with blanks win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) # now set the buffer's attributes accordingly win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) if mode == 2: # put the cursor where needed win32.SetConsoleCursorPosition(handle, (1, 1)) def erase_line(self, mode=0, on_stderr=False): # 0 should clear from the cursor to the end of the line. # 1 should clear from the cursor to the beginning of the line. # 2 should clear the entire line. handle = win32.STDOUT if on_stderr: handle = win32.STDERR csbi = win32.GetConsoleScreenBufferInfo(handle) if mode == 0: from_coord = csbi.dwCursorPosition cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X elif mode == 1: from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) cells_to_erase = csbi.dwCursorPosition.X elif mode == 2: from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) cells_to_erase = csbi.dwSize.X else: # invalid mode return # fill the entire screen with blanks win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) # now set the buffer's attributes accordingly win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord) def set_title(self, title): win32.SetConsoleTitle(title)
0
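A minimal sketch of the attribute packing used by get_attrs()/set_attrs() above: the low three bits carry the foreground color, bits 4-6 the background, and 0x08/0x80 the two brightness flags:

fore, back, style = 4, 2, 0x08      # red text, green background, BRIGHT
attrs = fore + back * 16 + style    # same formula as WinTerm.get_attrs()
assert attrs & 7 == fore            # unpack foreground
assert (attrs >> 4) & 7 == back     # unpack background
assert attrs & 0x08                 # BRIGHT flag survives the round trip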
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/colorama/initialise.py
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import atexit
import contextlib
import sys

from .ansitowin32 import AnsiToWin32


orig_stdout = None
orig_stderr = None

wrapped_stdout = None
wrapped_stderr = None

atexit_done = False


def reset_all():
    if AnsiToWin32 is not None:    # Issue #74: objects might become None at exit
        AnsiToWin32(orig_stdout).reset_all()


def init(autoreset=False, convert=None, strip=None, wrap=True):

    if not wrap and any([autoreset, convert, strip]):
        raise ValueError('wrap=False conflicts with any other arg=True')

    global wrapped_stdout, wrapped_stderr
    global orig_stdout, orig_stderr

    orig_stdout = sys.stdout
    orig_stderr = sys.stderr

    if sys.stdout is None:
        wrapped_stdout = None
    else:
        sys.stdout = wrapped_stdout = \
            wrap_stream(orig_stdout, convert, strip, autoreset, wrap)
    if sys.stderr is None:
        wrapped_stderr = None
    else:
        sys.stderr = wrapped_stderr = \
            wrap_stream(orig_stderr, convert, strip, autoreset, wrap)

    global atexit_done
    if not atexit_done:
        atexit.register(reset_all)
        atexit_done = True


def deinit():
    if orig_stdout is not None:
        sys.stdout = orig_stdout
    if orig_stderr is not None:
        sys.stderr = orig_stderr


@contextlib.contextmanager
def colorama_text(*args, **kwargs):
    init(*args, **kwargs)
    try:
        yield
    finally:
        deinit()


def reinit():
    if wrapped_stdout is not None:
        sys.stdout = wrapped_stdout
    if wrapped_stderr is not None:
        sys.stderr = wrapped_stderr


def wrap_stream(stream, convert, strip, autoreset, wrap):
    if wrap:
        wrapper = AnsiToWin32(stream,
            convert=convert, strip=strip, autoreset=autoreset)
        if wrapper.should_wrap():
            stream = wrapper.stream
    return stream
0
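The colorama_text() context manager above pairs init() with deinit(); a short sketch, again assuming the top-level package name:

from colorama import colorama_text, Fore

with colorama_text(autoreset=True):
    print(Fore.GREEN + 'stdout is wrapped inside this block')
print('original streams restored here')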
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/cachecontrol/serialize.py
import base64 import io import json import zlib from pip._vendor import msgpack from pip._vendor.requests.structures import CaseInsensitiveDict from .compat import HTTPResponse, pickle, text_type def _b64_decode_bytes(b): return base64.b64decode(b.encode("ascii")) def _b64_decode_str(s): return _b64_decode_bytes(s).decode("utf8") class Serializer(object): def dumps(self, request, response, body=None): response_headers = CaseInsensitiveDict(response.headers) if body is None: body = response.read(decode_content=False) # NOTE: 99% sure this is dead code. I'm only leaving it # here b/c I don't have a test yet to prove # it. Basically, before using # `cachecontrol.filewrapper.CallbackFileWrapper`, # this made an effort to reset the file handle. The # `CallbackFileWrapper` short circuits this code by # setting the body as the content is consumed, the # result being a `body` argument is *always* passed # into cache_response, and in turn, # `Serializer.dump`. response._fp = io.BytesIO(body) # NOTE: This is all a bit weird, but it's really important that on # Python 2.x these objects are unicode and not str, even when # they contain only ascii. The problem here is that msgpack # understands the difference between unicode and bytes and we # have it set to differentiate between them, however Python 2 # doesn't know the difference. Forcing these to unicode will be # enough to have msgpack know the difference. data = { u"response": { u"body": body, u"headers": dict( (text_type(k), text_type(v)) for k, v in response.headers.items() ), u"status": response.status, u"version": response.version, u"reason": text_type(response.reason), u"strict": response.strict, u"decode_content": response.decode_content, } } # Construct our vary headers data[u"vary"] = {} if u"vary" in response_headers: varied_headers = response_headers[u"vary"].split(",") for header in varied_headers: header = text_type(header).strip() header_value = request.headers.get(header, None) if header_value is not None: header_value = text_type(header_value) data[u"vary"][header] = header_value return b",".join([b"cc=4", msgpack.dumps(data, use_bin_type=True)]) def loads(self, request, data): # Short circuit if we've been given an empty set of data if not data: return # Determine what version of the serializer the data was serialized # with try: ver, data = data.split(b",", 1) except ValueError: ver = b"cc=0" # Make sure that our "ver" is actually a version and isn't a false # positive from a , being in the data stream. if ver[:3] != b"cc=": data = ver + data ver = b"cc=0" # Get the version number out of the cc=N ver = ver.split(b"=", 1)[-1].decode("ascii") # Dispatch to the actual load method for the given version try: return getattr(self, "_loads_v{}".format(ver))(request, data) except AttributeError: # This is a version we don't have a loads function for, so we'll # just treat it as a miss and return None return def prepare_response(self, request, cached): """Verify our vary headers match and construct a real urllib3 HTTPResponse object. """ # Special case the '*' Vary value as it means we cannot actually # determine if the cached response is suitable for this request. # This case is also handled in the controller code when creating # a cache entry, but is left here for backwards compatibility. 
if "*" in cached.get("vary", {}): return # Ensure that the Vary headers for the cached response match our # request for header, value in cached.get("vary", {}).items(): if request.headers.get(header, None) != value: return body_raw = cached["response"].pop("body") headers = CaseInsensitiveDict(data=cached["response"]["headers"]) if headers.get("transfer-encoding", "") == "chunked": headers.pop("transfer-encoding") cached["response"]["headers"] = headers try: body = io.BytesIO(body_raw) except TypeError: # This can happen if cachecontrol serialized to v1 format (pickle) # using Python 2. A Python 2 str(byte string) will be unpickled as # a Python 3 str (unicode string), which will cause the above to # fail with: # # TypeError: 'str' does not support the buffer interface body = io.BytesIO(body_raw.encode("utf8")) return HTTPResponse(body=body, preload_content=False, **cached["response"]) def _loads_v0(self, request, data): # The original legacy cache data. This doesn't contain enough # information to construct everything we need, so we'll treat this as # a miss. return def _loads_v1(self, request, data): try: cached = pickle.loads(data) except ValueError: return return self.prepare_response(request, cached) def _loads_v2(self, request, data): try: cached = json.loads(zlib.decompress(data).decode("utf8")) except (ValueError, zlib.error): return # We need to decode the items that we've base64 encoded cached["response"]["body"] = _b64_decode_bytes(cached["response"]["body"]) cached["response"]["headers"] = dict( (_b64_decode_str(k), _b64_decode_str(v)) for k, v in cached["response"]["headers"].items() ) cached["response"]["reason"] = _b64_decode_str(cached["response"]["reason"]) cached["vary"] = dict( (_b64_decode_str(k), _b64_decode_str(v) if v is not None else v) for k, v in cached["vary"].items() ) return self.prepare_response(request, cached) def _loads_v3(self, request, data): # Due to Python 2 encoding issues, it's impossible to know for sure # exactly how to load v3 entries, thus we'll treat these as a miss so # that they get rewritten out as v4 entries. return def _loads_v4(self, request, data): try: cached = msgpack.loads(data, raw=False) except ValueError: return return self.prepare_response(request, cached)
0
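A hedged illustration of the framing that loads() parses above: a cc=N version tag, a comma, then the payload (msgpack for v4):

blob = b"cc=4," + b"\x80"               # b"\x80" is an empty msgpack map
ver, payload = blob.split(b",", 1)
assert ver.split(b"=", 1)[-1] == b"4"   # dispatches to _loads_v4
assert payload == b"\x80"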
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/cachecontrol/wrapper.py
from .adapter import CacheControlAdapter
from .cache import DictCache


def CacheControl(
    sess,
    cache=None,
    cache_etags=True,
    serializer=None,
    heuristic=None,
    controller_class=None,
    adapter_class=None,
    cacheable_methods=None,
):

    cache = DictCache() if cache is None else cache
    adapter_class = adapter_class or CacheControlAdapter
    adapter = adapter_class(
        cache,
        cache_etags=cache_etags,
        serializer=serializer,
        heuristic=heuristic,
        controller_class=controller_class,
        cacheable_methods=cacheable_methods,
    )
    sess.mount("http://", adapter)
    sess.mount("https://", adapter)

    return sess
0
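This wrapper is the typical entry point; a minimal sketch assuming the unvendored cachecontrol and requests packages:

import requests
from cachecontrol import CacheControl

sess = CacheControl(requests.Session())  # mounts CacheControlAdapter on http(s)://
resp = sess.get('https://example.com/')  # a repeat GET may be served from cache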
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/cachecontrol/controller.py
""" The httplib2 algorithms ported for use with requests. """ import logging import re import calendar import time from email.utils import parsedate_tz from pip._vendor.requests.structures import CaseInsensitiveDict from .cache import DictCache from .serialize import Serializer logger = logging.getLogger(__name__) URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") def parse_uri(uri): """Parses a URI using the regex given in Appendix B of RFC 3986. (scheme, authority, path, query, fragment) = parse_uri(uri) """ groups = URI.match(uri).groups() return (groups[1], groups[3], groups[4], groups[6], groups[8]) class CacheController(object): """An interface to see if request should cached or not. """ def __init__( self, cache=None, cache_etags=True, serializer=None, status_codes=None ): self.cache = DictCache() if cache is None else cache self.cache_etags = cache_etags self.serializer = serializer or Serializer() self.cacheable_status_codes = status_codes or (200, 203, 300, 301) @classmethod def _urlnorm(cls, uri): """Normalize the URL to create a safe key for the cache""" (scheme, authority, path, query, fragment) = parse_uri(uri) if not scheme or not authority: raise Exception("Only absolute URIs are allowed. uri = %s" % uri) scheme = scheme.lower() authority = authority.lower() if not path: path = "/" # Could do syntax based normalization of the URI before # computing the digest. See Section 6.2.2 of Std 66. request_uri = query and "?".join([path, query]) or path defrag_uri = scheme + "://" + authority + request_uri return defrag_uri @classmethod def cache_url(cls, uri): return cls._urlnorm(uri) def parse_cache_control(self, headers): known_directives = { # https://tools.ietf.org/html/rfc7234#section-5.2 "max-age": (int, True), "max-stale": (int, False), "min-fresh": (int, True), "no-cache": (None, False), "no-store": (None, False), "no-transform": (None, False), "only-if-cached": (None, False), "must-revalidate": (None, False), "public": (None, False), "private": (None, False), "proxy-revalidate": (None, False), "s-maxage": (int, True), } cc_headers = headers.get("cache-control", headers.get("Cache-Control", "")) retval = {} for cc_directive in cc_headers.split(","): if not cc_directive.strip(): continue parts = cc_directive.split("=", 1) directive = parts[0].strip() try: typ, required = known_directives[directive] except KeyError: logger.debug("Ignoring unknown cache-control directive: %s", directive) continue if not typ or not required: retval[directive] = None if typ: try: retval[directive] = typ(parts[1].strip()) except IndexError: if required: logger.debug( "Missing value for cache-control " "directive: %s", directive, ) except ValueError: logger.debug( "Invalid value for cache-control directive " "%s, must be %s", directive, typ.__name__, ) return retval def cached_request(self, request): """ Return a cached response if it exists in the cache, otherwise return False. 
""" cache_url = self.cache_url(request.url) logger.debug('Looking up "%s" in the cache', cache_url) cc = self.parse_cache_control(request.headers) # Bail out if the request insists on fresh data if "no-cache" in cc: logger.debug('Request header has "no-cache", cache bypassed') return False if "max-age" in cc and cc["max-age"] == 0: logger.debug('Request header has "max_age" as 0, cache bypassed') return False # Request allows serving from the cache, let's see if we find something cache_data = self.cache.get(cache_url) if cache_data is None: logger.debug("No cache entry available") return False # Check whether it can be deserialized resp = self.serializer.loads(request, cache_data) if not resp: logger.warning("Cache entry deserialization failed, entry ignored") return False # If we have a cached 301, return it immediately. We don't # need to test our response for other headers b/c it is # intrinsically "cacheable" as it is Permanent. # See: # https://tools.ietf.org/html/rfc7231#section-6.4.2 # # Client can try to refresh the value by repeating the request # with cache busting headers as usual (ie no-cache). if resp.status == 301: msg = ( 'Returning cached "301 Moved Permanently" response ' "(ignoring date and etag information)" ) logger.debug(msg) return resp headers = CaseInsensitiveDict(resp.headers) if not headers or "date" not in headers: if "etag" not in headers: # Without date or etag, the cached response can never be used # and should be deleted. logger.debug("Purging cached response: no date or etag") self.cache.delete(cache_url) logger.debug("Ignoring cached response: no date") return False now = time.time() date = calendar.timegm(parsedate_tz(headers["date"])) current_age = max(0, now - date) logger.debug("Current age based on date: %i", current_age) # TODO: There is an assumption that the result will be a # urllib3 response object. This may not be best since we # could probably avoid instantiating or constructing the # response until we know we need it. resp_cc = self.parse_cache_control(headers) # determine freshness freshness_lifetime = 0 # Check the max-age pragma in the cache control header if "max-age" in resp_cc: freshness_lifetime = resp_cc["max-age"] logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime) # If there isn't a max-age, check for an expires header elif "expires" in headers: expires = parsedate_tz(headers["expires"]) if expires is not None: expire_time = calendar.timegm(expires) - date freshness_lifetime = max(0, expire_time) logger.debug("Freshness lifetime from expires: %i", freshness_lifetime) # Determine if we are setting freshness limit in the # request. Note, this overrides what was in the response. if "max-age" in cc: freshness_lifetime = cc["max-age"] logger.debug( "Freshness lifetime from request max-age: %i", freshness_lifetime ) if "min-fresh" in cc: min_fresh = cc["min-fresh"] # adjust our current age by our min fresh current_age += min_fresh logger.debug("Adjusted current age from min-fresh: %i", current_age) # Return entry if it is fresh enough if freshness_lifetime > current_age: logger.debug('The response is "fresh", returning cached response') logger.debug("%i > %i", freshness_lifetime, current_age) return resp # we're not fresh. 
If we don't have an Etag, clear it out if "etag" not in headers: logger.debug('The cached response is "stale" with no etag, purging') self.cache.delete(cache_url) # return the original handler return False def conditional_headers(self, request): cache_url = self.cache_url(request.url) resp = self.serializer.loads(request, self.cache.get(cache_url)) new_headers = {} if resp: headers = CaseInsensitiveDict(resp.headers) if "etag" in headers: new_headers["If-None-Match"] = headers["ETag"] if "last-modified" in headers: new_headers["If-Modified-Since"] = headers["Last-Modified"] return new_headers def cache_response(self, request, response, body=None, status_codes=None): """ Algorithm for caching requests. This assumes a requests Response object. """ # From httplib2: Don't cache 206's since we aren't going to # handle byte range requests cacheable_status_codes = status_codes or self.cacheable_status_codes if response.status not in cacheable_status_codes: logger.debug( "Status code %s not in %s", response.status, cacheable_status_codes ) return response_headers = CaseInsensitiveDict(response.headers) # If we've been given a body, our response has a Content-Length, that # Content-Length is valid then we can check to see if the body we've # been given matches the expected size, and if it doesn't we'll just # skip trying to cache it. if ( body is not None and "content-length" in response_headers and response_headers["content-length"].isdigit() and int(response_headers["content-length"]) != len(body) ): return cc_req = self.parse_cache_control(request.headers) cc = self.parse_cache_control(response_headers) cache_url = self.cache_url(request.url) logger.debug('Updating cache with response from "%s"', cache_url) # Delete it from the cache if we happen to have it stored there no_store = False if "no-store" in cc: no_store = True logger.debug('Response header has "no-store"') if "no-store" in cc_req: no_store = True logger.debug('Request header has "no-store"') if no_store and self.cache.get(cache_url): logger.debug('Purging existing cache entry to honor "no-store"') self.cache.delete(cache_url) if no_store: return # https://tools.ietf.org/html/rfc7234#section-4.1: # A Vary header field-value of "*" always fails to match. # Storing such a response leads to a deserialization warning # during cache lookup and is not allowed to ever be served, # so storing it can be avoided. if "*" in response_headers.get("vary", ""): logger.debug('Response header has "Vary: *"') return # If we've been given an etag, then keep the response if self.cache_etags and "etag" in response_headers: logger.debug("Caching due to etag") self.cache.set( cache_url, self.serializer.dumps(request, response, body=body) ) # Add to the cache any 301s. We do this before looking that # the Date headers. elif response.status == 301: logger.debug("Caching permanant redirect") self.cache.set(cache_url, self.serializer.dumps(request, response)) # Add to the cache if the response headers demand it. If there # is no date header then we can't do anything about expiring # the cache. elif "date" in response_headers: # cache when there is a max-age > 0 if "max-age" in cc and cc["max-age"] > 0: logger.debug("Caching b/c date exists and max-age > 0") self.cache.set( cache_url, self.serializer.dumps(request, response, body=body) ) # If the request can expire, it means we should cache it # in the meantime. 
elif "expires" in response_headers: if response_headers["expires"]: logger.debug("Caching b/c of expires header") self.cache.set( cache_url, self.serializer.dumps(request, response, body=body) ) def update_cached_response(self, request, response): """On a 304 we will get a new set of headers that we want to update our cached value with, assuming we have one. This should only ever be called when we've sent an ETag and gotten a 304 as the response. """ cache_url = self.cache_url(request.url) cached_response = self.serializer.loads(request, self.cache.get(cache_url)) if not cached_response: # we didn't have a cached response return response # Lets update our headers with the headers from the new request: # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1 # # The server isn't supposed to send headers that would make # the cached body invalid. But... just in case, we'll be sure # to strip out ones we know that might be problmatic due to # typical assumptions. excluded_headers = ["content-length"] cached_response.headers.update( dict( (k, v) for k, v in response.headers.items() if k.lower() not in excluded_headers ) ) # we want a 200 b/c we have content via the cache cached_response.status = 200 # update our cache self.cache.set(cache_url, self.serializer.dumps(request, cached_response)) return cached_response
0
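The cache-control parser and URL normalizer above can be exercised on their own; a small sketch assuming the unvendored package name:

from cachecontrol.controller import CacheController

cc = CacheController()
parsed = cc.parse_cache_control({'cache-control': 'max-age=3600, no-store'})
assert parsed == {'max-age': 3600, 'no-store': None}
assert CacheController.cache_url('HTTP://Example.COM/a?q=1') == 'http://example.com/a?q=1'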
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/cachecontrol/compat.py
try:
    from urllib.parse import urljoin
except ImportError:
    from urlparse import urljoin


try:
    import cPickle as pickle
except ImportError:
    import pickle

# Handle the case where the requests module has been patched to not have
# urllib3 bundled as part of its source.
try:
    from pip._vendor.requests.packages.urllib3.response import HTTPResponse
except ImportError:
    from pip._vendor.urllib3.response import HTTPResponse

try:
    from pip._vendor.requests.packages.urllib3.util import is_fp_closed
except ImportError:
    from pip._vendor.urllib3.util import is_fp_closed

# Replicate some six behaviour
try:
    text_type = unicode
except NameError:
    text_type = str
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py
from io import BytesIO


class CallbackFileWrapper(object):
    """
    Small wrapper around a fp object which will tee everything read into a
    buffer, and when that file is closed it will execute a callback with the
    contents of that buffer.

    All attributes are proxied to the underlying file object.

    This class uses members with a double underscore (__) leading prefix so as
    not to accidentally shadow an attribute.
    """

    def __init__(self, fp, callback):
        self.__buf = BytesIO()
        self.__fp = fp
        self.__callback = callback

    def __getattr__(self, name):
        # The vagaries of garbage collection mean that self.__fp is
        # not always set. Using __getattribute__ and the mangled private
        # name[0] allows looking up the attribute value and raising an
        # AttributeError when it doesn't exist. This stops things from
        # infinitely recursing calls to getattr in the case where
        # self.__fp hasn't been set.
        #
        # [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
        fp = self.__getattribute__("_CallbackFileWrapper__fp")
        return getattr(fp, name)

    def __is_fp_closed(self):
        try:
            return self.__fp.fp is None
        except AttributeError:
            pass

        try:
            return self.__fp.closed
        except AttributeError:
            pass

        # We just don't cache it then.
        # TODO: Add some logging here...
        return False

    def _close(self):
        if self.__callback:
            self.__callback(self.__buf.getvalue())

        # We assign this to None here, because otherwise we can get into
        # really tricky problems where the CPython interpreter dead locks
        # because the callback is holding a reference to something which
        # has a __del__ method. Setting this to None breaks the cycle
        # and allows the garbage collector to do its thing normally.
        self.__callback = None

    def read(self, amt=None):
        data = self.__fp.read(amt)
        self.__buf.write(data)
        if self.__is_fp_closed():
            self._close()

        return data

    def _safe_read(self, amt):
        data = self.__fp._safe_read(amt)
        if amt == 2 and data == b"\r\n":
            # urllib executes this read to toss the CRLF at the end
            # of the chunk.
            return data

        self.__buf.write(data)
        if self.__is_fp_closed():
            self._close()

        return data
0
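A sketch of the tee-and-callback behavior, using a hypothetical stand-in for the http.client file object (the real consumer); the .fp attribute going None is how __is_fp_closed() detects EOF. Package names assume the unvendored cachecontrol:

from io import BytesIO
from cachecontrol.filewrapper import CallbackFileWrapper

class _FakeHTTPFp(BytesIO):
    """Hypothetical stand-in: exposes a .fp attr that goes None at EOF."""
    fp = object()

src = _FakeHTTPFp(b'response body')
captured = []
wrapped = CallbackFileWrapper(src, captured.append)
wrapped.read()              # tees the body into the internal buffer
src.fp = None               # simulate http.client marking EOF
wrapped.read()              # empty read; __is_fp_closed() is now True
assert captured == [b'response body']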
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/cachecontrol/heuristics.py
import calendar
import time

from email.utils import formatdate, parsedate, parsedate_tz

from datetime import datetime, timedelta

TIME_FMT = "%a, %d %b %Y %H:%M:%S GMT"


def expire_after(delta, date=None):
    date = date or datetime.utcnow()
    return date + delta


def datetime_to_header(dt):
    return formatdate(calendar.timegm(dt.timetuple()))


class BaseHeuristic(object):

    def warning(self, response):
        """
        Return a valid 1xx warning header value describing the cache
        adjustments.

        The response is provided to allow warnings like 113
        http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need
        to explicitly say response is over 24 hours old.
        """
        return '110 - "Response is Stale"'

    def update_headers(self, response):
        """Update the response headers with any new headers.

        NOTE: This SHOULD always include some Warning header to
              signify that the response was cached by the client, not
              by way of the provided headers.
        """
        return {}

    def apply(self, response):
        updated_headers = self.update_headers(response)

        if updated_headers:
            response.headers.update(updated_headers)
            warning_header_value = self.warning(response)
            if warning_header_value is not None:
                response.headers.update({"Warning": warning_header_value})

        return response


class OneDayCache(BaseHeuristic):
    """
    Cache the response by providing an expires 1 day in the
    future.
    """

    def update_headers(self, response):
        headers = {}

        if "expires" not in response.headers:
            date = parsedate(response.headers["date"])
            expires = expire_after(timedelta(days=1), date=datetime(*date[:6]))
            headers["expires"] = datetime_to_header(expires)
            headers["cache-control"] = "public"
        return headers


class ExpiresAfter(BaseHeuristic):
    """
    Cache **all** requests for a defined time period.
    """

    def __init__(self, **kw):
        self.delta = timedelta(**kw)

    def update_headers(self, response):
        expires = expire_after(self.delta)
        return {"expires": datetime_to_header(expires), "cache-control": "public"}

    def warning(self, response):
        tmpl = "110 - Automatically cached for %s. Response might be stale"
        return tmpl % self.delta


class LastModified(BaseHeuristic):
    """
    If there is no Expires header already, fall back on Last-Modified
    using the heuristic from
    http://tools.ietf.org/html/rfc7234#section-4.2.2
    to calculate a reasonable value.

    Firefox also does something like this per
    https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ
    http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397
    Unlike mozilla we limit this to 24-hr.
    """
    cacheable_by_default_statuses = {
        200, 203, 204, 206, 300, 301, 404, 405, 410, 414, 501
    }

    def update_headers(self, resp):
        headers = resp.headers

        if "expires" in headers:
            return {}

        if "cache-control" in headers and headers["cache-control"] != "public":
            return {}

        if resp.status not in self.cacheable_by_default_statuses:
            return {}

        if "date" not in headers or "last-modified" not in headers:
            return {}

        date = calendar.timegm(parsedate_tz(headers["date"]))
        last_modified = parsedate(headers["last-modified"])
        if date is None or last_modified is None:
            return {}

        now = time.time()
        current_age = max(0, now - date)
        delta = date - calendar.timegm(last_modified)
        freshness_lifetime = max(0, min(delta / 10, 24 * 3600))
        if freshness_lifetime <= current_age:
            return {}

        expires = date + freshness_lifetime
        return {"expires": time.strftime(TIME_FMT, time.gmtime(expires))}

    def warning(self, resp):
        return None
0
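A short sketch of plugging a heuristic in, assuming the unvendored package names:

import requests
from cachecontrol import CacheControl
from cachecontrol.heuristics import ExpiresAfter

# Every cacheable response is stamped with Expires/Cache-Control headers
# one hour into the future, plus the Warning header from warning().
sess = CacheControl(requests.Session(), heuristic=ExpiresAfter(hours=1))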
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/cachecontrol/adapter.py
import types import functools import zlib from pip._vendor.requests.adapters import HTTPAdapter from .controller import CacheController from .cache import DictCache from .filewrapper import CallbackFileWrapper class CacheControlAdapter(HTTPAdapter): invalidating_methods = {"PUT", "DELETE"} def __init__( self, cache=None, cache_etags=True, controller_class=None, serializer=None, heuristic=None, cacheable_methods=None, *args, **kw ): super(CacheControlAdapter, self).__init__(*args, **kw) self.cache = DictCache() if cache is None else cache self.heuristic = heuristic self.cacheable_methods = cacheable_methods or ("GET",) controller_factory = controller_class or CacheController self.controller = controller_factory( self.cache, cache_etags=cache_etags, serializer=serializer ) def send(self, request, cacheable_methods=None, **kw): """ Send a request. Use the request information to see if it exists in the cache and cache the response if we need to and can. """ cacheable = cacheable_methods or self.cacheable_methods if request.method in cacheable: try: cached_response = self.controller.cached_request(request) except zlib.error: cached_response = None if cached_response: return self.build_response(request, cached_response, from_cache=True) # check for etags and add headers if appropriate request.headers.update(self.controller.conditional_headers(request)) resp = super(CacheControlAdapter, self).send(request, **kw) return resp def build_response( self, request, response, from_cache=False, cacheable_methods=None ): """ Build a response by making a request or using the cache. This will end up calling send and returning a potentially cached response """ cacheable = cacheable_methods or self.cacheable_methods if not from_cache and request.method in cacheable: # Check for any heuristics that might update headers # before trying to cache. if self.heuristic: response = self.heuristic.apply(response) # apply any expiration heuristics if response.status == 304: # We must have sent an ETag request. This could mean # that we've been expired already or that we simply # have an etag. In either case, we want to try and # update the cache if that is the case. cached_response = self.controller.update_cached_response( request, response ) if cached_response is not response: from_cache = True # We are done with the server response, read a # possible response body (compliant servers will # not return one, but we cannot be 100% sure) and # release the connection back to the pool. response.read(decode_content=False) response.release_conn() response = cached_response # We always cache the 301 responses elif response.status == 301: self.controller.cache_response(request, response) else: # Wrap the response file with a wrapper that will cache the # response when the stream has been consumed. response._fp = CallbackFileWrapper( response._fp, functools.partial( self.controller.cache_response, request, response ), ) if response.chunked: super_update_chunk_length = response._update_chunk_length def _update_chunk_length(self): super_update_chunk_length() if self.chunk_left == 0: self._fp._close() response._update_chunk_length = types.MethodType( _update_chunk_length, response ) resp = super(CacheControlAdapter, self).build_response(request, response) # See if we should invalidate the cache. 
if request.method in self.invalidating_methods and resp.ok: cache_url = self.controller.cache_url(request.url) self.cache.delete(cache_url) # Give the request a from_cache attr to let people use it resp.from_cache = from_cache return resp def close(self): self.cache.close() super(CacheControlAdapter, self).close()
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/cachecontrol/cache.py
""" The cache object API for implementing caches. The default is a thread safe in-memory dictionary. """ from threading import Lock class BaseCache(object): def get(self, key): raise NotImplementedError() def set(self, key, value): raise NotImplementedError() def delete(self, key): raise NotImplementedError() def close(self): pass class DictCache(BaseCache): def __init__(self, init_dict=None): self.lock = Lock() self.data = init_dict or {} def get(self, key): return self.data.get(key, None) def set(self, key, value): with self.lock: self.data.update({key: value}) def delete(self, key): with self.lock: if key in self.data: self.data.pop(key)
0
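Implementing the BaseCache interface needs only get/set/delete; a hypothetical no-op cache as the smallest possible example (package name assumes the unvendored cachecontrol):

from cachecontrol.cache import BaseCache

class NullCache(BaseCache):
    """Hypothetical cache that discards everything; every lookup misses."""
    def get(self, key):
        return None
    def set(self, key, value):
        pass
    def delete(self, key):
        pass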
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/cachecontrol/__init__.py
"""CacheControl import Interface. Make it easy to import from cachecontrol without long namespaces. """ __author__ = "Eric Larson" __email__ = "eric@ionrock.org" __version__ = "0.12.6" from .wrapper import CacheControl from .adapter import CacheControlAdapter from .controller import CacheController
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py
import logging

from pip._vendor import requests

from pip._vendor.cachecontrol.adapter import CacheControlAdapter
from pip._vendor.cachecontrol.cache import DictCache
from pip._vendor.cachecontrol.controller import logger

from argparse import ArgumentParser


def setup_logging():
    logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    logger.addHandler(handler)


def get_session():
    adapter = CacheControlAdapter(
        DictCache(), cache_etags=True, serializer=None, heuristic=None
    )
    sess = requests.Session()
    sess.mount("http://", adapter)
    sess.mount("https://", adapter)

    sess.cache_controller = adapter.controller
    return sess


def get_args():
    parser = ArgumentParser()
    parser.add_argument("url", help="The URL to try and cache")
    return parser.parse_args()


def main(args=None):
    args = get_args()
    sess = get_session()

    # Make a request to get a response
    resp = sess.get(args.url)

    # Turn on logging
    setup_logging()

    # try setting the cache
    sess.cache_controller.cache_response(resp.request, resp.raw)

    # Now try to get it
    if sess.cache_controller.cached_request(resp.request):
        print("Cached!")
    else:
        print("Not cached :(")


if __name__ == "__main__":
    main()
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/cachecontrol
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
import hashlib
import os
from textwrap import dedent

from ..cache import BaseCache
from ..controller import CacheController

try:
    FileNotFoundError
except NameError:
    # py2.X
    FileNotFoundError = (IOError, OSError)


def _secure_open_write(filename, fmode):
    # We only want to write to this file, so open it in write only mode
    flags = os.O_WRONLY

    # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only
    #  will open *new* files.
    # We specify this because we want to ensure that the mode we pass is the
    # mode of the file.
    flags |= os.O_CREAT | os.O_EXCL

    # Do not follow symlinks to prevent someone from making a symlink that
    # we follow and insecurely open a cache file.
    if hasattr(os, "O_NOFOLLOW"):
        flags |= os.O_NOFOLLOW

    # On Windows we'll mark this file as binary
    if hasattr(os, "O_BINARY"):
        flags |= os.O_BINARY

    # Before we open our file, we want to delete any existing file that is
    # there
    try:
        os.remove(filename)
    except (IOError, OSError):
        # The file must not exist already, so we can just skip ahead to opening
        pass

    # Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
    # race condition happens between the os.remove and this line, that an
    # error will be raised. Because we utilize a lockfile this should only
    # happen if someone is attempting to attack us.
    fd = os.open(filename, flags, fmode)
    try:
        return os.fdopen(fd, "wb")

    except:
        # An error occurred wrapping our FD in a file object
        os.close(fd)
        raise


class FileCache(BaseCache):

    def __init__(
        self,
        directory,
        forever=False,
        filemode=0o0600,
        dirmode=0o0700,
        use_dir_lock=None,
        lock_class=None,
    ):

        if use_dir_lock is not None and lock_class is not None:
            raise ValueError("Cannot use use_dir_lock and lock_class together")

        try:
            from lockfile import LockFile
            from lockfile.mkdirlockfile import MkdirLockFile
        except ImportError:
            notice = dedent(
                """
            NOTE: In order to use the FileCache you must have
            lockfile installed. You can install it via pip:
              pip install lockfile
            """
            )
            raise ImportError(notice)

        else:
            if use_dir_lock:
                lock_class = MkdirLockFile

            elif lock_class is None:
                lock_class = LockFile

        self.directory = directory
        self.forever = forever
        self.filemode = filemode
        self.dirmode = dirmode
        self.lock_class = lock_class

    @staticmethod
    def encode(x):
        return hashlib.sha224(x.encode()).hexdigest()

    def _fn(self, name):
        # NOTE: This method should not change as some may depend on it.
        #       See: https://github.com/ionrock/cachecontrol/issues/63
        hashed = self.encode(name)
        parts = list(hashed[:5]) + [hashed]
        return os.path.join(self.directory, *parts)

    def get(self, key):
        name = self._fn(key)
        try:
            with open(name, "rb") as fh:
                return fh.read()

        except FileNotFoundError:
            return None

    def set(self, key, value):
        name = self._fn(key)

        # Make sure the directory exists
        try:
            os.makedirs(os.path.dirname(name), self.dirmode)
        except (IOError, OSError):
            pass

        with self.lock_class(name) as lock:
            # Write our actual file
            with _secure_open_write(lock.path, self.filemode) as fh:
                fh.write(value)

    def delete(self, key):
        name = self._fn(key)
        if not self.forever:
            try:
                os.remove(name)
            except FileNotFoundError:
                pass


def url_to_file_path(url, filecache):
    """Return the file cache path based on the URL.

    This does not ensure the file exists!
    """
    key = CacheController.cache_url(url)
    return filecache._fn(key)
0
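A minimal usage sketch for the FileCache above. The cache directory name is hypothetical, the lockfile dependency must be installed, and CacheControlAdapter comes from the sibling adapter module in the same vendored package:

from pip._vendor import requests
from pip._vendor.cachecontrol.adapter import CacheControlAdapter
from pip._vendor.cachecontrol.caches.file_cache import FileCache, url_to_file_path

cache = FileCache(".web_cache")  # hypothetical directory; requires lockfile

sess = requests.Session()
sess.mount("https://", CacheControlAdapter(cache=cache))
resp = sess.get("https://example.com/")

# _fn() shards entries: the first five hex chars of the sha224 digest become
# nested directories, then the full digest is the filename.
print(url_to_file_path("https://example.com/", cache))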
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/cachecontrol
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/cachecontrol/caches/__init__.py
from .file_cache import FileCache  # noqa
from .redis_cache import RedisCache  # noqa
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/cachecontrol
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
from __future__ import division

from datetime import datetime
from pip._vendor.cachecontrol.cache import BaseCache


class RedisCache(BaseCache):

    def __init__(self, conn):
        self.conn = conn

    def get(self, key):
        return self.conn.get(key)

    def set(self, key, value, expires=None):
        if not expires:
            self.conn.set(key, value)
        else:
            expires = expires - datetime.utcnow()
            self.conn.setex(key, int(expires.total_seconds()), value)

    def delete(self, key):
        self.conn.delete(key)

    def clear(self):
        """Helper for clearing all the keys in a database. Use with
        caution!"""
        for key in self.conn.keys():
            self.conn.delete(key)

    def close(self):
        """Redis uses connection pooling, no need to close the connection."""
        pass
0
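A usage sketch for RedisCache. The third-party redis package and a server on localhost:6379 are assumptions, not part of the module above:

import redis

from pip._vendor import requests
from pip._vendor.cachecontrol.adapter import CacheControlAdapter
from pip._vendor.cachecontrol.caches.redis_cache import RedisCache

pool = redis.ConnectionPool(host="localhost", port=6379)
cache = RedisCache(redis.Redis(connection_pool=pool))

sess = requests.Session()
sess.mount("https://", CacheControlAdapter(cache=cache))

# When the controller passes an expires datetime, set() above converts it to a
# relative TTL and uses SETEX, so Redis evicts stale entries on its own.
resp = sess.get("https://example.com/")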
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/idna/intranges.py
""" Given a list of integers, made up of (hopefully) a small number of long runs of consecutive integers, compute a representation of the form ((start1, end1), (start2, end2) ...). Then answer the question "was x present in the original list?" in time O(log(# runs)). """ import bisect def intranges_from_list(list_): """Represent a list of integers as a sequence of ranges: ((start_0, end_0), (start_1, end_1), ...), such that the original integers are exactly those x such that start_i <= x < end_i for some i. Ranges are encoded as single integers (start << 32 | end), not as tuples. """ sorted_list = sorted(list_) ranges = [] last_write = -1 for i in range(len(sorted_list)): if i+1 < len(sorted_list): if sorted_list[i] == sorted_list[i+1]-1: continue current_range = sorted_list[last_write+1:i+1] ranges.append(_encode_range(current_range[0], current_range[-1] + 1)) last_write = i return tuple(ranges) def _encode_range(start, end): return (start << 32) | end def _decode_range(r): return (r >> 32), (r & ((1 << 32) - 1)) def intranges_contain(int_, ranges): """Determine if `int_` falls into one of the ranges in `ranges`.""" tuple_ = _encode_range(int_, 0) pos = bisect.bisect_left(ranges, tuple_) # we could be immediately ahead of a tuple (start, end) # with start < int_ <= end if pos > 0: left, right = _decode_range(ranges[pos-1]) if left <= int_ < right: return True # or we could be immediately behind a tuple (int_, end) if pos < len(ranges): left, _ = _decode_range(ranges[pos]) if left == int_: return True return False
0
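To make the encoding concrete, a quick demonstration of intranges_from_list and intranges_contain; the input values are chosen arbitrarily:

from pip._vendor.idna.intranges import intranges_from_list, intranges_contain

# 1-3 and 10-12 collapse into two encoded ranges: (1 << 32 | 4) and (10 << 32 | 13)
ranges = intranges_from_list([1, 2, 3, 10, 11, 12])

assert intranges_contain(2, ranges)
assert intranges_contain(10, ranges)
assert not intranges_contain(5, ranges)  # falls in the gap between the two runs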
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/idna/package_data.py
__version__ = '2.10'
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/idna/compat.py
from .core import *
from .codec import *


def ToASCII(label):
    return encode(label)


def ToUnicode(label):
    return decode(label)


def nameprep(s):
    raise NotImplementedError("IDNA 2008 does not utilise nameprep protocol")
0
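These are thin compatibility shims: ToASCII and ToUnicode mirror the RFC 3490 entry points but delegate to the IDNA 2008 encode/decode defined in core.py, and nameprep is deliberately unsupported. A quick check using the well-known test label:

from pip._vendor.idna.compat import ToASCII, ToUnicode

assert ToASCII(u'テスト') == b'xn--zckzah'
assert ToUnicode(b'xn--zckzah') == u'テスト'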
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/idna/idnadata.py
# This file is automatically generated by tools/idna-data __version__ = "13.0.0" scripts = { 'Greek': ( 0x37000000374, 0x37500000378, 0x37a0000037e, 0x37f00000380, 0x38400000385, 0x38600000387, 0x3880000038b, 0x38c0000038d, 0x38e000003a2, 0x3a3000003e2, 0x3f000000400, 0x1d2600001d2b, 0x1d5d00001d62, 0x1d6600001d6b, 0x1dbf00001dc0, 0x1f0000001f16, 0x1f1800001f1e, 0x1f2000001f46, 0x1f4800001f4e, 0x1f5000001f58, 0x1f5900001f5a, 0x1f5b00001f5c, 0x1f5d00001f5e, 0x1f5f00001f7e, 0x1f8000001fb5, 0x1fb600001fc5, 0x1fc600001fd4, 0x1fd600001fdc, 0x1fdd00001ff0, 0x1ff200001ff5, 0x1ff600001fff, 0x212600002127, 0xab650000ab66, 0x101400001018f, 0x101a0000101a1, 0x1d2000001d246, ), 'Han': ( 0x2e8000002e9a, 0x2e9b00002ef4, 0x2f0000002fd6, 0x300500003006, 0x300700003008, 0x30210000302a, 0x30380000303c, 0x340000004dc0, 0x4e0000009ffd, 0xf9000000fa6e, 0xfa700000fada, 0x16ff000016ff2, 0x200000002a6de, 0x2a7000002b735, 0x2b7400002b81e, 0x2b8200002cea2, 0x2ceb00002ebe1, 0x2f8000002fa1e, 0x300000003134b, ), 'Hebrew': ( 0x591000005c8, 0x5d0000005eb, 0x5ef000005f5, 0xfb1d0000fb37, 0xfb380000fb3d, 0xfb3e0000fb3f, 0xfb400000fb42, 0xfb430000fb45, 0xfb460000fb50, ), 'Hiragana': ( 0x304100003097, 0x309d000030a0, 0x1b0010001b11f, 0x1b1500001b153, 0x1f2000001f201, ), 'Katakana': ( 0x30a1000030fb, 0x30fd00003100, 0x31f000003200, 0x32d0000032ff, 0x330000003358, 0xff660000ff70, 0xff710000ff9e, 0x1b0000001b001, 0x1b1640001b168, ), } joining_types = { 0x600: 85, 0x601: 85, 0x602: 85, 0x603: 85, 0x604: 85, 0x605: 85, 0x608: 85, 0x60b: 85, 0x620: 68, 0x621: 85, 0x622: 82, 0x623: 82, 0x624: 82, 0x625: 82, 0x626: 68, 0x627: 82, 0x628: 68, 0x629: 82, 0x62a: 68, 0x62b: 68, 0x62c: 68, 0x62d: 68, 0x62e: 68, 0x62f: 82, 0x630: 82, 0x631: 82, 0x632: 82, 0x633: 68, 0x634: 68, 0x635: 68, 0x636: 68, 0x637: 68, 0x638: 68, 0x639: 68, 0x63a: 68, 0x63b: 68, 0x63c: 68, 0x63d: 68, 0x63e: 68, 0x63f: 68, 0x640: 67, 0x641: 68, 0x642: 68, 0x643: 68, 0x644: 68, 0x645: 68, 0x646: 68, 0x647: 68, 0x648: 82, 0x649: 68, 0x64a: 68, 0x66e: 68, 0x66f: 68, 0x671: 82, 0x672: 82, 0x673: 82, 0x674: 85, 0x675: 82, 0x676: 82, 0x677: 82, 0x678: 68, 0x679: 68, 0x67a: 68, 0x67b: 68, 0x67c: 68, 0x67d: 68, 0x67e: 68, 0x67f: 68, 0x680: 68, 0x681: 68, 0x682: 68, 0x683: 68, 0x684: 68, 0x685: 68, 0x686: 68, 0x687: 68, 0x688: 82, 0x689: 82, 0x68a: 82, 0x68b: 82, 0x68c: 82, 0x68d: 82, 0x68e: 82, 0x68f: 82, 0x690: 82, 0x691: 82, 0x692: 82, 0x693: 82, 0x694: 82, 0x695: 82, 0x696: 82, 0x697: 82, 0x698: 82, 0x699: 82, 0x69a: 68, 0x69b: 68, 0x69c: 68, 0x69d: 68, 0x69e: 68, 0x69f: 68, 0x6a0: 68, 0x6a1: 68, 0x6a2: 68, 0x6a3: 68, 0x6a4: 68, 0x6a5: 68, 0x6a6: 68, 0x6a7: 68, 0x6a8: 68, 0x6a9: 68, 0x6aa: 68, 0x6ab: 68, 0x6ac: 68, 0x6ad: 68, 0x6ae: 68, 0x6af: 68, 0x6b0: 68, 0x6b1: 68, 0x6b2: 68, 0x6b3: 68, 0x6b4: 68, 0x6b5: 68, 0x6b6: 68, 0x6b7: 68, 0x6b8: 68, 0x6b9: 68, 0x6ba: 68, 0x6bb: 68, 0x6bc: 68, 0x6bd: 68, 0x6be: 68, 0x6bf: 68, 0x6c0: 82, 0x6c1: 68, 0x6c2: 68, 0x6c3: 82, 0x6c4: 82, 0x6c5: 82, 0x6c6: 82, 0x6c7: 82, 0x6c8: 82, 0x6c9: 82, 0x6ca: 82, 0x6cb: 82, 0x6cc: 68, 0x6cd: 82, 0x6ce: 68, 0x6cf: 82, 0x6d0: 68, 0x6d1: 68, 0x6d2: 82, 0x6d3: 82, 0x6d5: 82, 0x6dd: 85, 0x6ee: 82, 0x6ef: 82, 0x6fa: 68, 0x6fb: 68, 0x6fc: 68, 0x6ff: 68, 0x70f: 84, 0x710: 82, 0x712: 68, 0x713: 68, 0x714: 68, 0x715: 82, 0x716: 82, 0x717: 82, 0x718: 82, 0x719: 82, 0x71a: 68, 0x71b: 68, 0x71c: 68, 0x71d: 68, 0x71e: 82, 0x71f: 68, 0x720: 68, 0x721: 68, 0x722: 68, 0x723: 68, 0x724: 68, 0x725: 68, 0x726: 68, 0x727: 68, 0x728: 82, 0x729: 68, 0x72a: 82, 0x72b: 68, 0x72c: 82, 0x72d: 68, 0x72e: 68, 0x72f: 82, 
0x74d: 82, 0x74e: 68, 0x74f: 68, 0x750: 68, 0x751: 68, 0x752: 68, 0x753: 68, 0x754: 68, 0x755: 68, 0x756: 68, 0x757: 68, 0x758: 68, 0x759: 82, 0x75a: 82, 0x75b: 82, 0x75c: 68, 0x75d: 68, 0x75e: 68, 0x75f: 68, 0x760: 68, 0x761: 68, 0x762: 68, 0x763: 68, 0x764: 68, 0x765: 68, 0x766: 68, 0x767: 68, 0x768: 68, 0x769: 68, 0x76a: 68, 0x76b: 82, 0x76c: 82, 0x76d: 68, 0x76e: 68, 0x76f: 68, 0x770: 68, 0x771: 82, 0x772: 68, 0x773: 82, 0x774: 82, 0x775: 68, 0x776: 68, 0x777: 68, 0x778: 82, 0x779: 82, 0x77a: 68, 0x77b: 68, 0x77c: 68, 0x77d: 68, 0x77e: 68, 0x77f: 68, 0x7ca: 68, 0x7cb: 68, 0x7cc: 68, 0x7cd: 68, 0x7ce: 68, 0x7cf: 68, 0x7d0: 68, 0x7d1: 68, 0x7d2: 68, 0x7d3: 68, 0x7d4: 68, 0x7d5: 68, 0x7d6: 68, 0x7d7: 68, 0x7d8: 68, 0x7d9: 68, 0x7da: 68, 0x7db: 68, 0x7dc: 68, 0x7dd: 68, 0x7de: 68, 0x7df: 68, 0x7e0: 68, 0x7e1: 68, 0x7e2: 68, 0x7e3: 68, 0x7e4: 68, 0x7e5: 68, 0x7e6: 68, 0x7e7: 68, 0x7e8: 68, 0x7e9: 68, 0x7ea: 68, 0x7fa: 67, 0x840: 82, 0x841: 68, 0x842: 68, 0x843: 68, 0x844: 68, 0x845: 68, 0x846: 82, 0x847: 82, 0x848: 68, 0x849: 82, 0x84a: 68, 0x84b: 68, 0x84c: 68, 0x84d: 68, 0x84e: 68, 0x84f: 68, 0x850: 68, 0x851: 68, 0x852: 68, 0x853: 68, 0x854: 82, 0x855: 68, 0x856: 82, 0x857: 82, 0x858: 82, 0x860: 68, 0x861: 85, 0x862: 68, 0x863: 68, 0x864: 68, 0x865: 68, 0x866: 85, 0x867: 82, 0x868: 68, 0x869: 82, 0x86a: 82, 0x8a0: 68, 0x8a1: 68, 0x8a2: 68, 0x8a3: 68, 0x8a4: 68, 0x8a5: 68, 0x8a6: 68, 0x8a7: 68, 0x8a8: 68, 0x8a9: 68, 0x8aa: 82, 0x8ab: 82, 0x8ac: 82, 0x8ad: 85, 0x8ae: 82, 0x8af: 68, 0x8b0: 68, 0x8b1: 82, 0x8b2: 82, 0x8b3: 68, 0x8b4: 68, 0x8b6: 68, 0x8b7: 68, 0x8b8: 68, 0x8b9: 82, 0x8ba: 68, 0x8bb: 68, 0x8bc: 68, 0x8bd: 68, 0x8be: 68, 0x8bf: 68, 0x8c0: 68, 0x8c1: 68, 0x8c2: 68, 0x8c3: 68, 0x8c4: 68, 0x8c5: 68, 0x8c6: 68, 0x8c7: 68, 0x8e2: 85, 0x1806: 85, 0x1807: 68, 0x180a: 67, 0x180e: 85, 0x1820: 68, 0x1821: 68, 0x1822: 68, 0x1823: 68, 0x1824: 68, 0x1825: 68, 0x1826: 68, 0x1827: 68, 0x1828: 68, 0x1829: 68, 0x182a: 68, 0x182b: 68, 0x182c: 68, 0x182d: 68, 0x182e: 68, 0x182f: 68, 0x1830: 68, 0x1831: 68, 0x1832: 68, 0x1833: 68, 0x1834: 68, 0x1835: 68, 0x1836: 68, 0x1837: 68, 0x1838: 68, 0x1839: 68, 0x183a: 68, 0x183b: 68, 0x183c: 68, 0x183d: 68, 0x183e: 68, 0x183f: 68, 0x1840: 68, 0x1841: 68, 0x1842: 68, 0x1843: 68, 0x1844: 68, 0x1845: 68, 0x1846: 68, 0x1847: 68, 0x1848: 68, 0x1849: 68, 0x184a: 68, 0x184b: 68, 0x184c: 68, 0x184d: 68, 0x184e: 68, 0x184f: 68, 0x1850: 68, 0x1851: 68, 0x1852: 68, 0x1853: 68, 0x1854: 68, 0x1855: 68, 0x1856: 68, 0x1857: 68, 0x1858: 68, 0x1859: 68, 0x185a: 68, 0x185b: 68, 0x185c: 68, 0x185d: 68, 0x185e: 68, 0x185f: 68, 0x1860: 68, 0x1861: 68, 0x1862: 68, 0x1863: 68, 0x1864: 68, 0x1865: 68, 0x1866: 68, 0x1867: 68, 0x1868: 68, 0x1869: 68, 0x186a: 68, 0x186b: 68, 0x186c: 68, 0x186d: 68, 0x186e: 68, 0x186f: 68, 0x1870: 68, 0x1871: 68, 0x1872: 68, 0x1873: 68, 0x1874: 68, 0x1875: 68, 0x1876: 68, 0x1877: 68, 0x1878: 68, 0x1880: 85, 0x1881: 85, 0x1882: 85, 0x1883: 85, 0x1884: 85, 0x1885: 84, 0x1886: 84, 0x1887: 68, 0x1888: 68, 0x1889: 68, 0x188a: 68, 0x188b: 68, 0x188c: 68, 0x188d: 68, 0x188e: 68, 0x188f: 68, 0x1890: 68, 0x1891: 68, 0x1892: 68, 0x1893: 68, 0x1894: 68, 0x1895: 68, 0x1896: 68, 0x1897: 68, 0x1898: 68, 0x1899: 68, 0x189a: 68, 0x189b: 68, 0x189c: 68, 0x189d: 68, 0x189e: 68, 0x189f: 68, 0x18a0: 68, 0x18a1: 68, 0x18a2: 68, 0x18a3: 68, 0x18a4: 68, 0x18a5: 68, 0x18a6: 68, 0x18a7: 68, 0x18a8: 68, 0x18aa: 68, 0x200c: 85, 0x200d: 67, 0x202f: 85, 0x2066: 85, 0x2067: 85, 0x2068: 85, 0x2069: 85, 0xa840: 68, 0xa841: 68, 0xa842: 68, 0xa843: 68, 0xa844: 68, 0xa845: 68, 0xa846: 
68, 0xa847: 68, 0xa848: 68, 0xa849: 68, 0xa84a: 68, 0xa84b: 68, 0xa84c: 68, 0xa84d: 68, 0xa84e: 68, 0xa84f: 68, 0xa850: 68, 0xa851: 68, 0xa852: 68, 0xa853: 68, 0xa854: 68, 0xa855: 68, 0xa856: 68, 0xa857: 68, 0xa858: 68, 0xa859: 68, 0xa85a: 68, 0xa85b: 68, 0xa85c: 68, 0xa85d: 68, 0xa85e: 68, 0xa85f: 68, 0xa860: 68, 0xa861: 68, 0xa862: 68, 0xa863: 68, 0xa864: 68, 0xa865: 68, 0xa866: 68, 0xa867: 68, 0xa868: 68, 0xa869: 68, 0xa86a: 68, 0xa86b: 68, 0xa86c: 68, 0xa86d: 68, 0xa86e: 68, 0xa86f: 68, 0xa870: 68, 0xa871: 68, 0xa872: 76, 0xa873: 85, 0x10ac0: 68, 0x10ac1: 68, 0x10ac2: 68, 0x10ac3: 68, 0x10ac4: 68, 0x10ac5: 82, 0x10ac6: 85, 0x10ac7: 82, 0x10ac8: 85, 0x10ac9: 82, 0x10aca: 82, 0x10acb: 85, 0x10acc: 85, 0x10acd: 76, 0x10ace: 82, 0x10acf: 82, 0x10ad0: 82, 0x10ad1: 82, 0x10ad2: 82, 0x10ad3: 68, 0x10ad4: 68, 0x10ad5: 68, 0x10ad6: 68, 0x10ad7: 76, 0x10ad8: 68, 0x10ad9: 68, 0x10ada: 68, 0x10adb: 68, 0x10adc: 68, 0x10add: 82, 0x10ade: 68, 0x10adf: 68, 0x10ae0: 68, 0x10ae1: 82, 0x10ae2: 85, 0x10ae3: 85, 0x10ae4: 82, 0x10aeb: 68, 0x10aec: 68, 0x10aed: 68, 0x10aee: 68, 0x10aef: 82, 0x10b80: 68, 0x10b81: 82, 0x10b82: 68, 0x10b83: 82, 0x10b84: 82, 0x10b85: 82, 0x10b86: 68, 0x10b87: 68, 0x10b88: 68, 0x10b89: 82, 0x10b8a: 68, 0x10b8b: 68, 0x10b8c: 82, 0x10b8d: 68, 0x10b8e: 82, 0x10b8f: 82, 0x10b90: 68, 0x10b91: 82, 0x10ba9: 82, 0x10baa: 82, 0x10bab: 82, 0x10bac: 82, 0x10bad: 68, 0x10bae: 68, 0x10baf: 85, 0x10d00: 76, 0x10d01: 68, 0x10d02: 68, 0x10d03: 68, 0x10d04: 68, 0x10d05: 68, 0x10d06: 68, 0x10d07: 68, 0x10d08: 68, 0x10d09: 68, 0x10d0a: 68, 0x10d0b: 68, 0x10d0c: 68, 0x10d0d: 68, 0x10d0e: 68, 0x10d0f: 68, 0x10d10: 68, 0x10d11: 68, 0x10d12: 68, 0x10d13: 68, 0x10d14: 68, 0x10d15: 68, 0x10d16: 68, 0x10d17: 68, 0x10d18: 68, 0x10d19: 68, 0x10d1a: 68, 0x10d1b: 68, 0x10d1c: 68, 0x10d1d: 68, 0x10d1e: 68, 0x10d1f: 68, 0x10d20: 68, 0x10d21: 68, 0x10d22: 82, 0x10d23: 68, 0x10f30: 68, 0x10f31: 68, 0x10f32: 68, 0x10f33: 82, 0x10f34: 68, 0x10f35: 68, 0x10f36: 68, 0x10f37: 68, 0x10f38: 68, 0x10f39: 68, 0x10f3a: 68, 0x10f3b: 68, 0x10f3c: 68, 0x10f3d: 68, 0x10f3e: 68, 0x10f3f: 68, 0x10f40: 68, 0x10f41: 68, 0x10f42: 68, 0x10f43: 68, 0x10f44: 68, 0x10f45: 85, 0x10f51: 68, 0x10f52: 68, 0x10f53: 68, 0x10f54: 82, 0x10fb0: 68, 0x10fb1: 85, 0x10fb2: 68, 0x10fb3: 68, 0x10fb4: 82, 0x10fb5: 82, 0x10fb6: 82, 0x10fb7: 85, 0x10fb8: 68, 0x10fb9: 82, 0x10fba: 82, 0x10fbb: 68, 0x10fbc: 68, 0x10fbd: 82, 0x10fbe: 68, 0x10fbf: 68, 0x10fc0: 85, 0x10fc1: 68, 0x10fc2: 82, 0x10fc3: 82, 0x10fc4: 68, 0x10fc5: 85, 0x10fc6: 85, 0x10fc7: 85, 0x10fc8: 85, 0x10fc9: 82, 0x10fca: 68, 0x10fcb: 76, 0x110bd: 85, 0x110cd: 85, 0x1e900: 68, 0x1e901: 68, 0x1e902: 68, 0x1e903: 68, 0x1e904: 68, 0x1e905: 68, 0x1e906: 68, 0x1e907: 68, 0x1e908: 68, 0x1e909: 68, 0x1e90a: 68, 0x1e90b: 68, 0x1e90c: 68, 0x1e90d: 68, 0x1e90e: 68, 0x1e90f: 68, 0x1e910: 68, 0x1e911: 68, 0x1e912: 68, 0x1e913: 68, 0x1e914: 68, 0x1e915: 68, 0x1e916: 68, 0x1e917: 68, 0x1e918: 68, 0x1e919: 68, 0x1e91a: 68, 0x1e91b: 68, 0x1e91c: 68, 0x1e91d: 68, 0x1e91e: 68, 0x1e91f: 68, 0x1e920: 68, 0x1e921: 68, 0x1e922: 68, 0x1e923: 68, 0x1e924: 68, 0x1e925: 68, 0x1e926: 68, 0x1e927: 68, 0x1e928: 68, 0x1e929: 68, 0x1e92a: 68, 0x1e92b: 68, 0x1e92c: 68, 0x1e92d: 68, 0x1e92e: 68, 0x1e92f: 68, 0x1e930: 68, 0x1e931: 68, 0x1e932: 68, 0x1e933: 68, 0x1e934: 68, 0x1e935: 68, 0x1e936: 68, 0x1e937: 68, 0x1e938: 68, 0x1e939: 68, 0x1e93a: 68, 0x1e93b: 68, 0x1e93c: 68, 0x1e93d: 68, 0x1e93e: 68, 0x1e93f: 68, 0x1e940: 68, 0x1e941: 68, 0x1e942: 68, 0x1e943: 68, 0x1e94b: 84, } codepoint_classes = { 'PVALID': ( 
0x2d0000002e, 0x300000003a, 0x610000007b, 0xdf000000f7, 0xf800000100, 0x10100000102, 0x10300000104, 0x10500000106, 0x10700000108, 0x1090000010a, 0x10b0000010c, 0x10d0000010e, 0x10f00000110, 0x11100000112, 0x11300000114, 0x11500000116, 0x11700000118, 0x1190000011a, 0x11b0000011c, 0x11d0000011e, 0x11f00000120, 0x12100000122, 0x12300000124, 0x12500000126, 0x12700000128, 0x1290000012a, 0x12b0000012c, 0x12d0000012e, 0x12f00000130, 0x13100000132, 0x13500000136, 0x13700000139, 0x13a0000013b, 0x13c0000013d, 0x13e0000013f, 0x14200000143, 0x14400000145, 0x14600000147, 0x14800000149, 0x14b0000014c, 0x14d0000014e, 0x14f00000150, 0x15100000152, 0x15300000154, 0x15500000156, 0x15700000158, 0x1590000015a, 0x15b0000015c, 0x15d0000015e, 0x15f00000160, 0x16100000162, 0x16300000164, 0x16500000166, 0x16700000168, 0x1690000016a, 0x16b0000016c, 0x16d0000016e, 0x16f00000170, 0x17100000172, 0x17300000174, 0x17500000176, 0x17700000178, 0x17a0000017b, 0x17c0000017d, 0x17e0000017f, 0x18000000181, 0x18300000184, 0x18500000186, 0x18800000189, 0x18c0000018e, 0x19200000193, 0x19500000196, 0x1990000019c, 0x19e0000019f, 0x1a1000001a2, 0x1a3000001a4, 0x1a5000001a6, 0x1a8000001a9, 0x1aa000001ac, 0x1ad000001ae, 0x1b0000001b1, 0x1b4000001b5, 0x1b6000001b7, 0x1b9000001bc, 0x1bd000001c4, 0x1ce000001cf, 0x1d0000001d1, 0x1d2000001d3, 0x1d4000001d5, 0x1d6000001d7, 0x1d8000001d9, 0x1da000001db, 0x1dc000001de, 0x1df000001e0, 0x1e1000001e2, 0x1e3000001e4, 0x1e5000001e6, 0x1e7000001e8, 0x1e9000001ea, 0x1eb000001ec, 0x1ed000001ee, 0x1ef000001f1, 0x1f5000001f6, 0x1f9000001fa, 0x1fb000001fc, 0x1fd000001fe, 0x1ff00000200, 0x20100000202, 0x20300000204, 0x20500000206, 0x20700000208, 0x2090000020a, 0x20b0000020c, 0x20d0000020e, 0x20f00000210, 0x21100000212, 0x21300000214, 0x21500000216, 0x21700000218, 0x2190000021a, 0x21b0000021c, 0x21d0000021e, 0x21f00000220, 0x22100000222, 0x22300000224, 0x22500000226, 0x22700000228, 0x2290000022a, 0x22b0000022c, 0x22d0000022e, 0x22f00000230, 0x23100000232, 0x2330000023a, 0x23c0000023d, 0x23f00000241, 0x24200000243, 0x24700000248, 0x2490000024a, 0x24b0000024c, 0x24d0000024e, 0x24f000002b0, 0x2b9000002c2, 0x2c6000002d2, 0x2ec000002ed, 0x2ee000002ef, 0x30000000340, 0x34200000343, 0x3460000034f, 0x35000000370, 0x37100000372, 0x37300000374, 0x37700000378, 0x37b0000037e, 0x39000000391, 0x3ac000003cf, 0x3d7000003d8, 0x3d9000003da, 0x3db000003dc, 0x3dd000003de, 0x3df000003e0, 0x3e1000003e2, 0x3e3000003e4, 0x3e5000003e6, 0x3e7000003e8, 0x3e9000003ea, 0x3eb000003ec, 0x3ed000003ee, 0x3ef000003f0, 0x3f3000003f4, 0x3f8000003f9, 0x3fb000003fd, 0x43000000460, 0x46100000462, 0x46300000464, 0x46500000466, 0x46700000468, 0x4690000046a, 0x46b0000046c, 0x46d0000046e, 0x46f00000470, 0x47100000472, 0x47300000474, 0x47500000476, 0x47700000478, 0x4790000047a, 0x47b0000047c, 0x47d0000047e, 0x47f00000480, 0x48100000482, 0x48300000488, 0x48b0000048c, 0x48d0000048e, 0x48f00000490, 0x49100000492, 0x49300000494, 0x49500000496, 0x49700000498, 0x4990000049a, 0x49b0000049c, 0x49d0000049e, 0x49f000004a0, 0x4a1000004a2, 0x4a3000004a4, 0x4a5000004a6, 0x4a7000004a8, 0x4a9000004aa, 0x4ab000004ac, 0x4ad000004ae, 0x4af000004b0, 0x4b1000004b2, 0x4b3000004b4, 0x4b5000004b6, 0x4b7000004b8, 0x4b9000004ba, 0x4bb000004bc, 0x4bd000004be, 0x4bf000004c0, 0x4c2000004c3, 0x4c4000004c5, 0x4c6000004c7, 0x4c8000004c9, 0x4ca000004cb, 0x4cc000004cd, 0x4ce000004d0, 0x4d1000004d2, 0x4d3000004d4, 0x4d5000004d6, 0x4d7000004d8, 0x4d9000004da, 0x4db000004dc, 0x4dd000004de, 0x4df000004e0, 0x4e1000004e2, 0x4e3000004e4, 0x4e5000004e6, 0x4e7000004e8, 0x4e9000004ea, 
0x4eb000004ec, 0x4ed000004ee, 0x4ef000004f0, 0x4f1000004f2, 0x4f3000004f4, 0x4f5000004f6, 0x4f7000004f8, 0x4f9000004fa, 0x4fb000004fc, 0x4fd000004fe, 0x4ff00000500, 0x50100000502, 0x50300000504, 0x50500000506, 0x50700000508, 0x5090000050a, 0x50b0000050c, 0x50d0000050e, 0x50f00000510, 0x51100000512, 0x51300000514, 0x51500000516, 0x51700000518, 0x5190000051a, 0x51b0000051c, 0x51d0000051e, 0x51f00000520, 0x52100000522, 0x52300000524, 0x52500000526, 0x52700000528, 0x5290000052a, 0x52b0000052c, 0x52d0000052e, 0x52f00000530, 0x5590000055a, 0x56000000587, 0x58800000589, 0x591000005be, 0x5bf000005c0, 0x5c1000005c3, 0x5c4000005c6, 0x5c7000005c8, 0x5d0000005eb, 0x5ef000005f3, 0x6100000061b, 0x62000000640, 0x64100000660, 0x66e00000675, 0x679000006d4, 0x6d5000006dd, 0x6df000006e9, 0x6ea000006f0, 0x6fa00000700, 0x7100000074b, 0x74d000007b2, 0x7c0000007f6, 0x7fd000007fe, 0x8000000082e, 0x8400000085c, 0x8600000086b, 0x8a0000008b5, 0x8b6000008c8, 0x8d3000008e2, 0x8e300000958, 0x96000000964, 0x96600000970, 0x97100000984, 0x9850000098d, 0x98f00000991, 0x993000009a9, 0x9aa000009b1, 0x9b2000009b3, 0x9b6000009ba, 0x9bc000009c5, 0x9c7000009c9, 0x9cb000009cf, 0x9d7000009d8, 0x9e0000009e4, 0x9e6000009f2, 0x9fc000009fd, 0x9fe000009ff, 0xa0100000a04, 0xa0500000a0b, 0xa0f00000a11, 0xa1300000a29, 0xa2a00000a31, 0xa3200000a33, 0xa3500000a36, 0xa3800000a3a, 0xa3c00000a3d, 0xa3e00000a43, 0xa4700000a49, 0xa4b00000a4e, 0xa5100000a52, 0xa5c00000a5d, 0xa6600000a76, 0xa8100000a84, 0xa8500000a8e, 0xa8f00000a92, 0xa9300000aa9, 0xaaa00000ab1, 0xab200000ab4, 0xab500000aba, 0xabc00000ac6, 0xac700000aca, 0xacb00000ace, 0xad000000ad1, 0xae000000ae4, 0xae600000af0, 0xaf900000b00, 0xb0100000b04, 0xb0500000b0d, 0xb0f00000b11, 0xb1300000b29, 0xb2a00000b31, 0xb3200000b34, 0xb3500000b3a, 0xb3c00000b45, 0xb4700000b49, 0xb4b00000b4e, 0xb5500000b58, 0xb5f00000b64, 0xb6600000b70, 0xb7100000b72, 0xb8200000b84, 0xb8500000b8b, 0xb8e00000b91, 0xb9200000b96, 0xb9900000b9b, 0xb9c00000b9d, 0xb9e00000ba0, 0xba300000ba5, 0xba800000bab, 0xbae00000bba, 0xbbe00000bc3, 0xbc600000bc9, 0xbca00000bce, 0xbd000000bd1, 0xbd700000bd8, 0xbe600000bf0, 0xc0000000c0d, 0xc0e00000c11, 0xc1200000c29, 0xc2a00000c3a, 0xc3d00000c45, 0xc4600000c49, 0xc4a00000c4e, 0xc5500000c57, 0xc5800000c5b, 0xc6000000c64, 0xc6600000c70, 0xc8000000c84, 0xc8500000c8d, 0xc8e00000c91, 0xc9200000ca9, 0xcaa00000cb4, 0xcb500000cba, 0xcbc00000cc5, 0xcc600000cc9, 0xcca00000cce, 0xcd500000cd7, 0xcde00000cdf, 0xce000000ce4, 0xce600000cf0, 0xcf100000cf3, 0xd0000000d0d, 0xd0e00000d11, 0xd1200000d45, 0xd4600000d49, 0xd4a00000d4f, 0xd5400000d58, 0xd5f00000d64, 0xd6600000d70, 0xd7a00000d80, 0xd8100000d84, 0xd8500000d97, 0xd9a00000db2, 0xdb300000dbc, 0xdbd00000dbe, 0xdc000000dc7, 0xdca00000dcb, 0xdcf00000dd5, 0xdd600000dd7, 0xdd800000de0, 0xde600000df0, 0xdf200000df4, 0xe0100000e33, 0xe3400000e3b, 0xe4000000e4f, 0xe5000000e5a, 0xe8100000e83, 0xe8400000e85, 0xe8600000e8b, 0xe8c00000ea4, 0xea500000ea6, 0xea700000eb3, 0xeb400000ebe, 0xec000000ec5, 0xec600000ec7, 0xec800000ece, 0xed000000eda, 0xede00000ee0, 0xf0000000f01, 0xf0b00000f0c, 0xf1800000f1a, 0xf2000000f2a, 0xf3500000f36, 0xf3700000f38, 0xf3900000f3a, 0xf3e00000f43, 0xf4400000f48, 0xf4900000f4d, 0xf4e00000f52, 0xf5300000f57, 0xf5800000f5c, 0xf5d00000f69, 0xf6a00000f6d, 0xf7100000f73, 0xf7400000f75, 0xf7a00000f81, 0xf8200000f85, 0xf8600000f93, 0xf9400000f98, 0xf9900000f9d, 0xf9e00000fa2, 0xfa300000fa7, 0xfa800000fac, 0xfad00000fb9, 0xfba00000fbd, 0xfc600000fc7, 0x10000000104a, 0x10500000109e, 0x10d0000010fb, 0x10fd00001100, 0x120000001249, 
0x124a0000124e, 0x125000001257, 0x125800001259, 0x125a0000125e, 0x126000001289, 0x128a0000128e, 0x1290000012b1, 0x12b2000012b6, 0x12b8000012bf, 0x12c0000012c1, 0x12c2000012c6, 0x12c8000012d7, 0x12d800001311, 0x131200001316, 0x13180000135b, 0x135d00001360, 0x138000001390, 0x13a0000013f6, 0x14010000166d, 0x166f00001680, 0x16810000169b, 0x16a0000016eb, 0x16f1000016f9, 0x17000000170d, 0x170e00001715, 0x172000001735, 0x174000001754, 0x17600000176d, 0x176e00001771, 0x177200001774, 0x1780000017b4, 0x17b6000017d4, 0x17d7000017d8, 0x17dc000017de, 0x17e0000017ea, 0x18100000181a, 0x182000001879, 0x1880000018ab, 0x18b0000018f6, 0x19000000191f, 0x19200000192c, 0x19300000193c, 0x19460000196e, 0x197000001975, 0x1980000019ac, 0x19b0000019ca, 0x19d0000019da, 0x1a0000001a1c, 0x1a2000001a5f, 0x1a6000001a7d, 0x1a7f00001a8a, 0x1a9000001a9a, 0x1aa700001aa8, 0x1ab000001abe, 0x1abf00001ac1, 0x1b0000001b4c, 0x1b5000001b5a, 0x1b6b00001b74, 0x1b8000001bf4, 0x1c0000001c38, 0x1c4000001c4a, 0x1c4d00001c7e, 0x1cd000001cd3, 0x1cd400001cfb, 0x1d0000001d2c, 0x1d2f00001d30, 0x1d3b00001d3c, 0x1d4e00001d4f, 0x1d6b00001d78, 0x1d7900001d9b, 0x1dc000001dfa, 0x1dfb00001e00, 0x1e0100001e02, 0x1e0300001e04, 0x1e0500001e06, 0x1e0700001e08, 0x1e0900001e0a, 0x1e0b00001e0c, 0x1e0d00001e0e, 0x1e0f00001e10, 0x1e1100001e12, 0x1e1300001e14, 0x1e1500001e16, 0x1e1700001e18, 0x1e1900001e1a, 0x1e1b00001e1c, 0x1e1d00001e1e, 0x1e1f00001e20, 0x1e2100001e22, 0x1e2300001e24, 0x1e2500001e26, 0x1e2700001e28, 0x1e2900001e2a, 0x1e2b00001e2c, 0x1e2d00001e2e, 0x1e2f00001e30, 0x1e3100001e32, 0x1e3300001e34, 0x1e3500001e36, 0x1e3700001e38, 0x1e3900001e3a, 0x1e3b00001e3c, 0x1e3d00001e3e, 0x1e3f00001e40, 0x1e4100001e42, 0x1e4300001e44, 0x1e4500001e46, 0x1e4700001e48, 0x1e4900001e4a, 0x1e4b00001e4c, 0x1e4d00001e4e, 0x1e4f00001e50, 0x1e5100001e52, 0x1e5300001e54, 0x1e5500001e56, 0x1e5700001e58, 0x1e5900001e5a, 0x1e5b00001e5c, 0x1e5d00001e5e, 0x1e5f00001e60, 0x1e6100001e62, 0x1e6300001e64, 0x1e6500001e66, 0x1e6700001e68, 0x1e6900001e6a, 0x1e6b00001e6c, 0x1e6d00001e6e, 0x1e6f00001e70, 0x1e7100001e72, 0x1e7300001e74, 0x1e7500001e76, 0x1e7700001e78, 0x1e7900001e7a, 0x1e7b00001e7c, 0x1e7d00001e7e, 0x1e7f00001e80, 0x1e8100001e82, 0x1e8300001e84, 0x1e8500001e86, 0x1e8700001e88, 0x1e8900001e8a, 0x1e8b00001e8c, 0x1e8d00001e8e, 0x1e8f00001e90, 0x1e9100001e92, 0x1e9300001e94, 0x1e9500001e9a, 0x1e9c00001e9e, 0x1e9f00001ea0, 0x1ea100001ea2, 0x1ea300001ea4, 0x1ea500001ea6, 0x1ea700001ea8, 0x1ea900001eaa, 0x1eab00001eac, 0x1ead00001eae, 0x1eaf00001eb0, 0x1eb100001eb2, 0x1eb300001eb4, 0x1eb500001eb6, 0x1eb700001eb8, 0x1eb900001eba, 0x1ebb00001ebc, 0x1ebd00001ebe, 0x1ebf00001ec0, 0x1ec100001ec2, 0x1ec300001ec4, 0x1ec500001ec6, 0x1ec700001ec8, 0x1ec900001eca, 0x1ecb00001ecc, 0x1ecd00001ece, 0x1ecf00001ed0, 0x1ed100001ed2, 0x1ed300001ed4, 0x1ed500001ed6, 0x1ed700001ed8, 0x1ed900001eda, 0x1edb00001edc, 0x1edd00001ede, 0x1edf00001ee0, 0x1ee100001ee2, 0x1ee300001ee4, 0x1ee500001ee6, 0x1ee700001ee8, 0x1ee900001eea, 0x1eeb00001eec, 0x1eed00001eee, 0x1eef00001ef0, 0x1ef100001ef2, 0x1ef300001ef4, 0x1ef500001ef6, 0x1ef700001ef8, 0x1ef900001efa, 0x1efb00001efc, 0x1efd00001efe, 0x1eff00001f08, 0x1f1000001f16, 0x1f2000001f28, 0x1f3000001f38, 0x1f4000001f46, 0x1f5000001f58, 0x1f6000001f68, 0x1f7000001f71, 0x1f7200001f73, 0x1f7400001f75, 0x1f7600001f77, 0x1f7800001f79, 0x1f7a00001f7b, 0x1f7c00001f7d, 0x1fb000001fb2, 0x1fb600001fb7, 0x1fc600001fc7, 0x1fd000001fd3, 0x1fd600001fd8, 0x1fe000001fe3, 0x1fe400001fe8, 0x1ff600001ff7, 0x214e0000214f, 0x218400002185, 0x2c3000002c5f, 0x2c6100002c62, 
0x2c6500002c67, 0x2c6800002c69, 0x2c6a00002c6b, 0x2c6c00002c6d, 0x2c7100002c72, 0x2c7300002c75, 0x2c7600002c7c, 0x2c8100002c82, 0x2c8300002c84, 0x2c8500002c86, 0x2c8700002c88, 0x2c8900002c8a, 0x2c8b00002c8c, 0x2c8d00002c8e, 0x2c8f00002c90, 0x2c9100002c92, 0x2c9300002c94, 0x2c9500002c96, 0x2c9700002c98, 0x2c9900002c9a, 0x2c9b00002c9c, 0x2c9d00002c9e, 0x2c9f00002ca0, 0x2ca100002ca2, 0x2ca300002ca4, 0x2ca500002ca6, 0x2ca700002ca8, 0x2ca900002caa, 0x2cab00002cac, 0x2cad00002cae, 0x2caf00002cb0, 0x2cb100002cb2, 0x2cb300002cb4, 0x2cb500002cb6, 0x2cb700002cb8, 0x2cb900002cba, 0x2cbb00002cbc, 0x2cbd00002cbe, 0x2cbf00002cc0, 0x2cc100002cc2, 0x2cc300002cc4, 0x2cc500002cc6, 0x2cc700002cc8, 0x2cc900002cca, 0x2ccb00002ccc, 0x2ccd00002cce, 0x2ccf00002cd0, 0x2cd100002cd2, 0x2cd300002cd4, 0x2cd500002cd6, 0x2cd700002cd8, 0x2cd900002cda, 0x2cdb00002cdc, 0x2cdd00002cde, 0x2cdf00002ce0, 0x2ce100002ce2, 0x2ce300002ce5, 0x2cec00002ced, 0x2cee00002cf2, 0x2cf300002cf4, 0x2d0000002d26, 0x2d2700002d28, 0x2d2d00002d2e, 0x2d3000002d68, 0x2d7f00002d97, 0x2da000002da7, 0x2da800002daf, 0x2db000002db7, 0x2db800002dbf, 0x2dc000002dc7, 0x2dc800002dcf, 0x2dd000002dd7, 0x2dd800002ddf, 0x2de000002e00, 0x2e2f00002e30, 0x300500003008, 0x302a0000302e, 0x303c0000303d, 0x304100003097, 0x30990000309b, 0x309d0000309f, 0x30a1000030fb, 0x30fc000030ff, 0x310500003130, 0x31a0000031c0, 0x31f000003200, 0x340000004dc0, 0x4e0000009ffd, 0xa0000000a48d, 0xa4d00000a4fe, 0xa5000000a60d, 0xa6100000a62c, 0xa6410000a642, 0xa6430000a644, 0xa6450000a646, 0xa6470000a648, 0xa6490000a64a, 0xa64b0000a64c, 0xa64d0000a64e, 0xa64f0000a650, 0xa6510000a652, 0xa6530000a654, 0xa6550000a656, 0xa6570000a658, 0xa6590000a65a, 0xa65b0000a65c, 0xa65d0000a65e, 0xa65f0000a660, 0xa6610000a662, 0xa6630000a664, 0xa6650000a666, 0xa6670000a668, 0xa6690000a66a, 0xa66b0000a66c, 0xa66d0000a670, 0xa6740000a67e, 0xa67f0000a680, 0xa6810000a682, 0xa6830000a684, 0xa6850000a686, 0xa6870000a688, 0xa6890000a68a, 0xa68b0000a68c, 0xa68d0000a68e, 0xa68f0000a690, 0xa6910000a692, 0xa6930000a694, 0xa6950000a696, 0xa6970000a698, 0xa6990000a69a, 0xa69b0000a69c, 0xa69e0000a6e6, 0xa6f00000a6f2, 0xa7170000a720, 0xa7230000a724, 0xa7250000a726, 0xa7270000a728, 0xa7290000a72a, 0xa72b0000a72c, 0xa72d0000a72e, 0xa72f0000a732, 0xa7330000a734, 0xa7350000a736, 0xa7370000a738, 0xa7390000a73a, 0xa73b0000a73c, 0xa73d0000a73e, 0xa73f0000a740, 0xa7410000a742, 0xa7430000a744, 0xa7450000a746, 0xa7470000a748, 0xa7490000a74a, 0xa74b0000a74c, 0xa74d0000a74e, 0xa74f0000a750, 0xa7510000a752, 0xa7530000a754, 0xa7550000a756, 0xa7570000a758, 0xa7590000a75a, 0xa75b0000a75c, 0xa75d0000a75e, 0xa75f0000a760, 0xa7610000a762, 0xa7630000a764, 0xa7650000a766, 0xa7670000a768, 0xa7690000a76a, 0xa76b0000a76c, 0xa76d0000a76e, 0xa76f0000a770, 0xa7710000a779, 0xa77a0000a77b, 0xa77c0000a77d, 0xa77f0000a780, 0xa7810000a782, 0xa7830000a784, 0xa7850000a786, 0xa7870000a789, 0xa78c0000a78d, 0xa78e0000a790, 0xa7910000a792, 0xa7930000a796, 0xa7970000a798, 0xa7990000a79a, 0xa79b0000a79c, 0xa79d0000a79e, 0xa79f0000a7a0, 0xa7a10000a7a2, 0xa7a30000a7a4, 0xa7a50000a7a6, 0xa7a70000a7a8, 0xa7a90000a7aa, 0xa7af0000a7b0, 0xa7b50000a7b6, 0xa7b70000a7b8, 0xa7b90000a7ba, 0xa7bb0000a7bc, 0xa7bd0000a7be, 0xa7bf0000a7c0, 0xa7c30000a7c4, 0xa7c80000a7c9, 0xa7ca0000a7cb, 0xa7f60000a7f8, 0xa7fa0000a828, 0xa82c0000a82d, 0xa8400000a874, 0xa8800000a8c6, 0xa8d00000a8da, 0xa8e00000a8f8, 0xa8fb0000a8fc, 0xa8fd0000a92e, 0xa9300000a954, 0xa9800000a9c1, 0xa9cf0000a9da, 0xa9e00000a9ff, 0xaa000000aa37, 0xaa400000aa4e, 0xaa500000aa5a, 0xaa600000aa77, 0xaa7a0000aac3, 
0xaadb0000aade, 0xaae00000aaf0, 0xaaf20000aaf7, 0xab010000ab07, 0xab090000ab0f, 0xab110000ab17, 0xab200000ab27, 0xab280000ab2f, 0xab300000ab5b, 0xab600000ab6a, 0xabc00000abeb, 0xabec0000abee, 0xabf00000abfa, 0xac000000d7a4, 0xfa0e0000fa10, 0xfa110000fa12, 0xfa130000fa15, 0xfa1f0000fa20, 0xfa210000fa22, 0xfa230000fa25, 0xfa270000fa2a, 0xfb1e0000fb1f, 0xfe200000fe30, 0xfe730000fe74, 0x100000001000c, 0x1000d00010027, 0x100280001003b, 0x1003c0001003e, 0x1003f0001004e, 0x100500001005e, 0x10080000100fb, 0x101fd000101fe, 0x102800001029d, 0x102a0000102d1, 0x102e0000102e1, 0x1030000010320, 0x1032d00010341, 0x103420001034a, 0x103500001037b, 0x103800001039e, 0x103a0000103c4, 0x103c8000103d0, 0x104280001049e, 0x104a0000104aa, 0x104d8000104fc, 0x1050000010528, 0x1053000010564, 0x1060000010737, 0x1074000010756, 0x1076000010768, 0x1080000010806, 0x1080800010809, 0x1080a00010836, 0x1083700010839, 0x1083c0001083d, 0x1083f00010856, 0x1086000010877, 0x108800001089f, 0x108e0000108f3, 0x108f4000108f6, 0x1090000010916, 0x109200001093a, 0x10980000109b8, 0x109be000109c0, 0x10a0000010a04, 0x10a0500010a07, 0x10a0c00010a14, 0x10a1500010a18, 0x10a1900010a36, 0x10a3800010a3b, 0x10a3f00010a40, 0x10a6000010a7d, 0x10a8000010a9d, 0x10ac000010ac8, 0x10ac900010ae7, 0x10b0000010b36, 0x10b4000010b56, 0x10b6000010b73, 0x10b8000010b92, 0x10c0000010c49, 0x10cc000010cf3, 0x10d0000010d28, 0x10d3000010d3a, 0x10e8000010eaa, 0x10eab00010ead, 0x10eb000010eb2, 0x10f0000010f1d, 0x10f2700010f28, 0x10f3000010f51, 0x10fb000010fc5, 0x10fe000010ff7, 0x1100000011047, 0x1106600011070, 0x1107f000110bb, 0x110d0000110e9, 0x110f0000110fa, 0x1110000011135, 0x1113600011140, 0x1114400011148, 0x1115000011174, 0x1117600011177, 0x11180000111c5, 0x111c9000111cd, 0x111ce000111db, 0x111dc000111dd, 0x1120000011212, 0x1121300011238, 0x1123e0001123f, 0x1128000011287, 0x1128800011289, 0x1128a0001128e, 0x1128f0001129e, 0x1129f000112a9, 0x112b0000112eb, 0x112f0000112fa, 0x1130000011304, 0x113050001130d, 0x1130f00011311, 0x1131300011329, 0x1132a00011331, 0x1133200011334, 0x113350001133a, 0x1133b00011345, 0x1134700011349, 0x1134b0001134e, 0x1135000011351, 0x1135700011358, 0x1135d00011364, 0x113660001136d, 0x1137000011375, 0x114000001144b, 0x114500001145a, 0x1145e00011462, 0x11480000114c6, 0x114c7000114c8, 0x114d0000114da, 0x11580000115b6, 0x115b8000115c1, 0x115d8000115de, 0x1160000011641, 0x1164400011645, 0x116500001165a, 0x11680000116b9, 0x116c0000116ca, 0x117000001171b, 0x1171d0001172c, 0x117300001173a, 0x118000001183b, 0x118c0000118ea, 0x118ff00011907, 0x119090001190a, 0x1190c00011914, 0x1191500011917, 0x1191800011936, 0x1193700011939, 0x1193b00011944, 0x119500001195a, 0x119a0000119a8, 0x119aa000119d8, 0x119da000119e2, 0x119e3000119e5, 0x11a0000011a3f, 0x11a4700011a48, 0x11a5000011a9a, 0x11a9d00011a9e, 0x11ac000011af9, 0x11c0000011c09, 0x11c0a00011c37, 0x11c3800011c41, 0x11c5000011c5a, 0x11c7200011c90, 0x11c9200011ca8, 0x11ca900011cb7, 0x11d0000011d07, 0x11d0800011d0a, 0x11d0b00011d37, 0x11d3a00011d3b, 0x11d3c00011d3e, 0x11d3f00011d48, 0x11d5000011d5a, 0x11d6000011d66, 0x11d6700011d69, 0x11d6a00011d8f, 0x11d9000011d92, 0x11d9300011d99, 0x11da000011daa, 0x11ee000011ef7, 0x11fb000011fb1, 0x120000001239a, 0x1248000012544, 0x130000001342f, 0x1440000014647, 0x1680000016a39, 0x16a4000016a5f, 0x16a6000016a6a, 0x16ad000016aee, 0x16af000016af5, 0x16b0000016b37, 0x16b4000016b44, 0x16b5000016b5a, 0x16b6300016b78, 0x16b7d00016b90, 0x16e6000016e80, 0x16f0000016f4b, 0x16f4f00016f88, 0x16f8f00016fa0, 0x16fe000016fe2, 0x16fe300016fe5, 0x16ff000016ff2, 0x17000000187f8, 
0x1880000018cd6, 0x18d0000018d09, 0x1b0000001b11f, 0x1b1500001b153, 0x1b1640001b168, 0x1b1700001b2fc, 0x1bc000001bc6b, 0x1bc700001bc7d, 0x1bc800001bc89, 0x1bc900001bc9a, 0x1bc9d0001bc9f, 0x1da000001da37, 0x1da3b0001da6d, 0x1da750001da76, 0x1da840001da85, 0x1da9b0001daa0, 0x1daa10001dab0, 0x1e0000001e007, 0x1e0080001e019, 0x1e01b0001e022, 0x1e0230001e025, 0x1e0260001e02b, 0x1e1000001e12d, 0x1e1300001e13e, 0x1e1400001e14a, 0x1e14e0001e14f, 0x1e2c00001e2fa, 0x1e8000001e8c5, 0x1e8d00001e8d7, 0x1e9220001e94c, 0x1e9500001e95a, 0x1fbf00001fbfa, 0x200000002a6de, 0x2a7000002b735, 0x2b7400002b81e, 0x2b8200002cea2, 0x2ceb00002ebe1, 0x300000003134b, ), 'CONTEXTJ': ( 0x200c0000200e, ), 'CONTEXTO': ( 0xb7000000b8, 0x37500000376, 0x5f3000005f5, 0x6600000066a, 0x6f0000006fa, 0x30fb000030fc, ), }
0
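Two conventions in the table above are worth making explicit: the scripts ranges use the same single-integer (start << 32 | end) encoding that intranges.py decodes, and the joining_types values are the ord() of the Unicode joining-type letters that valid_contextj in core.py compares against. A quick check, with codepoints taken from the table itself:

from pip._vendor.idna import idnadata
from pip._vendor.idna.intranges import intranges_contain

# GREEK SMALL LETTER ALPHA (U+03B1) falls inside the encoded 'Greek' ranges
assert intranges_contain(0x3B1, idnadata.scripts['Greek'])

# 68, 82, 76, 84, 85, 67 are ord('D'), ord('R'), ord('L'), ord('T'), ord('U'), ord('C')
assert idnadata.joining_types[0x628] == ord('D')  # ARABIC LETTER BEH: dual-joining
assert idnadata.joining_types[0x627] == ord('R')  # ARABIC LETTER ALEF: right-joining
assert idnadata.joining_types[0x640] == ord('C')  # ARABIC TATWEEL: join-causing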
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/idna/__init__.py
from .package_data import __version__
from .core import *
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/idna/core.py
from . import idnadata
import bisect
import unicodedata
import re
import sys
from .intranges import intranges_contain

_virama_combining_class = 9
_alabel_prefix = b'xn--'
_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')

if sys.version_info[0] >= 3:
    unicode = str
    unichr = chr

class IDNAError(UnicodeError):
    """ Base exception for all IDNA-encoding related problems """
    pass


class IDNABidiError(IDNAError):
    """ Exception when bidirectional requirements are not satisfied """
    pass


class InvalidCodepoint(IDNAError):
    """ Exception when a disallowed or unallocated codepoint is used """
    pass


class InvalidCodepointContext(IDNAError):
    """ Exception when the codepoint is not valid in the context it is used """
    pass


def _combining_class(cp):
    v = unicodedata.combining(unichr(cp))
    if v == 0:
        if not unicodedata.name(unichr(cp)):
            raise ValueError("Unknown character in unicodedata")
    return v

def _is_script(cp, script):
    return intranges_contain(ord(cp), idnadata.scripts[script])

def _punycode(s):
    return s.encode('punycode')

def _unot(s):
    return 'U+{0:04X}'.format(s)


def valid_label_length(label):
    if len(label) > 63:
        return False
    return True

def valid_string_length(label, trailing_dot):
    if len(label) > (254 if trailing_dot else 253):
        return False
    return True

def check_bidi(label, check_ltr=False):
    # Bidi rules should only be applied if string contains RTL characters
    bidi_label = False
    for (idx, cp) in enumerate(label, 1):
        direction = unicodedata.bidirectional(cp)
        if direction == '':
            # String likely comes from a newer version of Unicode
            raise IDNABidiError('Unknown directionality in label {0} at position {1}'.format(repr(label), idx))
        if direction in ['R', 'AL', 'AN']:
            bidi_label = True
    if not bidi_label and not check_ltr:
        return True

    # Bidi rule 1
    direction = unicodedata.bidirectional(label[0])
    if direction in ['R', 'AL']:
        rtl = True
    elif direction == 'L':
        rtl = False
    else:
        raise IDNABidiError('First codepoint in label {0} must be directionality L, R or AL'.format(repr(label)))

    valid_ending = False
    number_type = False
    for (idx, cp) in enumerate(label, 1):
        direction = unicodedata.bidirectional(cp)

        if rtl:
            # Bidi rule 2
            if not direction in ['R', 'AL', 'AN', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
                raise IDNABidiError('Invalid direction for codepoint at position {0} in a right-to-left label'.format(idx))
            # Bidi rule 3
            if direction in ['R', 'AL', 'EN', 'AN']:
                valid_ending = True
            elif direction != 'NSM':
                valid_ending = False
            # Bidi rule 4
            if direction in ['AN', 'EN']:
                if not number_type:
                    number_type = direction
                else:
                    if number_type != direction:
                        raise IDNABidiError('Can not mix numeral types in a right-to-left label')
        else:
            # Bidi rule 5
            if not direction in ['L', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
                raise IDNABidiError('Invalid direction for codepoint at position {0} in a left-to-right label'.format(idx))
            # Bidi rule 6
            if direction in ['L', 'EN']:
                valid_ending = True
            elif direction != 'NSM':
                valid_ending = False

    if not valid_ending:
        raise IDNABidiError('Label ends with illegal codepoint directionality')

    return True

def check_initial_combiner(label):
    if unicodedata.category(label[0])[0] == 'M':
        raise IDNAError('Label begins with an illegal combining character')
    return True

def check_hyphen_ok(label):
    if label[2:4] == '--':
        raise IDNAError('Label has disallowed hyphens in 3rd and 4th position')
    if label[0] == '-' or label[-1] == '-':
        raise IDNAError('Label must not start or end with a hyphen')
    return True

def check_nfc(label):
    if unicodedata.normalize('NFC', label) != label:
        raise IDNAError('Label must be in Normalization Form C')

def valid_contextj(label, pos):
    cp_value = ord(label[pos])

    if cp_value == 0x200c:

        if pos > 0:
            if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
                return True

        ok = False
        for i in range(pos-1, -1, -1):
            joining_type = idnadata.joining_types.get(ord(label[i]))
            if joining_type == ord('T'):
                continue
            if joining_type in [ord('L'), ord('D')]:
                ok = True
                break

        if not ok:
            return False

        ok = False
        for i in range(pos+1, len(label)):
            joining_type = idnadata.joining_types.get(ord(label[i]))
            if joining_type == ord('T'):
                continue
            if joining_type in [ord('R'), ord('D')]:
                ok = True
                break
        return ok

    if cp_value == 0x200d:

        if pos > 0:
            if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
                return True
        return False

    else:

        return False


def valid_contexto(label, pos, exception=False):
    cp_value = ord(label[pos])

    if cp_value == 0x00b7:
        if 0 < pos < len(label)-1:
            if ord(label[pos - 1]) == 0x006c and ord(label[pos + 1]) == 0x006c:
                return True
        return False

    elif cp_value == 0x0375:
        if pos < len(label)-1 and len(label) > 1:
            return _is_script(label[pos + 1], 'Greek')
        return False

    elif cp_value == 0x05f3 or cp_value == 0x05f4:
        if pos > 0:
            return _is_script(label[pos - 1], 'Hebrew')
        return False

    elif cp_value == 0x30fb:
        for cp in label:
            if cp == u'\u30fb':
                continue
            if _is_script(cp, 'Hiragana') or _is_script(cp, 'Katakana') or _is_script(cp, 'Han'):
                return True
        return False

    elif 0x660 <= cp_value <= 0x669:
        for cp in label:
            if 0x6f0 <= ord(cp) <= 0x06f9:
                return False
        return True

    elif 0x6f0 <= cp_value <= 0x6f9:
        for cp in label:
            if 0x660 <= ord(cp) <= 0x0669:
                return False
        return True

def check_label(label):
    if isinstance(label, (bytes, bytearray)):
        label = label.decode('utf-8')
    if len(label) == 0:
        raise IDNAError('Empty Label')

    check_nfc(label)
    check_hyphen_ok(label)
    check_initial_combiner(label)

    for (pos, cp) in enumerate(label):
        cp_value = ord(cp)
        if intranges_contain(cp_value, idnadata.codepoint_classes['PVALID']):
            continue
        elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTJ']):
            try:
                if not valid_contextj(label, pos):
                    raise InvalidCodepointContext('Joiner {0} not allowed at position {1} in {2}'.format(
                        _unot(cp_value), pos+1, repr(label)))
            except ValueError:
                raise IDNAError('Unknown codepoint adjacent to joiner {0} at position {1} in {2}'.format(
                    _unot(cp_value), pos+1, repr(label)))
        elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTO']):
            if not valid_contexto(label, pos):
                raise InvalidCodepointContext('Codepoint {0} not allowed at position {1} in {2}'.format(_unot(cp_value), pos+1, repr(label)))
        else:
            raise InvalidCodepoint('Codepoint {0} at position {1} of {2} not allowed'.format(_unot(cp_value), pos+1, repr(label)))

    check_bidi(label)


def alabel(label):
    try:
        label = label.encode('ascii')
        ulabel(label)
        if not valid_label_length(label):
            raise IDNAError('Label too long')
        return label
    except UnicodeEncodeError:
        pass

    if not label:
        raise IDNAError('No Input')

    label = unicode(label)
    check_label(label)
    label = _punycode(label)
    label = _alabel_prefix + label

    if not valid_label_length(label):
        raise IDNAError('Label too long')

    return label


def ulabel(label):
    if not isinstance(label, (bytes, bytearray)):
        try:
            label = label.encode('ascii')
        except UnicodeEncodeError:
            check_label(label)
            return label

    label = label.lower()
    if label.startswith(_alabel_prefix):
        label = label[len(_alabel_prefix):]
        if not label:
            raise IDNAError('Malformed A-label, no Punycode eligible content found')
        if label.decode('ascii')[-1] == '-':
            raise IDNAError('A-label must not end with a hyphen')
    else:
        check_label(label)
        return label.decode('ascii')

    label = label.decode('punycode')
    check_label(label)
    return label


def uts46_remap(domain, std3_rules=True, transitional=False):
    """Re-map the characters in the string according to UTS46 processing."""
    from .uts46data import uts46data
    output = u""
    try:
        for pos, char in enumerate(domain):
            code_point = ord(char)
            uts46row = uts46data[code_point if code_point < 256 else
                bisect.bisect_left(uts46data, (code_point, "Z")) - 1]
            status = uts46row[1]
            replacement = uts46row[2] if len(uts46row) == 3 else None
            if (status == "V" or
                    (status == "D" and not transitional) or
                    (status == "3" and not std3_rules and replacement is None)):
                output += char
            elif replacement is not None and (status == "M" or
                    (status == "3" and not std3_rules) or
                    (status == "D" and transitional)):
                output += replacement
            elif status != "I":
                raise IndexError()
        return unicodedata.normalize("NFC", output)
    except IndexError:
        raise InvalidCodepoint(
            "Codepoint {0} not allowed at position {1} in {2}".format(
            _unot(code_point), pos + 1, repr(domain)))


def encode(s, strict=False, uts46=False, std3_rules=False, transitional=False):
    if isinstance(s, (bytes, bytearray)):
        s = s.decode("ascii")
    if uts46:
        s = uts46_remap(s, std3_rules, transitional)
    trailing_dot = False
    result = []
    if strict:
        labels = s.split('.')
    else:
        labels = _unicode_dots_re.split(s)
    if not labels or labels == ['']:
        raise IDNAError('Empty domain')
    if labels[-1] == '':
        del labels[-1]
        trailing_dot = True
    for label in labels:
        s = alabel(label)
        if s:
            result.append(s)
        else:
            raise IDNAError('Empty label')
    if trailing_dot:
        result.append(b'')
    s = b'.'.join(result)
    if not valid_string_length(s, trailing_dot):
        raise IDNAError('Domain too long')
    return s


def decode(s, strict=False, uts46=False, std3_rules=False):
    if isinstance(s, (bytes, bytearray)):
        s = s.decode("ascii")
    if uts46:
        s = uts46_remap(s, std3_rules, False)
    trailing_dot = False
    result = []
    if not strict:
        labels = _unicode_dots_re.split(s)
    else:
        labels = s.split(u'.')
    if not labels or labels == ['']:
        raise IDNAError('Empty domain')
    if not labels[-1]:
        del labels[-1]
        trailing_dot = True
    for label in labels:
        s = ulabel(label)
        if s:
            result.append(s)
        else:
            raise IDNAError('Empty label')
    if trailing_dot:
        result.append(u'')
    return u'.'.join(result)
0
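A short round-trip sketch of the public API above; the sample domains are the ones used in idna's own documentation:

from pip._vendor import idna

assert idna.encode(u'ドメイン.テスト') == b'xn--eckwd4c7c.xn--zckzah'
assert idna.decode(b'xn--eckwd4c7c.xn--zckzah') == u'ドメイン.テスト'

# Per-label helpers: alabel applies the xn-- ACE prefix, ulabel strips it
assert idna.alabel(u'テスト') == b'xn--zckzah'
assert idna.ulabel(b'xn--zckzah') == u'テスト'

# uts46=True runs uts46_remap first, folding case and variant dots
assert idna.encode(u'Königsgäßchen', uts46=True) == b'xn--knigsgchen-b4a3dun'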
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/idna/codec.py
from .core import encode, decode, alabel, ulabel, IDNAError
import codecs
import re

_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')

class Codec(codecs.Codec):

    def encode(self, data, errors='strict'):
        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))

        if not data:
            return "", 0

        return encode(data), len(data)

    def decode(self, data, errors='strict'):
        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))

        if not data:
            return u"", 0

        return decode(data), len(data)

class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
    def _buffer_encode(self, data, errors, final):
        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))

        if not data:
            return ("", 0)

        labels = _unicode_dots_re.split(data)
        trailing_dot = u''
        if labels:
            if not labels[-1]:
                trailing_dot = '.'
                del labels[-1]
            elif not final:
                # Keep potentially unfinished label until the next call
                del labels[-1]
                if labels:
                    trailing_dot = '.'

        result = []
        size = 0
        for label in labels:
            result.append(alabel(label))
            if size:
                size += 1
            size += len(label)

        # Join with U+002E
        result = ".".join(result) + trailing_dot
        size += len(trailing_dot)
        return (result, size)

class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    def _buffer_decode(self, data, errors, final):
        if errors != 'strict':
            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))

        if not data:
            return (u"", 0)

        # IDNA allows decoding to operate on Unicode strings, too.
        if isinstance(data, unicode):
            labels = _unicode_dots_re.split(data)
        else:
            # Must be ASCII string
            data = str(data)
            unicode(data, "ascii")
            labels = data.split(".")

        trailing_dot = u''
        if labels:
            if not labels[-1]:
                trailing_dot = u'.'
                del labels[-1]
            elif not final:
                # Keep potentially unfinished label until the next call
                del labels[-1]
                if labels:
                    trailing_dot = u'.'

        result = []
        size = 0
        for label in labels:
            result.append(ulabel(label))
            if size:
                size += 1
            size += len(label)

        result = u".".join(result) + trailing_dot
        size += len(trailing_dot)
        return (result, size)


class StreamWriter(Codec, codecs.StreamWriter):
    pass

class StreamReader(Codec, codecs.StreamReader):
    pass

def getregentry():
    return codecs.CodecInfo(
        name='idna',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
0
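As dumped here, the module builds the codec machinery but stops at getregentry() without registering it, so a caller has to hook it up (or use the classes directly); note also that IncrementalDecoder still references the py2-only unicode builtin, so its str branch would appear to NameError on Python 3, which the plain Codec path below avoids. A sketch of manual registration, with the codec name 'idna2008' chosen here so the stdlib's IDNA 2003 'idna' codec is left untouched:

import codecs
from pip._vendor.idna import codec

def _search(name):
    # Return the CodecInfo from the module above only for our custom name
    if name == 'idna2008':
        return codec.getregentry()
    return None

codecs.register(_search)

assert u'ドメイン.テスト'.encode('idna2008') == b'xn--eckwd4c7c.xn--zckzah'
assert b'xn--eckwd4c7c.xn--zckzah'.decode('idna2008') == u'ドメイン.テスト'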
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/idna/uts46data.py
# This file is automatically generated by tools/idna-data # vim: set fileencoding=utf-8 : """IDNA Mapping Table from UTS46.""" __version__ = "13.0.0" def _seg_0(): return [ (0x0, '3'), (0x1, '3'), (0x2, '3'), (0x3, '3'), (0x4, '3'), (0x5, '3'), (0x6, '3'), (0x7, '3'), (0x8, '3'), (0x9, '3'), (0xA, '3'), (0xB, '3'), (0xC, '3'), (0xD, '3'), (0xE, '3'), (0xF, '3'), (0x10, '3'), (0x11, '3'), (0x12, '3'), (0x13, '3'), (0x14, '3'), (0x15, '3'), (0x16, '3'), (0x17, '3'), (0x18, '3'), (0x19, '3'), (0x1A, '3'), (0x1B, '3'), (0x1C, '3'), (0x1D, '3'), (0x1E, '3'), (0x1F, '3'), (0x20, '3'), (0x21, '3'), (0x22, '3'), (0x23, '3'), (0x24, '3'), (0x25, '3'), (0x26, '3'), (0x27, '3'), (0x28, '3'), (0x29, '3'), (0x2A, '3'), (0x2B, '3'), (0x2C, '3'), (0x2D, 'V'), (0x2E, 'V'), (0x2F, '3'), (0x30, 'V'), (0x31, 'V'), (0x32, 'V'), (0x33, 'V'), (0x34, 'V'), (0x35, 'V'), (0x36, 'V'), (0x37, 'V'), (0x38, 'V'), (0x39, 'V'), (0x3A, '3'), (0x3B, '3'), (0x3C, '3'), (0x3D, '3'), (0x3E, '3'), (0x3F, '3'), (0x40, '3'), (0x41, 'M', u'a'), (0x42, 'M', u'b'), (0x43, 'M', u'c'), (0x44, 'M', u'd'), (0x45, 'M', u'e'), (0x46, 'M', u'f'), (0x47, 'M', u'g'), (0x48, 'M', u'h'), (0x49, 'M', u'i'), (0x4A, 'M', u'j'), (0x4B, 'M', u'k'), (0x4C, 'M', u'l'), (0x4D, 'M', u'm'), (0x4E, 'M', u'n'), (0x4F, 'M', u'o'), (0x50, 'M', u'p'), (0x51, 'M', u'q'), (0x52, 'M', u'r'), (0x53, 'M', u's'), (0x54, 'M', u't'), (0x55, 'M', u'u'), (0x56, 'M', u'v'), (0x57, 'M', u'w'), (0x58, 'M', u'x'), (0x59, 'M', u'y'), (0x5A, 'M', u'z'), (0x5B, '3'), (0x5C, '3'), (0x5D, '3'), (0x5E, '3'), (0x5F, '3'), (0x60, '3'), (0x61, 'V'), (0x62, 'V'), (0x63, 'V'), ] def _seg_1(): return [ (0x64, 'V'), (0x65, 'V'), (0x66, 'V'), (0x67, 'V'), (0x68, 'V'), (0x69, 'V'), (0x6A, 'V'), (0x6B, 'V'), (0x6C, 'V'), (0x6D, 'V'), (0x6E, 'V'), (0x6F, 'V'), (0x70, 'V'), (0x71, 'V'), (0x72, 'V'), (0x73, 'V'), (0x74, 'V'), (0x75, 'V'), (0x76, 'V'), (0x77, 'V'), (0x78, 'V'), (0x79, 'V'), (0x7A, 'V'), (0x7B, '3'), (0x7C, '3'), (0x7D, '3'), (0x7E, '3'), (0x7F, '3'), (0x80, 'X'), (0x81, 'X'), (0x82, 'X'), (0x83, 'X'), (0x84, 'X'), (0x85, 'X'), (0x86, 'X'), (0x87, 'X'), (0x88, 'X'), (0x89, 'X'), (0x8A, 'X'), (0x8B, 'X'), (0x8C, 'X'), (0x8D, 'X'), (0x8E, 'X'), (0x8F, 'X'), (0x90, 'X'), (0x91, 'X'), (0x92, 'X'), (0x93, 'X'), (0x94, 'X'), (0x95, 'X'), (0x96, 'X'), (0x97, 'X'), (0x98, 'X'), (0x99, 'X'), (0x9A, 'X'), (0x9B, 'X'), (0x9C, 'X'), (0x9D, 'X'), (0x9E, 'X'), (0x9F, 'X'), (0xA0, '3', u' '), (0xA1, 'V'), (0xA2, 'V'), (0xA3, 'V'), (0xA4, 'V'), (0xA5, 'V'), (0xA6, 'V'), (0xA7, 'V'), (0xA8, '3', u' ̈'), (0xA9, 'V'), (0xAA, 'M', u'a'), (0xAB, 'V'), (0xAC, 'V'), (0xAD, 'I'), (0xAE, 'V'), (0xAF, '3', u' ̄'), (0xB0, 'V'), (0xB1, 'V'), (0xB2, 'M', u'2'), (0xB3, 'M', u'3'), (0xB4, '3', u' ́'), (0xB5, 'M', u'μ'), (0xB6, 'V'), (0xB7, 'V'), (0xB8, '3', u' ̧'), (0xB9, 'M', u'1'), (0xBA, 'M', u'o'), (0xBB, 'V'), (0xBC, 'M', u'1⁄4'), (0xBD, 'M', u'1⁄2'), (0xBE, 'M', u'3⁄4'), (0xBF, 'V'), (0xC0, 'M', u'à'), (0xC1, 'M', u'á'), (0xC2, 'M', u'â'), (0xC3, 'M', u'ã'), (0xC4, 'M', u'ä'), (0xC5, 'M', u'å'), (0xC6, 'M', u'æ'), (0xC7, 'M', u'ç'), ] def _seg_2(): return [ (0xC8, 'M', u'è'), (0xC9, 'M', u'é'), (0xCA, 'M', u'ê'), (0xCB, 'M', u'ë'), (0xCC, 'M', u'ì'), (0xCD, 'M', u'í'), (0xCE, 'M', u'î'), (0xCF, 'M', u'ï'), (0xD0, 'M', u'ð'), (0xD1, 'M', u'ñ'), (0xD2, 'M', u'ò'), (0xD3, 'M', u'ó'), (0xD4, 'M', u'ô'), (0xD5, 'M', u'õ'), (0xD6, 'M', u'ö'), (0xD7, 'V'), (0xD8, 'M', u'ø'), (0xD9, 'M', u'ù'), (0xDA, 'M', u'ú'), (0xDB, 'M', u'û'), (0xDC, 'M', u'ü'), (0xDD, 'M', u'ý'), (0xDE, 'M', u'þ'), (0xDF, 'D', 
u'ss'), (0xE0, 'V'), (0xE1, 'V'), (0xE2, 'V'), (0xE3, 'V'), (0xE4, 'V'), (0xE5, 'V'), (0xE6, 'V'), (0xE7, 'V'), (0xE8, 'V'), (0xE9, 'V'), (0xEA, 'V'), (0xEB, 'V'), (0xEC, 'V'), (0xED, 'V'), (0xEE, 'V'), (0xEF, 'V'), (0xF0, 'V'), (0xF1, 'V'), (0xF2, 'V'), (0xF3, 'V'), (0xF4, 'V'), (0xF5, 'V'), (0xF6, 'V'), (0xF7, 'V'), (0xF8, 'V'), (0xF9, 'V'), (0xFA, 'V'), (0xFB, 'V'), (0xFC, 'V'), (0xFD, 'V'), (0xFE, 'V'), (0xFF, 'V'), (0x100, 'M', u'ā'), (0x101, 'V'), (0x102, 'M', u'ă'), (0x103, 'V'), (0x104, 'M', u'ą'), (0x105, 'V'), (0x106, 'M', u'ć'), (0x107, 'V'), (0x108, 'M', u'ĉ'), (0x109, 'V'), (0x10A, 'M', u'ċ'), (0x10B, 'V'), (0x10C, 'M', u'č'), (0x10D, 'V'), (0x10E, 'M', u'ď'), (0x10F, 'V'), (0x110, 'M', u'đ'), (0x111, 'V'), (0x112, 'M', u'ē'), (0x113, 'V'), (0x114, 'M', u'ĕ'), (0x115, 'V'), (0x116, 'M', u'ė'), (0x117, 'V'), (0x118, 'M', u'ę'), (0x119, 'V'), (0x11A, 'M', u'ě'), (0x11B, 'V'), (0x11C, 'M', u'ĝ'), (0x11D, 'V'), (0x11E, 'M', u'ğ'), (0x11F, 'V'), (0x120, 'M', u'ġ'), (0x121, 'V'), (0x122, 'M', u'ģ'), (0x123, 'V'), (0x124, 'M', u'ĥ'), (0x125, 'V'), (0x126, 'M', u'ħ'), (0x127, 'V'), (0x128, 'M', u'ĩ'), (0x129, 'V'), (0x12A, 'M', u'ī'), (0x12B, 'V'), ] def _seg_3(): return [ (0x12C, 'M', u'ĭ'), (0x12D, 'V'), (0x12E, 'M', u'į'), (0x12F, 'V'), (0x130, 'M', u'i̇'), (0x131, 'V'), (0x132, 'M', u'ij'), (0x134, 'M', u'ĵ'), (0x135, 'V'), (0x136, 'M', u'ķ'), (0x137, 'V'), (0x139, 'M', u'ĺ'), (0x13A, 'V'), (0x13B, 'M', u'ļ'), (0x13C, 'V'), (0x13D, 'M', u'ľ'), (0x13E, 'V'), (0x13F, 'M', u'l·'), (0x141, 'M', u'ł'), (0x142, 'V'), (0x143, 'M', u'ń'), (0x144, 'V'), (0x145, 'M', u'ņ'), (0x146, 'V'), (0x147, 'M', u'ň'), (0x148, 'V'), (0x149, 'M', u'ʼn'), (0x14A, 'M', u'ŋ'), (0x14B, 'V'), (0x14C, 'M', u'ō'), (0x14D, 'V'), (0x14E, 'M', u'ŏ'), (0x14F, 'V'), (0x150, 'M', u'ő'), (0x151, 'V'), (0x152, 'M', u'œ'), (0x153, 'V'), (0x154, 'M', u'ŕ'), (0x155, 'V'), (0x156, 'M', u'ŗ'), (0x157, 'V'), (0x158, 'M', u'ř'), (0x159, 'V'), (0x15A, 'M', u'ś'), (0x15B, 'V'), (0x15C, 'M', u'ŝ'), (0x15D, 'V'), (0x15E, 'M', u'ş'), (0x15F, 'V'), (0x160, 'M', u'š'), (0x161, 'V'), (0x162, 'M', u'ţ'), (0x163, 'V'), (0x164, 'M', u'ť'), (0x165, 'V'), (0x166, 'M', u'ŧ'), (0x167, 'V'), (0x168, 'M', u'ũ'), (0x169, 'V'), (0x16A, 'M', u'ū'), (0x16B, 'V'), (0x16C, 'M', u'ŭ'), (0x16D, 'V'), (0x16E, 'M', u'ů'), (0x16F, 'V'), (0x170, 'M', u'ű'), (0x171, 'V'), (0x172, 'M', u'ų'), (0x173, 'V'), (0x174, 'M', u'ŵ'), (0x175, 'V'), (0x176, 'M', u'ŷ'), (0x177, 'V'), (0x178, 'M', u'ÿ'), (0x179, 'M', u'ź'), (0x17A, 'V'), (0x17B, 'M', u'ż'), (0x17C, 'V'), (0x17D, 'M', u'ž'), (0x17E, 'V'), (0x17F, 'M', u's'), (0x180, 'V'), (0x181, 'M', u'ɓ'), (0x182, 'M', u'ƃ'), (0x183, 'V'), (0x184, 'M', u'ƅ'), (0x185, 'V'), (0x186, 'M', u'ɔ'), (0x187, 'M', u'ƈ'), (0x188, 'V'), (0x189, 'M', u'ɖ'), (0x18A, 'M', u'ɗ'), (0x18B, 'M', u'ƌ'), (0x18C, 'V'), (0x18E, 'M', u'ǝ'), (0x18F, 'M', u'ə'), (0x190, 'M', u'ɛ'), (0x191, 'M', u'ƒ'), (0x192, 'V'), (0x193, 'M', u'ɠ'), ] def _seg_4(): return [ (0x194, 'M', u'ɣ'), (0x195, 'V'), (0x196, 'M', u'ɩ'), (0x197, 'M', u'ɨ'), (0x198, 'M', u'ƙ'), (0x199, 'V'), (0x19C, 'M', u'ɯ'), (0x19D, 'M', u'ɲ'), (0x19E, 'V'), (0x19F, 'M', u'ɵ'), (0x1A0, 'M', u'ơ'), (0x1A1, 'V'), (0x1A2, 'M', u'ƣ'), (0x1A3, 'V'), (0x1A4, 'M', u'ƥ'), (0x1A5, 'V'), (0x1A6, 'M', u'ʀ'), (0x1A7, 'M', u'ƨ'), (0x1A8, 'V'), (0x1A9, 'M', u'ʃ'), (0x1AA, 'V'), (0x1AC, 'M', u'ƭ'), (0x1AD, 'V'), (0x1AE, 'M', u'ʈ'), (0x1AF, 'M', u'ư'), (0x1B0, 'V'), (0x1B1, 'M', u'ʊ'), (0x1B2, 'M', u'ʋ'), (0x1B3, 'M', u'ƴ'), (0x1B4, 'V'), (0x1B5, 'M', u'ƶ'), (0x1B6, 'V'), (0x1B7, 'M', u'ʒ'), 
(0x1B8, 'M', u'ƹ'), (0x1B9, 'V'), (0x1BC, 'M', u'ƽ'), (0x1BD, 'V'), (0x1C4, 'M', u'dž'), (0x1C7, 'M', u'lj'), (0x1CA, 'M', u'nj'), (0x1CD, 'M', u'ǎ'), (0x1CE, 'V'), (0x1CF, 'M', u'ǐ'), (0x1D0, 'V'), (0x1D1, 'M', u'ǒ'), (0x1D2, 'V'), (0x1D3, 'M', u'ǔ'), (0x1D4, 'V'), (0x1D5, 'M', u'ǖ'), (0x1D6, 'V'), (0x1D7, 'M', u'ǘ'), (0x1D8, 'V'), (0x1D9, 'M', u'ǚ'), (0x1DA, 'V'), (0x1DB, 'M', u'ǜ'), (0x1DC, 'V'), (0x1DE, 'M', u'ǟ'), (0x1DF, 'V'), (0x1E0, 'M', u'ǡ'), (0x1E1, 'V'), (0x1E2, 'M', u'ǣ'), (0x1E3, 'V'), (0x1E4, 'M', u'ǥ'), (0x1E5, 'V'), (0x1E6, 'M', u'ǧ'), (0x1E7, 'V'), (0x1E8, 'M', u'ǩ'), (0x1E9, 'V'), (0x1EA, 'M', u'ǫ'), (0x1EB, 'V'), (0x1EC, 'M', u'ǭ'), (0x1ED, 'V'), (0x1EE, 'M', u'ǯ'), (0x1EF, 'V'), (0x1F1, 'M', u'dz'), (0x1F4, 'M', u'ǵ'), (0x1F5, 'V'), (0x1F6, 'M', u'ƕ'), (0x1F7, 'M', u'ƿ'), (0x1F8, 'M', u'ǹ'), (0x1F9, 'V'), (0x1FA, 'M', u'ǻ'), (0x1FB, 'V'), (0x1FC, 'M', u'ǽ'), (0x1FD, 'V'), (0x1FE, 'M', u'ǿ'), (0x1FF, 'V'), (0x200, 'M', u'ȁ'), (0x201, 'V'), (0x202, 'M', u'ȃ'), (0x203, 'V'), (0x204, 'M', u'ȅ'), (0x205, 'V'), (0x206, 'M', u'ȇ'), (0x207, 'V'), (0x208, 'M', u'ȉ'), (0x209, 'V'), (0x20A, 'M', u'ȋ'), (0x20B, 'V'), (0x20C, 'M', u'ȍ'), ] def _seg_5(): return [ (0x20D, 'V'), (0x20E, 'M', u'ȏ'), (0x20F, 'V'), (0x210, 'M', u'ȑ'), (0x211, 'V'), (0x212, 'M', u'ȓ'), (0x213, 'V'), (0x214, 'M', u'ȕ'), (0x215, 'V'), (0x216, 'M', u'ȗ'), (0x217, 'V'), (0x218, 'M', u'ș'), (0x219, 'V'), (0x21A, 'M', u'ț'), (0x21B, 'V'), (0x21C, 'M', u'ȝ'), (0x21D, 'V'), (0x21E, 'M', u'ȟ'), (0x21F, 'V'), (0x220, 'M', u'ƞ'), (0x221, 'V'), (0x222, 'M', u'ȣ'), (0x223, 'V'), (0x224, 'M', u'ȥ'), (0x225, 'V'), (0x226, 'M', u'ȧ'), (0x227, 'V'), (0x228, 'M', u'ȩ'), (0x229, 'V'), (0x22A, 'M', u'ȫ'), (0x22B, 'V'), (0x22C, 'M', u'ȭ'), (0x22D, 'V'), (0x22E, 'M', u'ȯ'), (0x22F, 'V'), (0x230, 'M', u'ȱ'), (0x231, 'V'), (0x232, 'M', u'ȳ'), (0x233, 'V'), (0x23A, 'M', u'ⱥ'), (0x23B, 'M', u'ȼ'), (0x23C, 'V'), (0x23D, 'M', u'ƚ'), (0x23E, 'M', u'ⱦ'), (0x23F, 'V'), (0x241, 'M', u'ɂ'), (0x242, 'V'), (0x243, 'M', u'ƀ'), (0x244, 'M', u'ʉ'), (0x245, 'M', u'ʌ'), (0x246, 'M', u'ɇ'), (0x247, 'V'), (0x248, 'M', u'ɉ'), (0x249, 'V'), (0x24A, 'M', u'ɋ'), (0x24B, 'V'), (0x24C, 'M', u'ɍ'), (0x24D, 'V'), (0x24E, 'M', u'ɏ'), (0x24F, 'V'), (0x2B0, 'M', u'h'), (0x2B1, 'M', u'ɦ'), (0x2B2, 'M', u'j'), (0x2B3, 'M', u'r'), (0x2B4, 'M', u'ɹ'), (0x2B5, 'M', u'ɻ'), (0x2B6, 'M', u'ʁ'), (0x2B7, 'M', u'w'), (0x2B8, 'M', u'y'), (0x2B9, 'V'), (0x2D8, '3', u' ̆'), (0x2D9, '3', u' ̇'), (0x2DA, '3', u' ̊'), (0x2DB, '3', u' ̨'), (0x2DC, '3', u' ̃'), (0x2DD, '3', u' ̋'), (0x2DE, 'V'), (0x2E0, 'M', u'ɣ'), (0x2E1, 'M', u'l'), (0x2E2, 'M', u's'), (0x2E3, 'M', u'x'), (0x2E4, 'M', u'ʕ'), (0x2E5, 'V'), (0x340, 'M', u'̀'), (0x341, 'M', u'́'), (0x342, 'V'), (0x343, 'M', u'̓'), (0x344, 'M', u'̈́'), (0x345, 'M', u'ι'), (0x346, 'V'), (0x34F, 'I'), (0x350, 'V'), (0x370, 'M', u'ͱ'), (0x371, 'V'), (0x372, 'M', u'ͳ'), (0x373, 'V'), (0x374, 'M', u'ʹ'), (0x375, 'V'), (0x376, 'M', u'ͷ'), (0x377, 'V'), ] def _seg_6(): return [ (0x378, 'X'), (0x37A, '3', u' ι'), (0x37B, 'V'), (0x37E, '3', u';'), (0x37F, 'M', u'ϳ'), (0x380, 'X'), (0x384, '3', u' ́'), (0x385, '3', u' ̈́'), (0x386, 'M', u'ά'), (0x387, 'M', u'·'), (0x388, 'M', u'έ'), (0x389, 'M', u'ή'), (0x38A, 'M', u'ί'), (0x38B, 'X'), (0x38C, 'M', u'ό'), (0x38D, 'X'), (0x38E, 'M', u'ύ'), (0x38F, 'M', u'ώ'), (0x390, 'V'), (0x391, 'M', u'α'), (0x392, 'M', u'β'), (0x393, 'M', u'γ'), (0x394, 'M', u'δ'), (0x395, 'M', u'ε'), (0x396, 'M', u'ζ'), (0x397, 'M', u'η'), (0x398, 'M', u'θ'), (0x399, 'M', u'ι'), (0x39A, 'M', u'κ'), (0x39B, 'M', u'λ'), 
(0x39C, 'M', u'μ'), (0x39D, 'M', u'ν'), (0x39E, 'M', u'ξ'), (0x39F, 'M', u'ο'), (0x3A0, 'M', u'π'), (0x3A1, 'M', u'ρ'), (0x3A2, 'X'), (0x3A3, 'M', u'σ'), (0x3A4, 'M', u'τ'), (0x3A5, 'M', u'υ'), (0x3A6, 'M', u'φ'), (0x3A7, 'M', u'χ'), (0x3A8, 'M', u'ψ'), (0x3A9, 'M', u'ω'), (0x3AA, 'M', u'ϊ'), (0x3AB, 'M', u'ϋ'), (0x3AC, 'V'), (0x3C2, 'D', u'σ'), (0x3C3, 'V'), (0x3CF, 'M', u'ϗ'), (0x3D0, 'M', u'β'), (0x3D1, 'M', u'θ'), (0x3D2, 'M', u'υ'), (0x3D3, 'M', u'ύ'), (0x3D4, 'M', u'ϋ'), (0x3D5, 'M', u'φ'), (0x3D6, 'M', u'π'), (0x3D7, 'V'), (0x3D8, 'M', u'ϙ'), (0x3D9, 'V'), (0x3DA, 'M', u'ϛ'), (0x3DB, 'V'), (0x3DC, 'M', u'ϝ'), (0x3DD, 'V'), (0x3DE, 'M', u'ϟ'), (0x3DF, 'V'), (0x3E0, 'M', u'ϡ'), (0x3E1, 'V'), (0x3E2, 'M', u'ϣ'), (0x3E3, 'V'), (0x3E4, 'M', u'ϥ'), (0x3E5, 'V'), (0x3E6, 'M', u'ϧ'), (0x3E7, 'V'), (0x3E8, 'M', u'ϩ'), (0x3E9, 'V'), (0x3EA, 'M', u'ϫ'), (0x3EB, 'V'), (0x3EC, 'M', u'ϭ'), (0x3ED, 'V'), (0x3EE, 'M', u'ϯ'), (0x3EF, 'V'), (0x3F0, 'M', u'κ'), (0x3F1, 'M', u'ρ'), (0x3F2, 'M', u'σ'), (0x3F3, 'V'), (0x3F4, 'M', u'θ'), (0x3F5, 'M', u'ε'), (0x3F6, 'V'), (0x3F7, 'M', u'ϸ'), (0x3F8, 'V'), (0x3F9, 'M', u'σ'), (0x3FA, 'M', u'ϻ'), (0x3FB, 'V'), (0x3FD, 'M', u'ͻ'), (0x3FE, 'M', u'ͼ'), (0x3FF, 'M', u'ͽ'), (0x400, 'M', u'ѐ'), (0x401, 'M', u'ё'), (0x402, 'M', u'ђ'), ] def _seg_7(): return [ (0x403, 'M', u'ѓ'), (0x404, 'M', u'є'), (0x405, 'M', u'ѕ'), (0x406, 'M', u'і'), (0x407, 'M', u'ї'), (0x408, 'M', u'ј'), (0x409, 'M', u'љ'), (0x40A, 'M', u'њ'), (0x40B, 'M', u'ћ'), (0x40C, 'M', u'ќ'), (0x40D, 'M', u'ѝ'), (0x40E, 'M', u'ў'), (0x40F, 'M', u'џ'), (0x410, 'M', u'а'), (0x411, 'M', u'б'), (0x412, 'M', u'в'), (0x413, 'M', u'г'), (0x414, 'M', u'д'), (0x415, 'M', u'е'), (0x416, 'M', u'ж'), (0x417, 'M', u'з'), (0x418, 'M', u'и'), (0x419, 'M', u'й'), (0x41A, 'M', u'к'), (0x41B, 'M', u'л'), (0x41C, 'M', u'м'), (0x41D, 'M', u'н'), (0x41E, 'M', u'о'), (0x41F, 'M', u'п'), (0x420, 'M', u'р'), (0x421, 'M', u'с'), (0x422, 'M', u'т'), (0x423, 'M', u'у'), (0x424, 'M', u'ф'), (0x425, 'M', u'х'), (0x426, 'M', u'ц'), (0x427, 'M', u'ч'), (0x428, 'M', u'ш'), (0x429, 'M', u'щ'), (0x42A, 'M', u'ъ'), (0x42B, 'M', u'ы'), (0x42C, 'M', u'ь'), (0x42D, 'M', u'э'), (0x42E, 'M', u'ю'), (0x42F, 'M', u'я'), (0x430, 'V'), (0x460, 'M', u'ѡ'), (0x461, 'V'), (0x462, 'M', u'ѣ'), (0x463, 'V'), (0x464, 'M', u'ѥ'), (0x465, 'V'), (0x466, 'M', u'ѧ'), (0x467, 'V'), (0x468, 'M', u'ѩ'), (0x469, 'V'), (0x46A, 'M', u'ѫ'), (0x46B, 'V'), (0x46C, 'M', u'ѭ'), (0x46D, 'V'), (0x46E, 'M', u'ѯ'), (0x46F, 'V'), (0x470, 'M', u'ѱ'), (0x471, 'V'), (0x472, 'M', u'ѳ'), (0x473, 'V'), (0x474, 'M', u'ѵ'), (0x475, 'V'), (0x476, 'M', u'ѷ'), (0x477, 'V'), (0x478, 'M', u'ѹ'), (0x479, 'V'), (0x47A, 'M', u'ѻ'), (0x47B, 'V'), (0x47C, 'M', u'ѽ'), (0x47D, 'V'), (0x47E, 'M', u'ѿ'), (0x47F, 'V'), (0x480, 'M', u'ҁ'), (0x481, 'V'), (0x48A, 'M', u'ҋ'), (0x48B, 'V'), (0x48C, 'M', u'ҍ'), (0x48D, 'V'), (0x48E, 'M', u'ҏ'), (0x48F, 'V'), (0x490, 'M', u'ґ'), (0x491, 'V'), (0x492, 'M', u'ғ'), (0x493, 'V'), (0x494, 'M', u'ҕ'), (0x495, 'V'), (0x496, 'M', u'җ'), (0x497, 'V'), (0x498, 'M', u'ҙ'), (0x499, 'V'), (0x49A, 'M', u'қ'), (0x49B, 'V'), (0x49C, 'M', u'ҝ'), (0x49D, 'V'), ] def _seg_8(): return [ (0x49E, 'M', u'ҟ'), (0x49F, 'V'), (0x4A0, 'M', u'ҡ'), (0x4A1, 'V'), (0x4A2, 'M', u'ң'), (0x4A3, 'V'), (0x4A4, 'M', u'ҥ'), (0x4A5, 'V'), (0x4A6, 'M', u'ҧ'), (0x4A7, 'V'), (0x4A8, 'M', u'ҩ'), (0x4A9, 'V'), (0x4AA, 'M', u'ҫ'), (0x4AB, 'V'), (0x4AC, 'M', u'ҭ'), (0x4AD, 'V'), (0x4AE, 'M', u'ү'), (0x4AF, 'V'), (0x4B0, 'M', u'ұ'), (0x4B1, 'V'), (0x4B2, 'M', u'ҳ'), (0x4B3, 'V'), (0x4B4, 'M', 
u'ҵ'), (0x4B5, 'V'), (0x4B6, 'M', u'ҷ'), (0x4B7, 'V'), (0x4B8, 'M', u'ҹ'), (0x4B9, 'V'), (0x4BA, 'M', u'һ'), (0x4BB, 'V'), (0x4BC, 'M', u'ҽ'), (0x4BD, 'V'), (0x4BE, 'M', u'ҿ'), (0x4BF, 'V'), (0x4C0, 'X'), (0x4C1, 'M', u'ӂ'), (0x4C2, 'V'), (0x4C3, 'M', u'ӄ'), (0x4C4, 'V'), (0x4C5, 'M', u'ӆ'), (0x4C6, 'V'), (0x4C7, 'M', u'ӈ'), (0x4C8, 'V'), (0x4C9, 'M', u'ӊ'), (0x4CA, 'V'), (0x4CB, 'M', u'ӌ'), (0x4CC, 'V'), (0x4CD, 'M', u'ӎ'), (0x4CE, 'V'), (0x4D0, 'M', u'ӑ'), (0x4D1, 'V'), (0x4D2, 'M', u'ӓ'), (0x4D3, 'V'), (0x4D4, 'M', u'ӕ'), (0x4D5, 'V'), (0x4D6, 'M', u'ӗ'), (0x4D7, 'V'), (0x4D8, 'M', u'ә'), (0x4D9, 'V'), (0x4DA, 'M', u'ӛ'), (0x4DB, 'V'), (0x4DC, 'M', u'ӝ'), (0x4DD, 'V'), (0x4DE, 'M', u'ӟ'), (0x4DF, 'V'), (0x4E0, 'M', u'ӡ'), (0x4E1, 'V'), (0x4E2, 'M', u'ӣ'), (0x4E3, 'V'), (0x4E4, 'M', u'ӥ'), (0x4E5, 'V'), (0x4E6, 'M', u'ӧ'), (0x4E7, 'V'), (0x4E8, 'M', u'ө'), (0x4E9, 'V'), (0x4EA, 'M', u'ӫ'), (0x4EB, 'V'), (0x4EC, 'M', u'ӭ'), (0x4ED, 'V'), (0x4EE, 'M', u'ӯ'), (0x4EF, 'V'), (0x4F0, 'M', u'ӱ'), (0x4F1, 'V'), (0x4F2, 'M', u'ӳ'), (0x4F3, 'V'), (0x4F4, 'M', u'ӵ'), (0x4F5, 'V'), (0x4F6, 'M', u'ӷ'), (0x4F7, 'V'), (0x4F8, 'M', u'ӹ'), (0x4F9, 'V'), (0x4FA, 'M', u'ӻ'), (0x4FB, 'V'), (0x4FC, 'M', u'ӽ'), (0x4FD, 'V'), (0x4FE, 'M', u'ӿ'), (0x4FF, 'V'), (0x500, 'M', u'ԁ'), (0x501, 'V'), (0x502, 'M', u'ԃ'), ] def _seg_9(): return [ (0x503, 'V'), (0x504, 'M', u'ԅ'), (0x505, 'V'), (0x506, 'M', u'ԇ'), (0x507, 'V'), (0x508, 'M', u'ԉ'), (0x509, 'V'), (0x50A, 'M', u'ԋ'), (0x50B, 'V'), (0x50C, 'M', u'ԍ'), (0x50D, 'V'), (0x50E, 'M', u'ԏ'), (0x50F, 'V'), (0x510, 'M', u'ԑ'), (0x511, 'V'), (0x512, 'M', u'ԓ'), (0x513, 'V'), (0x514, 'M', u'ԕ'), (0x515, 'V'), (0x516, 'M', u'ԗ'), (0x517, 'V'), (0x518, 'M', u'ԙ'), (0x519, 'V'), (0x51A, 'M', u'ԛ'), (0x51B, 'V'), (0x51C, 'M', u'ԝ'), (0x51D, 'V'), (0x51E, 'M', u'ԟ'), (0x51F, 'V'), (0x520, 'M', u'ԡ'), (0x521, 'V'), (0x522, 'M', u'ԣ'), (0x523, 'V'), (0x524, 'M', u'ԥ'), (0x525, 'V'), (0x526, 'M', u'ԧ'), (0x527, 'V'), (0x528, 'M', u'ԩ'), (0x529, 'V'), (0x52A, 'M', u'ԫ'), (0x52B, 'V'), (0x52C, 'M', u'ԭ'), (0x52D, 'V'), (0x52E, 'M', u'ԯ'), (0x52F, 'V'), (0x530, 'X'), (0x531, 'M', u'ա'), (0x532, 'M', u'բ'), (0x533, 'M', u'գ'), (0x534, 'M', u'դ'), (0x535, 'M', u'ե'), (0x536, 'M', u'զ'), (0x537, 'M', u'է'), (0x538, 'M', u'ը'), (0x539, 'M', u'թ'), (0x53A, 'M', u'ժ'), (0x53B, 'M', u'ի'), (0x53C, 'M', u'լ'), (0x53D, 'M', u'խ'), (0x53E, 'M', u'ծ'), (0x53F, 'M', u'կ'), (0x540, 'M', u'հ'), (0x541, 'M', u'ձ'), (0x542, 'M', u'ղ'), (0x543, 'M', u'ճ'), (0x544, 'M', u'մ'), (0x545, 'M', u'յ'), (0x546, 'M', u'ն'), (0x547, 'M', u'շ'), (0x548, 'M', u'ո'), (0x549, 'M', u'չ'), (0x54A, 'M', u'պ'), (0x54B, 'M', u'ջ'), (0x54C, 'M', u'ռ'), (0x54D, 'M', u'ս'), (0x54E, 'M', u'վ'), (0x54F, 'M', u'տ'), (0x550, 'M', u'ր'), (0x551, 'M', u'ց'), (0x552, 'M', u'ւ'), (0x553, 'M', u'փ'), (0x554, 'M', u'ք'), (0x555, 'M', u'օ'), (0x556, 'M', u'ֆ'), (0x557, 'X'), (0x559, 'V'), (0x587, 'M', u'եւ'), (0x588, 'V'), (0x58B, 'X'), (0x58D, 'V'), (0x590, 'X'), (0x591, 'V'), (0x5C8, 'X'), (0x5D0, 'V'), (0x5EB, 'X'), (0x5EF, 'V'), (0x5F5, 'X'), (0x606, 'V'), (0x61C, 'X'), (0x61E, 'V'), ] def _seg_10(): return [ (0x675, 'M', u'اٴ'), (0x676, 'M', u'وٴ'), (0x677, 'M', u'ۇٴ'), (0x678, 'M', u'يٴ'), (0x679, 'V'), (0x6DD, 'X'), (0x6DE, 'V'), (0x70E, 'X'), (0x710, 'V'), (0x74B, 'X'), (0x74D, 'V'), (0x7B2, 'X'), (0x7C0, 'V'), (0x7FB, 'X'), (0x7FD, 'V'), (0x82E, 'X'), (0x830, 'V'), (0x83F, 'X'), (0x840, 'V'), (0x85C, 'X'), (0x85E, 'V'), (0x85F, 'X'), (0x860, 'V'), (0x86B, 'X'), (0x8A0, 'V'), (0x8B5, 'X'), (0x8B6, 'V'), (0x8C8, 'X'), 
(0x8D3, 'V'), (0x8E2, 'X'), (0x8E3, 'V'), (0x958, 'M', u'क़'), (0x959, 'M', u'ख़'), (0x95A, 'M', u'ग़'), (0x95B, 'M', u'ज़'), (0x95C, 'M', u'ड़'), (0x95D, 'M', u'ढ़'), (0x95E, 'M', u'फ़'), (0x95F, 'M', u'य़'), (0x960, 'V'), (0x984, 'X'), (0x985, 'V'), (0x98D, 'X'), (0x98F, 'V'), (0x991, 'X'), (0x993, 'V'), (0x9A9, 'X'), (0x9AA, 'V'), (0x9B1, 'X'), (0x9B2, 'V'), (0x9B3, 'X'), (0x9B6, 'V'), (0x9BA, 'X'), (0x9BC, 'V'), (0x9C5, 'X'), (0x9C7, 'V'), (0x9C9, 'X'), (0x9CB, 'V'), (0x9CF, 'X'), (0x9D7, 'V'), (0x9D8, 'X'), (0x9DC, 'M', u'ড়'), (0x9DD, 'M', u'ঢ়'), (0x9DE, 'X'), (0x9DF, 'M', u'য়'), (0x9E0, 'V'), (0x9E4, 'X'), (0x9E6, 'V'), (0x9FF, 'X'), (0xA01, 'V'), (0xA04, 'X'), (0xA05, 'V'), (0xA0B, 'X'), (0xA0F, 'V'), (0xA11, 'X'), (0xA13, 'V'), (0xA29, 'X'), (0xA2A, 'V'), (0xA31, 'X'), (0xA32, 'V'), (0xA33, 'M', u'ਲ਼'), (0xA34, 'X'), (0xA35, 'V'), (0xA36, 'M', u'ਸ਼'), (0xA37, 'X'), (0xA38, 'V'), (0xA3A, 'X'), (0xA3C, 'V'), (0xA3D, 'X'), (0xA3E, 'V'), (0xA43, 'X'), (0xA47, 'V'), (0xA49, 'X'), (0xA4B, 'V'), (0xA4E, 'X'), (0xA51, 'V'), (0xA52, 'X'), (0xA59, 'M', u'ਖ਼'), (0xA5A, 'M', u'ਗ਼'), (0xA5B, 'M', u'ਜ਼'), ] def _seg_11(): return [ (0xA5C, 'V'), (0xA5D, 'X'), (0xA5E, 'M', u'ਫ਼'), (0xA5F, 'X'), (0xA66, 'V'), (0xA77, 'X'), (0xA81, 'V'), (0xA84, 'X'), (0xA85, 'V'), (0xA8E, 'X'), (0xA8F, 'V'), (0xA92, 'X'), (0xA93, 'V'), (0xAA9, 'X'), (0xAAA, 'V'), (0xAB1, 'X'), (0xAB2, 'V'), (0xAB4, 'X'), (0xAB5, 'V'), (0xABA, 'X'), (0xABC, 'V'), (0xAC6, 'X'), (0xAC7, 'V'), (0xACA, 'X'), (0xACB, 'V'), (0xACE, 'X'), (0xAD0, 'V'), (0xAD1, 'X'), (0xAE0, 'V'), (0xAE4, 'X'), (0xAE6, 'V'), (0xAF2, 'X'), (0xAF9, 'V'), (0xB00, 'X'), (0xB01, 'V'), (0xB04, 'X'), (0xB05, 'V'), (0xB0D, 'X'), (0xB0F, 'V'), (0xB11, 'X'), (0xB13, 'V'), (0xB29, 'X'), (0xB2A, 'V'), (0xB31, 'X'), (0xB32, 'V'), (0xB34, 'X'), (0xB35, 'V'), (0xB3A, 'X'), (0xB3C, 'V'), (0xB45, 'X'), (0xB47, 'V'), (0xB49, 'X'), (0xB4B, 'V'), (0xB4E, 'X'), (0xB55, 'V'), (0xB58, 'X'), (0xB5C, 'M', u'ଡ଼'), (0xB5D, 'M', u'ଢ଼'), (0xB5E, 'X'), (0xB5F, 'V'), (0xB64, 'X'), (0xB66, 'V'), (0xB78, 'X'), (0xB82, 'V'), (0xB84, 'X'), (0xB85, 'V'), (0xB8B, 'X'), (0xB8E, 'V'), (0xB91, 'X'), (0xB92, 'V'), (0xB96, 'X'), (0xB99, 'V'), (0xB9B, 'X'), (0xB9C, 'V'), (0xB9D, 'X'), (0xB9E, 'V'), (0xBA0, 'X'), (0xBA3, 'V'), (0xBA5, 'X'), (0xBA8, 'V'), (0xBAB, 'X'), (0xBAE, 'V'), (0xBBA, 'X'), (0xBBE, 'V'), (0xBC3, 'X'), (0xBC6, 'V'), (0xBC9, 'X'), (0xBCA, 'V'), (0xBCE, 'X'), (0xBD0, 'V'), (0xBD1, 'X'), (0xBD7, 'V'), (0xBD8, 'X'), (0xBE6, 'V'), (0xBFB, 'X'), (0xC00, 'V'), (0xC0D, 'X'), (0xC0E, 'V'), (0xC11, 'X'), (0xC12, 'V'), ] def _seg_12(): return [ (0xC29, 'X'), (0xC2A, 'V'), (0xC3A, 'X'), (0xC3D, 'V'), (0xC45, 'X'), (0xC46, 'V'), (0xC49, 'X'), (0xC4A, 'V'), (0xC4E, 'X'), (0xC55, 'V'), (0xC57, 'X'), (0xC58, 'V'), (0xC5B, 'X'), (0xC60, 'V'), (0xC64, 'X'), (0xC66, 'V'), (0xC70, 'X'), (0xC77, 'V'), (0xC8D, 'X'), (0xC8E, 'V'), (0xC91, 'X'), (0xC92, 'V'), (0xCA9, 'X'), (0xCAA, 'V'), (0xCB4, 'X'), (0xCB5, 'V'), (0xCBA, 'X'), (0xCBC, 'V'), (0xCC5, 'X'), (0xCC6, 'V'), (0xCC9, 'X'), (0xCCA, 'V'), (0xCCE, 'X'), (0xCD5, 'V'), (0xCD7, 'X'), (0xCDE, 'V'), (0xCDF, 'X'), (0xCE0, 'V'), (0xCE4, 'X'), (0xCE6, 'V'), (0xCF0, 'X'), (0xCF1, 'V'), (0xCF3, 'X'), (0xD00, 'V'), (0xD0D, 'X'), (0xD0E, 'V'), (0xD11, 'X'), (0xD12, 'V'), (0xD45, 'X'), (0xD46, 'V'), (0xD49, 'X'), (0xD4A, 'V'), (0xD50, 'X'), (0xD54, 'V'), (0xD64, 'X'), (0xD66, 'V'), (0xD80, 'X'), (0xD81, 'V'), (0xD84, 'X'), (0xD85, 'V'), (0xD97, 'X'), (0xD9A, 'V'), (0xDB2, 'X'), (0xDB3, 'V'), (0xDBC, 'X'), (0xDBD, 'V'), (0xDBE, 'X'), (0xDC0, 'V'), (0xDC7, 
'X'), (0xDCA, 'V'), (0xDCB, 'X'), (0xDCF, 'V'), (0xDD5, 'X'), (0xDD6, 'V'), (0xDD7, 'X'), (0xDD8, 'V'), (0xDE0, 'X'), (0xDE6, 'V'), (0xDF0, 'X'), (0xDF2, 'V'), (0xDF5, 'X'), (0xE01, 'V'), (0xE33, 'M', u'ํา'), (0xE34, 'V'), (0xE3B, 'X'), (0xE3F, 'V'), (0xE5C, 'X'), (0xE81, 'V'), (0xE83, 'X'), (0xE84, 'V'), (0xE85, 'X'), (0xE86, 'V'), (0xE8B, 'X'), (0xE8C, 'V'), (0xEA4, 'X'), (0xEA5, 'V'), (0xEA6, 'X'), (0xEA7, 'V'), (0xEB3, 'M', u'ໍາ'), (0xEB4, 'V'), ] def _seg_13(): return [ (0xEBE, 'X'), (0xEC0, 'V'), (0xEC5, 'X'), (0xEC6, 'V'), (0xEC7, 'X'), (0xEC8, 'V'), (0xECE, 'X'), (0xED0, 'V'), (0xEDA, 'X'), (0xEDC, 'M', u'ຫນ'), (0xEDD, 'M', u'ຫມ'), (0xEDE, 'V'), (0xEE0, 'X'), (0xF00, 'V'), (0xF0C, 'M', u'་'), (0xF0D, 'V'), (0xF43, 'M', u'གྷ'), (0xF44, 'V'), (0xF48, 'X'), (0xF49, 'V'), (0xF4D, 'M', u'ཌྷ'), (0xF4E, 'V'), (0xF52, 'M', u'དྷ'), (0xF53, 'V'), (0xF57, 'M', u'བྷ'), (0xF58, 'V'), (0xF5C, 'M', u'ཛྷ'), (0xF5D, 'V'), (0xF69, 'M', u'ཀྵ'), (0xF6A, 'V'), (0xF6D, 'X'), (0xF71, 'V'), (0xF73, 'M', u'ཱི'), (0xF74, 'V'), (0xF75, 'M', u'ཱུ'), (0xF76, 'M', u'ྲྀ'), (0xF77, 'M', u'ྲཱྀ'), (0xF78, 'M', u'ླྀ'), (0xF79, 'M', u'ླཱྀ'), (0xF7A, 'V'), (0xF81, 'M', u'ཱྀ'), (0xF82, 'V'), (0xF93, 'M', u'ྒྷ'), (0xF94, 'V'), (0xF98, 'X'), (0xF99, 'V'), (0xF9D, 'M', u'ྜྷ'), (0xF9E, 'V'), (0xFA2, 'M', u'ྡྷ'), (0xFA3, 'V'), (0xFA7, 'M', u'ྦྷ'), (0xFA8, 'V'), (0xFAC, 'M', u'ྫྷ'), (0xFAD, 'V'), (0xFB9, 'M', u'ྐྵ'), (0xFBA, 'V'), (0xFBD, 'X'), (0xFBE, 'V'), (0xFCD, 'X'), (0xFCE, 'V'), (0xFDB, 'X'), (0x1000, 'V'), (0x10A0, 'X'), (0x10C7, 'M', u'ⴧ'), (0x10C8, 'X'), (0x10CD, 'M', u'ⴭ'), (0x10CE, 'X'), (0x10D0, 'V'), (0x10FC, 'M', u'ნ'), (0x10FD, 'V'), (0x115F, 'X'), (0x1161, 'V'), (0x1249, 'X'), (0x124A, 'V'), (0x124E, 'X'), (0x1250, 'V'), (0x1257, 'X'), (0x1258, 'V'), (0x1259, 'X'), (0x125A, 'V'), (0x125E, 'X'), (0x1260, 'V'), (0x1289, 'X'), (0x128A, 'V'), (0x128E, 'X'), (0x1290, 'V'), (0x12B1, 'X'), (0x12B2, 'V'), (0x12B6, 'X'), (0x12B8, 'V'), (0x12BF, 'X'), (0x12C0, 'V'), (0x12C1, 'X'), (0x12C2, 'V'), (0x12C6, 'X'), (0x12C8, 'V'), (0x12D7, 'X'), (0x12D8, 'V'), (0x1311, 'X'), (0x1312, 'V'), ] def _seg_14(): return [ (0x1316, 'X'), (0x1318, 'V'), (0x135B, 'X'), (0x135D, 'V'), (0x137D, 'X'), (0x1380, 'V'), (0x139A, 'X'), (0x13A0, 'V'), (0x13F6, 'X'), (0x13F8, 'M', u'Ᏸ'), (0x13F9, 'M', u'Ᏹ'), (0x13FA, 'M', u'Ᏺ'), (0x13FB, 'M', u'Ᏻ'), (0x13FC, 'M', u'Ᏼ'), (0x13FD, 'M', u'Ᏽ'), (0x13FE, 'X'), (0x1400, 'V'), (0x1680, 'X'), (0x1681, 'V'), (0x169D, 'X'), (0x16A0, 'V'), (0x16F9, 'X'), (0x1700, 'V'), (0x170D, 'X'), (0x170E, 'V'), (0x1715, 'X'), (0x1720, 'V'), (0x1737, 'X'), (0x1740, 'V'), (0x1754, 'X'), (0x1760, 'V'), (0x176D, 'X'), (0x176E, 'V'), (0x1771, 'X'), (0x1772, 'V'), (0x1774, 'X'), (0x1780, 'V'), (0x17B4, 'X'), (0x17B6, 'V'), (0x17DE, 'X'), (0x17E0, 'V'), (0x17EA, 'X'), (0x17F0, 'V'), (0x17FA, 'X'), (0x1800, 'V'), (0x1806, 'X'), (0x1807, 'V'), (0x180B, 'I'), (0x180E, 'X'), (0x1810, 'V'), (0x181A, 'X'), (0x1820, 'V'), (0x1879, 'X'), (0x1880, 'V'), (0x18AB, 'X'), (0x18B0, 'V'), (0x18F6, 'X'), (0x1900, 'V'), (0x191F, 'X'), (0x1920, 'V'), (0x192C, 'X'), (0x1930, 'V'), (0x193C, 'X'), (0x1940, 'V'), (0x1941, 'X'), (0x1944, 'V'), (0x196E, 'X'), (0x1970, 'V'), (0x1975, 'X'), (0x1980, 'V'), (0x19AC, 'X'), (0x19B0, 'V'), (0x19CA, 'X'), (0x19D0, 'V'), (0x19DB, 'X'), (0x19DE, 'V'), (0x1A1C, 'X'), (0x1A1E, 'V'), (0x1A5F, 'X'), (0x1A60, 'V'), (0x1A7D, 'X'), (0x1A7F, 'V'), (0x1A8A, 'X'), (0x1A90, 'V'), (0x1A9A, 'X'), (0x1AA0, 'V'), (0x1AAE, 'X'), (0x1AB0, 'V'), (0x1AC1, 'X'), (0x1B00, 'V'), (0x1B4C, 'X'), (0x1B50, 'V'), (0x1B7D, 'X'), 
(0x1B80, 'V'), (0x1BF4, 'X'), (0x1BFC, 'V'), (0x1C38, 'X'), (0x1C3B, 'V'), (0x1C4A, 'X'), (0x1C4D, 'V'), ] def _seg_15(): return [ (0x1C80, 'M', u'в'), (0x1C81, 'M', u'д'), (0x1C82, 'M', u'о'), (0x1C83, 'M', u'с'), (0x1C84, 'M', u'т'), (0x1C86, 'M', u'ъ'), (0x1C87, 'M', u'ѣ'), (0x1C88, 'M', u'ꙋ'), (0x1C89, 'X'), (0x1C90, 'M', u'ა'), (0x1C91, 'M', u'ბ'), (0x1C92, 'M', u'გ'), (0x1C93, 'M', u'დ'), (0x1C94, 'M', u'ე'), (0x1C95, 'M', u'ვ'), (0x1C96, 'M', u'ზ'), (0x1C97, 'M', u'თ'), (0x1C98, 'M', u'ი'), (0x1C99, 'M', u'კ'), (0x1C9A, 'M', u'ლ'), (0x1C9B, 'M', u'მ'), (0x1C9C, 'M', u'ნ'), (0x1C9D, 'M', u'ო'), (0x1C9E, 'M', u'პ'), (0x1C9F, 'M', u'ჟ'), (0x1CA0, 'M', u'რ'), (0x1CA1, 'M', u'ს'), (0x1CA2, 'M', u'ტ'), (0x1CA3, 'M', u'უ'), (0x1CA4, 'M', u'ფ'), (0x1CA5, 'M', u'ქ'), (0x1CA6, 'M', u'ღ'), (0x1CA7, 'M', u'ყ'), (0x1CA8, 'M', u'შ'), (0x1CA9, 'M', u'ჩ'), (0x1CAA, 'M', u'ც'), (0x1CAB, 'M', u'ძ'), (0x1CAC, 'M', u'წ'), (0x1CAD, 'M', u'ჭ'), (0x1CAE, 'M', u'ხ'), (0x1CAF, 'M', u'ჯ'), (0x1CB0, 'M', u'ჰ'), (0x1CB1, 'M', u'ჱ'), (0x1CB2, 'M', u'ჲ'), (0x1CB3, 'M', u'ჳ'), (0x1CB4, 'M', u'ჴ'), (0x1CB5, 'M', u'ჵ'), (0x1CB6, 'M', u'ჶ'), (0x1CB7, 'M', u'ჷ'), (0x1CB8, 'M', u'ჸ'), (0x1CB9, 'M', u'ჹ'), (0x1CBA, 'M', u'ჺ'), (0x1CBB, 'X'), (0x1CBD, 'M', u'ჽ'), (0x1CBE, 'M', u'ჾ'), (0x1CBF, 'M', u'ჿ'), (0x1CC0, 'V'), (0x1CC8, 'X'), (0x1CD0, 'V'), (0x1CFB, 'X'), (0x1D00, 'V'), (0x1D2C, 'M', u'a'), (0x1D2D, 'M', u'æ'), (0x1D2E, 'M', u'b'), (0x1D2F, 'V'), (0x1D30, 'M', u'd'), (0x1D31, 'M', u'e'), (0x1D32, 'M', u'ǝ'), (0x1D33, 'M', u'g'), (0x1D34, 'M', u'h'), (0x1D35, 'M', u'i'), (0x1D36, 'M', u'j'), (0x1D37, 'M', u'k'), (0x1D38, 'M', u'l'), (0x1D39, 'M', u'm'), (0x1D3A, 'M', u'n'), (0x1D3B, 'V'), (0x1D3C, 'M', u'o'), (0x1D3D, 'M', u'ȣ'), (0x1D3E, 'M', u'p'), (0x1D3F, 'M', u'r'), (0x1D40, 'M', u't'), (0x1D41, 'M', u'u'), (0x1D42, 'M', u'w'), (0x1D43, 'M', u'a'), (0x1D44, 'M', u'ɐ'), (0x1D45, 'M', u'ɑ'), (0x1D46, 'M', u'ᴂ'), (0x1D47, 'M', u'b'), (0x1D48, 'M', u'd'), (0x1D49, 'M', u'e'), (0x1D4A, 'M', u'ə'), (0x1D4B, 'M', u'ɛ'), (0x1D4C, 'M', u'ɜ'), (0x1D4D, 'M', u'g'), (0x1D4E, 'V'), (0x1D4F, 'M', u'k'), (0x1D50, 'M', u'm'), (0x1D51, 'M', u'ŋ'), (0x1D52, 'M', u'o'), ] def _seg_16(): return [ (0x1D53, 'M', u'ɔ'), (0x1D54, 'M', u'ᴖ'), (0x1D55, 'M', u'ᴗ'), (0x1D56, 'M', u'p'), (0x1D57, 'M', u't'), (0x1D58, 'M', u'u'), (0x1D59, 'M', u'ᴝ'), (0x1D5A, 'M', u'ɯ'), (0x1D5B, 'M', u'v'), (0x1D5C, 'M', u'ᴥ'), (0x1D5D, 'M', u'β'), (0x1D5E, 'M', u'γ'), (0x1D5F, 'M', u'δ'), (0x1D60, 'M', u'φ'), (0x1D61, 'M', u'χ'), (0x1D62, 'M', u'i'), (0x1D63, 'M', u'r'), (0x1D64, 'M', u'u'), (0x1D65, 'M', u'v'), (0x1D66, 'M', u'β'), (0x1D67, 'M', u'γ'), (0x1D68, 'M', u'ρ'), (0x1D69, 'M', u'φ'), (0x1D6A, 'M', u'χ'), (0x1D6B, 'V'), (0x1D78, 'M', u'н'), (0x1D79, 'V'), (0x1D9B, 'M', u'ɒ'), (0x1D9C, 'M', u'c'), (0x1D9D, 'M', u'ɕ'), (0x1D9E, 'M', u'ð'), (0x1D9F, 'M', u'ɜ'), (0x1DA0, 'M', u'f'), (0x1DA1, 'M', u'ɟ'), (0x1DA2, 'M', u'ɡ'), (0x1DA3, 'M', u'ɥ'), (0x1DA4, 'M', u'ɨ'), (0x1DA5, 'M', u'ɩ'), (0x1DA6, 'M', u'ɪ'), (0x1DA7, 'M', u'ᵻ'), (0x1DA8, 'M', u'ʝ'), (0x1DA9, 'M', u'ɭ'), (0x1DAA, 'M', u'ᶅ'), (0x1DAB, 'M', u'ʟ'), (0x1DAC, 'M', u'ɱ'), (0x1DAD, 'M', u'ɰ'), (0x1DAE, 'M', u'ɲ'), (0x1DAF, 'M', u'ɳ'), (0x1DB0, 'M', u'ɴ'), (0x1DB1, 'M', u'ɵ'), (0x1DB2, 'M', u'ɸ'), (0x1DB3, 'M', u'ʂ'), (0x1DB4, 'M', u'ʃ'), (0x1DB5, 'M', u'ƫ'), (0x1DB6, 'M', u'ʉ'), (0x1DB7, 'M', u'ʊ'), (0x1DB8, 'M', u'ᴜ'), (0x1DB9, 'M', u'ʋ'), (0x1DBA, 'M', u'ʌ'), (0x1DBB, 'M', u'z'), (0x1DBC, 'M', u'ʐ'), (0x1DBD, 'M', u'ʑ'), (0x1DBE, 'M', u'ʒ'), (0x1DBF, 'M', u'θ'), (0x1DC0, 'V'), (0x1DFA, 
'X'), (0x1DFB, 'V'), (0x1E00, 'M', u'ḁ'), (0x1E01, 'V'), (0x1E02, 'M', u'ḃ'), (0x1E03, 'V'), (0x1E04, 'M', u'ḅ'), (0x1E05, 'V'), (0x1E06, 'M', u'ḇ'), (0x1E07, 'V'), (0x1E08, 'M', u'ḉ'), (0x1E09, 'V'), (0x1E0A, 'M', u'ḋ'), (0x1E0B, 'V'), (0x1E0C, 'M', u'ḍ'), (0x1E0D, 'V'), (0x1E0E, 'M', u'ḏ'), (0x1E0F, 'V'), (0x1E10, 'M', u'ḑ'), (0x1E11, 'V'), (0x1E12, 'M', u'ḓ'), (0x1E13, 'V'), (0x1E14, 'M', u'ḕ'), (0x1E15, 'V'), (0x1E16, 'M', u'ḗ'), (0x1E17, 'V'), (0x1E18, 'M', u'ḙ'), (0x1E19, 'V'), (0x1E1A, 'M', u'ḛ'), (0x1E1B, 'V'), (0x1E1C, 'M', u'ḝ'), (0x1E1D, 'V'), (0x1E1E, 'M', u'ḟ'), (0x1E1F, 'V'), (0x1E20, 'M', u'ḡ'), ] def _seg_17(): return [ (0x1E21, 'V'), (0x1E22, 'M', u'ḣ'), (0x1E23, 'V'), (0x1E24, 'M', u'ḥ'), (0x1E25, 'V'), (0x1E26, 'M', u'ḧ'), (0x1E27, 'V'), (0x1E28, 'M', u'ḩ'), (0x1E29, 'V'), (0x1E2A, 'M', u'ḫ'), (0x1E2B, 'V'), (0x1E2C, 'M', u'ḭ'), (0x1E2D, 'V'), (0x1E2E, 'M', u'ḯ'), (0x1E2F, 'V'), (0x1E30, 'M', u'ḱ'), (0x1E31, 'V'), (0x1E32, 'M', u'ḳ'), (0x1E33, 'V'), (0x1E34, 'M', u'ḵ'), (0x1E35, 'V'), (0x1E36, 'M', u'ḷ'), (0x1E37, 'V'), (0x1E38, 'M', u'ḹ'), (0x1E39, 'V'), (0x1E3A, 'M', u'ḻ'), (0x1E3B, 'V'), (0x1E3C, 'M', u'ḽ'), (0x1E3D, 'V'), (0x1E3E, 'M', u'ḿ'), (0x1E3F, 'V'), (0x1E40, 'M', u'ṁ'), (0x1E41, 'V'), (0x1E42, 'M', u'ṃ'), (0x1E43, 'V'), (0x1E44, 'M', u'ṅ'), (0x1E45, 'V'), (0x1E46, 'M', u'ṇ'), (0x1E47, 'V'), (0x1E48, 'M', u'ṉ'), (0x1E49, 'V'), (0x1E4A, 'M', u'ṋ'), (0x1E4B, 'V'), (0x1E4C, 'M', u'ṍ'), (0x1E4D, 'V'), (0x1E4E, 'M', u'ṏ'), (0x1E4F, 'V'), (0x1E50, 'M', u'ṑ'), (0x1E51, 'V'), (0x1E52, 'M', u'ṓ'), (0x1E53, 'V'), (0x1E54, 'M', u'ṕ'), (0x1E55, 'V'), (0x1E56, 'M', u'ṗ'), (0x1E57, 'V'), (0x1E58, 'M', u'ṙ'), (0x1E59, 'V'), (0x1E5A, 'M', u'ṛ'), (0x1E5B, 'V'), (0x1E5C, 'M', u'ṝ'), (0x1E5D, 'V'), (0x1E5E, 'M', u'ṟ'), (0x1E5F, 'V'), (0x1E60, 'M', u'ṡ'), (0x1E61, 'V'), (0x1E62, 'M', u'ṣ'), (0x1E63, 'V'), (0x1E64, 'M', u'ṥ'), (0x1E65, 'V'), (0x1E66, 'M', u'ṧ'), (0x1E67, 'V'), (0x1E68, 'M', u'ṩ'), (0x1E69, 'V'), (0x1E6A, 'M', u'ṫ'), (0x1E6B, 'V'), (0x1E6C, 'M', u'ṭ'), (0x1E6D, 'V'), (0x1E6E, 'M', u'ṯ'), (0x1E6F, 'V'), (0x1E70, 'M', u'ṱ'), (0x1E71, 'V'), (0x1E72, 'M', u'ṳ'), (0x1E73, 'V'), (0x1E74, 'M', u'ṵ'), (0x1E75, 'V'), (0x1E76, 'M', u'ṷ'), (0x1E77, 'V'), (0x1E78, 'M', u'ṹ'), (0x1E79, 'V'), (0x1E7A, 'M', u'ṻ'), (0x1E7B, 'V'), (0x1E7C, 'M', u'ṽ'), (0x1E7D, 'V'), (0x1E7E, 'M', u'ṿ'), (0x1E7F, 'V'), (0x1E80, 'M', u'ẁ'), (0x1E81, 'V'), (0x1E82, 'M', u'ẃ'), (0x1E83, 'V'), (0x1E84, 'M', u'ẅ'), ] def _seg_18(): return [ (0x1E85, 'V'), (0x1E86, 'M', u'ẇ'), (0x1E87, 'V'), (0x1E88, 'M', u'ẉ'), (0x1E89, 'V'), (0x1E8A, 'M', u'ẋ'), (0x1E8B, 'V'), (0x1E8C, 'M', u'ẍ'), (0x1E8D, 'V'), (0x1E8E, 'M', u'ẏ'), (0x1E8F, 'V'), (0x1E90, 'M', u'ẑ'), (0x1E91, 'V'), (0x1E92, 'M', u'ẓ'), (0x1E93, 'V'), (0x1E94, 'M', u'ẕ'), (0x1E95, 'V'), (0x1E9A, 'M', u'aʾ'), (0x1E9B, 'M', u'ṡ'), (0x1E9C, 'V'), (0x1E9E, 'M', u'ss'), (0x1E9F, 'V'), (0x1EA0, 'M', u'ạ'), (0x1EA1, 'V'), (0x1EA2, 'M', u'ả'), (0x1EA3, 'V'), (0x1EA4, 'M', u'ấ'), (0x1EA5, 'V'), (0x1EA6, 'M', u'ầ'), (0x1EA7, 'V'), (0x1EA8, 'M', u'ẩ'), (0x1EA9, 'V'), (0x1EAA, 'M', u'ẫ'), (0x1EAB, 'V'), (0x1EAC, 'M', u'ậ'), (0x1EAD, 'V'), (0x1EAE, 'M', u'ắ'), (0x1EAF, 'V'), (0x1EB0, 'M', u'ằ'), (0x1EB1, 'V'), (0x1EB2, 'M', u'ẳ'), (0x1EB3, 'V'), (0x1EB4, 'M', u'ẵ'), (0x1EB5, 'V'), (0x1EB6, 'M', u'ặ'), (0x1EB7, 'V'), (0x1EB8, 'M', u'ẹ'), (0x1EB9, 'V'), (0x1EBA, 'M', u'ẻ'), (0x1EBB, 'V'), (0x1EBC, 'M', u'ẽ'), (0x1EBD, 'V'), (0x1EBE, 'M', u'ế'), (0x1EBF, 'V'), (0x1EC0, 'M', u'ề'), (0x1EC1, 'V'), (0x1EC2, 'M', u'ể'), (0x1EC3, 'V'), (0x1EC4, 'M', u'ễ'), (0x1EC5, 'V'), 
(0x1EC6, 'M', u'ệ'), (0x1EC7, 'V'), (0x1EC8, 'M', u'ỉ'), (0x1EC9, 'V'), (0x1ECA, 'M', u'ị'), (0x1ECB, 'V'), (0x1ECC, 'M', u'ọ'), (0x1ECD, 'V'), (0x1ECE, 'M', u'ỏ'), (0x1ECF, 'V'), (0x1ED0, 'M', u'ố'), (0x1ED1, 'V'), (0x1ED2, 'M', u'ồ'), (0x1ED3, 'V'), (0x1ED4, 'M', u'ổ'), (0x1ED5, 'V'), (0x1ED6, 'M', u'ỗ'), (0x1ED7, 'V'), (0x1ED8, 'M', u'ộ'), (0x1ED9, 'V'), (0x1EDA, 'M', u'ớ'), (0x1EDB, 'V'), (0x1EDC, 'M', u'ờ'), (0x1EDD, 'V'), (0x1EDE, 'M', u'ở'), (0x1EDF, 'V'), (0x1EE0, 'M', u'ỡ'), (0x1EE1, 'V'), (0x1EE2, 'M', u'ợ'), (0x1EE3, 'V'), (0x1EE4, 'M', u'ụ'), (0x1EE5, 'V'), (0x1EE6, 'M', u'ủ'), (0x1EE7, 'V'), (0x1EE8, 'M', u'ứ'), (0x1EE9, 'V'), (0x1EEA, 'M', u'ừ'), (0x1EEB, 'V'), (0x1EEC, 'M', u'ử'), (0x1EED, 'V'), ] def _seg_19(): return [ (0x1EEE, 'M', u'ữ'), (0x1EEF, 'V'), (0x1EF0, 'M', u'ự'), (0x1EF1, 'V'), (0x1EF2, 'M', u'ỳ'), (0x1EF3, 'V'), (0x1EF4, 'M', u'ỵ'), (0x1EF5, 'V'), (0x1EF6, 'M', u'ỷ'), (0x1EF7, 'V'), (0x1EF8, 'M', u'ỹ'), (0x1EF9, 'V'), (0x1EFA, 'M', u'ỻ'), (0x1EFB, 'V'), (0x1EFC, 'M', u'ỽ'), (0x1EFD, 'V'), (0x1EFE, 'M', u'ỿ'), (0x1EFF, 'V'), (0x1F08, 'M', u'ἀ'), (0x1F09, 'M', u'ἁ'), (0x1F0A, 'M', u'ἂ'), (0x1F0B, 'M', u'ἃ'), (0x1F0C, 'M', u'ἄ'), (0x1F0D, 'M', u'ἅ'), (0x1F0E, 'M', u'ἆ'), (0x1F0F, 'M', u'ἇ'), (0x1F10, 'V'), (0x1F16, 'X'), (0x1F18, 'M', u'ἐ'), (0x1F19, 'M', u'ἑ'), (0x1F1A, 'M', u'ἒ'), (0x1F1B, 'M', u'ἓ'), (0x1F1C, 'M', u'ἔ'), (0x1F1D, 'M', u'ἕ'), (0x1F1E, 'X'), (0x1F20, 'V'), (0x1F28, 'M', u'ἠ'), (0x1F29, 'M', u'ἡ'), (0x1F2A, 'M', u'ἢ'), (0x1F2B, 'M', u'ἣ'), (0x1F2C, 'M', u'ἤ'), (0x1F2D, 'M', u'ἥ'), (0x1F2E, 'M', u'ἦ'), (0x1F2F, 'M', u'ἧ'), (0x1F30, 'V'), (0x1F38, 'M', u'ἰ'), (0x1F39, 'M', u'ἱ'), (0x1F3A, 'M', u'ἲ'), (0x1F3B, 'M', u'ἳ'), (0x1F3C, 'M', u'ἴ'), (0x1F3D, 'M', u'ἵ'), (0x1F3E, 'M', u'ἶ'), (0x1F3F, 'M', u'ἷ'), (0x1F40, 'V'), (0x1F46, 'X'), (0x1F48, 'M', u'ὀ'), (0x1F49, 'M', u'ὁ'), (0x1F4A, 'M', u'ὂ'), (0x1F4B, 'M', u'ὃ'), (0x1F4C, 'M', u'ὄ'), (0x1F4D, 'M', u'ὅ'), (0x1F4E, 'X'), (0x1F50, 'V'), (0x1F58, 'X'), (0x1F59, 'M', u'ὑ'), (0x1F5A, 'X'), (0x1F5B, 'M', u'ὓ'), (0x1F5C, 'X'), (0x1F5D, 'M', u'ὕ'), (0x1F5E, 'X'), (0x1F5F, 'M', u'ὗ'), (0x1F60, 'V'), (0x1F68, 'M', u'ὠ'), (0x1F69, 'M', u'ὡ'), (0x1F6A, 'M', u'ὢ'), (0x1F6B, 'M', u'ὣ'), (0x1F6C, 'M', u'ὤ'), (0x1F6D, 'M', u'ὥ'), (0x1F6E, 'M', u'ὦ'), (0x1F6F, 'M', u'ὧ'), (0x1F70, 'V'), (0x1F71, 'M', u'ά'), (0x1F72, 'V'), (0x1F73, 'M', u'έ'), (0x1F74, 'V'), (0x1F75, 'M', u'ή'), (0x1F76, 'V'), (0x1F77, 'M', u'ί'), (0x1F78, 'V'), (0x1F79, 'M', u'ό'), (0x1F7A, 'V'), (0x1F7B, 'M', u'ύ'), (0x1F7C, 'V'), (0x1F7D, 'M', u'ώ'), (0x1F7E, 'X'), (0x1F80, 'M', u'ἀι'), (0x1F81, 'M', u'ἁι'), (0x1F82, 'M', u'ἂι'), (0x1F83, 'M', u'ἃι'), (0x1F84, 'M', u'ἄι'), ] def _seg_20(): return [ (0x1F85, 'M', u'ἅι'), (0x1F86, 'M', u'ἆι'), (0x1F87, 'M', u'ἇι'), (0x1F88, 'M', u'ἀι'), (0x1F89, 'M', u'ἁι'), (0x1F8A, 'M', u'ἂι'), (0x1F8B, 'M', u'ἃι'), (0x1F8C, 'M', u'ἄι'), (0x1F8D, 'M', u'ἅι'), (0x1F8E, 'M', u'ἆι'), (0x1F8F, 'M', u'ἇι'), (0x1F90, 'M', u'ἠι'), (0x1F91, 'M', u'ἡι'), (0x1F92, 'M', u'ἢι'), (0x1F93, 'M', u'ἣι'), (0x1F94, 'M', u'ἤι'), (0x1F95, 'M', u'ἥι'), (0x1F96, 'M', u'ἦι'), (0x1F97, 'M', u'ἧι'), (0x1F98, 'M', u'ἠι'), (0x1F99, 'M', u'ἡι'), (0x1F9A, 'M', u'ἢι'), (0x1F9B, 'M', u'ἣι'), (0x1F9C, 'M', u'ἤι'), (0x1F9D, 'M', u'ἥι'), (0x1F9E, 'M', u'ἦι'), (0x1F9F, 'M', u'ἧι'), (0x1FA0, 'M', u'ὠι'), (0x1FA1, 'M', u'ὡι'), (0x1FA2, 'M', u'ὢι'), (0x1FA3, 'M', u'ὣι'), (0x1FA4, 'M', u'ὤι'), (0x1FA5, 'M', u'ὥι'), (0x1FA6, 'M', u'ὦι'), (0x1FA7, 'M', u'ὧι'), (0x1FA8, 'M', u'ὠι'), (0x1FA9, 'M', u'ὡι'), (0x1FAA, 'M', u'ὢι'), (0x1FAB, 'M', u'ὣι'), 
(0x1FAC, 'M', u'ὤι'), (0x1FAD, 'M', u'ὥι'), (0x1FAE, 'M', u'ὦι'), (0x1FAF, 'M', u'ὧι'), (0x1FB0, 'V'), (0x1FB2, 'M', u'ὰι'), (0x1FB3, 'M', u'αι'), (0x1FB4, 'M', u'άι'), (0x1FB5, 'X'), (0x1FB6, 'V'), (0x1FB7, 'M', u'ᾶι'), (0x1FB8, 'M', u'ᾰ'), (0x1FB9, 'M', u'ᾱ'), (0x1FBA, 'M', u'ὰ'), (0x1FBB, 'M', u'ά'), (0x1FBC, 'M', u'αι'), (0x1FBD, '3', u' ̓'), (0x1FBE, 'M', u'ι'), (0x1FBF, '3', u' ̓'), (0x1FC0, '3', u' ͂'), (0x1FC1, '3', u' ̈͂'), (0x1FC2, 'M', u'ὴι'), (0x1FC3, 'M', u'ηι'), (0x1FC4, 'M', u'ήι'), (0x1FC5, 'X'), (0x1FC6, 'V'), (0x1FC7, 'M', u'ῆι'), (0x1FC8, 'M', u'ὲ'), (0x1FC9, 'M', u'έ'), (0x1FCA, 'M', u'ὴ'), (0x1FCB, 'M', u'ή'), (0x1FCC, 'M', u'ηι'), (0x1FCD, '3', u' ̓̀'), (0x1FCE, '3', u' ̓́'), (0x1FCF, '3', u' ̓͂'), (0x1FD0, 'V'), (0x1FD3, 'M', u'ΐ'), (0x1FD4, 'X'), (0x1FD6, 'V'), (0x1FD8, 'M', u'ῐ'), (0x1FD9, 'M', u'ῑ'), (0x1FDA, 'M', u'ὶ'), (0x1FDB, 'M', u'ί'), (0x1FDC, 'X'), (0x1FDD, '3', u' ̔̀'), (0x1FDE, '3', u' ̔́'), (0x1FDF, '3', u' ̔͂'), (0x1FE0, 'V'), (0x1FE3, 'M', u'ΰ'), (0x1FE4, 'V'), (0x1FE8, 'M', u'ῠ'), (0x1FE9, 'M', u'ῡ'), (0x1FEA, 'M', u'ὺ'), (0x1FEB, 'M', u'ύ'), (0x1FEC, 'M', u'ῥ'), (0x1FED, '3', u' ̈̀'), (0x1FEE, '3', u' ̈́'), (0x1FEF, '3', u'`'), (0x1FF0, 'X'), (0x1FF2, 'M', u'ὼι'), (0x1FF3, 'M', u'ωι'), ] def _seg_21(): return [ (0x1FF4, 'M', u'ώι'), (0x1FF5, 'X'), (0x1FF6, 'V'), (0x1FF7, 'M', u'ῶι'), (0x1FF8, 'M', u'ὸ'), (0x1FF9, 'M', u'ό'), (0x1FFA, 'M', u'ὼ'), (0x1FFB, 'M', u'ώ'), (0x1FFC, 'M', u'ωι'), (0x1FFD, '3', u' ́'), (0x1FFE, '3', u' ̔'), (0x1FFF, 'X'), (0x2000, '3', u' '), (0x200B, 'I'), (0x200C, 'D', u''), (0x200E, 'X'), (0x2010, 'V'), (0x2011, 'M', u'‐'), (0x2012, 'V'), (0x2017, '3', u' ̳'), (0x2018, 'V'), (0x2024, 'X'), (0x2027, 'V'), (0x2028, 'X'), (0x202F, '3', u' '), (0x2030, 'V'), (0x2033, 'M', u'′′'), (0x2034, 'M', u'′′′'), (0x2035, 'V'), (0x2036, 'M', u'‵‵'), (0x2037, 'M', u'‵‵‵'), (0x2038, 'V'), (0x203C, '3', u'!!'), (0x203D, 'V'), (0x203E, '3', u' ̅'), (0x203F, 'V'), (0x2047, '3', u'??'), (0x2048, '3', u'?!'), (0x2049, '3', u'!?'), (0x204A, 'V'), (0x2057, 'M', u'′′′′'), (0x2058, 'V'), (0x205F, '3', u' '), (0x2060, 'I'), (0x2061, 'X'), (0x2064, 'I'), (0x2065, 'X'), (0x2070, 'M', u'0'), (0x2071, 'M', u'i'), (0x2072, 'X'), (0x2074, 'M', u'4'), (0x2075, 'M', u'5'), (0x2076, 'M', u'6'), (0x2077, 'M', u'7'), (0x2078, 'M', u'8'), (0x2079, 'M', u'9'), (0x207A, '3', u'+'), (0x207B, 'M', u'−'), (0x207C, '3', u'='), (0x207D, '3', u'('), (0x207E, '3', u')'), (0x207F, 'M', u'n'), (0x2080, 'M', u'0'), (0x2081, 'M', u'1'), (0x2082, 'M', u'2'), (0x2083, 'M', u'3'), (0x2084, 'M', u'4'), (0x2085, 'M', u'5'), (0x2086, 'M', u'6'), (0x2087, 'M', u'7'), (0x2088, 'M', u'8'), (0x2089, 'M', u'9'), (0x208A, '3', u'+'), (0x208B, 'M', u'−'), (0x208C, '3', u'='), (0x208D, '3', u'('), (0x208E, '3', u')'), (0x208F, 'X'), (0x2090, 'M', u'a'), (0x2091, 'M', u'e'), (0x2092, 'M', u'o'), (0x2093, 'M', u'x'), (0x2094, 'M', u'ə'), (0x2095, 'M', u'h'), (0x2096, 'M', u'k'), (0x2097, 'M', u'l'), (0x2098, 'M', u'm'), (0x2099, 'M', u'n'), (0x209A, 'M', u'p'), (0x209B, 'M', u's'), (0x209C, 'M', u't'), (0x209D, 'X'), (0x20A0, 'V'), (0x20A8, 'M', u'rs'), (0x20A9, 'V'), (0x20C0, 'X'), (0x20D0, 'V'), (0x20F1, 'X'), (0x2100, '3', u'a/c'), (0x2101, '3', u'a/s'), ] def _seg_22(): return [ (0x2102, 'M', u'c'), (0x2103, 'M', u'°c'), (0x2104, 'V'), (0x2105, '3', u'c/o'), (0x2106, '3', u'c/u'), (0x2107, 'M', u'ɛ'), (0x2108, 'V'), (0x2109, 'M', u'°f'), (0x210A, 'M', u'g'), (0x210B, 'M', u'h'), (0x210F, 'M', u'ħ'), (0x2110, 'M', u'i'), (0x2112, 'M', u'l'), (0x2114, 'V'), (0x2115, 'M', u'n'), (0x2116, 
'M', u'no'), (0x2117, 'V'), (0x2119, 'M', u'p'), (0x211A, 'M', u'q'), (0x211B, 'M', u'r'), (0x211E, 'V'), (0x2120, 'M', u'sm'), (0x2121, 'M', u'tel'), (0x2122, 'M', u'tm'), (0x2123, 'V'), (0x2124, 'M', u'z'), (0x2125, 'V'), (0x2126, 'M', u'ω'), (0x2127, 'V'), (0x2128, 'M', u'z'), (0x2129, 'V'), (0x212A, 'M', u'k'), (0x212B, 'M', u'å'), (0x212C, 'M', u'b'), (0x212D, 'M', u'c'), (0x212E, 'V'), (0x212F, 'M', u'e'), (0x2131, 'M', u'f'), (0x2132, 'X'), (0x2133, 'M', u'm'), (0x2134, 'M', u'o'), (0x2135, 'M', u'א'), (0x2136, 'M', u'ב'), (0x2137, 'M', u'ג'), (0x2138, 'M', u'ד'), (0x2139, 'M', u'i'), (0x213A, 'V'), (0x213B, 'M', u'fax'), (0x213C, 'M', u'π'), (0x213D, 'M', u'γ'), (0x213F, 'M', u'π'), (0x2140, 'M', u'∑'), (0x2141, 'V'), (0x2145, 'M', u'd'), (0x2147, 'M', u'e'), (0x2148, 'M', u'i'), (0x2149, 'M', u'j'), (0x214A, 'V'), (0x2150, 'M', u'1⁄7'), (0x2151, 'M', u'1⁄9'), (0x2152, 'M', u'1⁄10'), (0x2153, 'M', u'1⁄3'), (0x2154, 'M', u'2⁄3'), (0x2155, 'M', u'1⁄5'), (0x2156, 'M', u'2⁄5'), (0x2157, 'M', u'3⁄5'), (0x2158, 'M', u'4⁄5'), (0x2159, 'M', u'1⁄6'), (0x215A, 'M', u'5⁄6'), (0x215B, 'M', u'1⁄8'), (0x215C, 'M', u'3⁄8'), (0x215D, 'M', u'5⁄8'), (0x215E, 'M', u'7⁄8'), (0x215F, 'M', u'1⁄'), (0x2160, 'M', u'i'), (0x2161, 'M', u'ii'), (0x2162, 'M', u'iii'), (0x2163, 'M', u'iv'), (0x2164, 'M', u'v'), (0x2165, 'M', u'vi'), (0x2166, 'M', u'vii'), (0x2167, 'M', u'viii'), (0x2168, 'M', u'ix'), (0x2169, 'M', u'x'), (0x216A, 'M', u'xi'), (0x216B, 'M', u'xii'), (0x216C, 'M', u'l'), (0x216D, 'M', u'c'), (0x216E, 'M', u'd'), (0x216F, 'M', u'm'), (0x2170, 'M', u'i'), (0x2171, 'M', u'ii'), (0x2172, 'M', u'iii'), (0x2173, 'M', u'iv'), (0x2174, 'M', u'v'), (0x2175, 'M', u'vi'), (0x2176, 'M', u'vii'), (0x2177, 'M', u'viii'), (0x2178, 'M', u'ix'), (0x2179, 'M', u'x'), ] def _seg_23(): return [ (0x217A, 'M', u'xi'), (0x217B, 'M', u'xii'), (0x217C, 'M', u'l'), (0x217D, 'M', u'c'), (0x217E, 'M', u'd'), (0x217F, 'M', u'm'), (0x2180, 'V'), (0x2183, 'X'), (0x2184, 'V'), (0x2189, 'M', u'0⁄3'), (0x218A, 'V'), (0x218C, 'X'), (0x2190, 'V'), (0x222C, 'M', u'∫∫'), (0x222D, 'M', u'∫∫∫'), (0x222E, 'V'), (0x222F, 'M', u'∮∮'), (0x2230, 'M', u'∮∮∮'), (0x2231, 'V'), (0x2260, '3'), (0x2261, 'V'), (0x226E, '3'), (0x2270, 'V'), (0x2329, 'M', u'〈'), (0x232A, 'M', u'〉'), (0x232B, 'V'), (0x2427, 'X'), (0x2440, 'V'), (0x244B, 'X'), (0x2460, 'M', u'1'), (0x2461, 'M', u'2'), (0x2462, 'M', u'3'), (0x2463, 'M', u'4'), (0x2464, 'M', u'5'), (0x2465, 'M', u'6'), (0x2466, 'M', u'7'), (0x2467, 'M', u'8'), (0x2468, 'M', u'9'), (0x2469, 'M', u'10'), (0x246A, 'M', u'11'), (0x246B, 'M', u'12'), (0x246C, 'M', u'13'), (0x246D, 'M', u'14'), (0x246E, 'M', u'15'), (0x246F, 'M', u'16'), (0x2470, 'M', u'17'), (0x2471, 'M', u'18'), (0x2472, 'M', u'19'), (0x2473, 'M', u'20'), (0x2474, '3', u'(1)'), (0x2475, '3', u'(2)'), (0x2476, '3', u'(3)'), (0x2477, '3', u'(4)'), (0x2478, '3', u'(5)'), (0x2479, '3', u'(6)'), (0x247A, '3', u'(7)'), (0x247B, '3', u'(8)'), (0x247C, '3', u'(9)'), (0x247D, '3', u'(10)'), (0x247E, '3', u'(11)'), (0x247F, '3', u'(12)'), (0x2480, '3', u'(13)'), (0x2481, '3', u'(14)'), (0x2482, '3', u'(15)'), (0x2483, '3', u'(16)'), (0x2484, '3', u'(17)'), (0x2485, '3', u'(18)'), (0x2486, '3', u'(19)'), (0x2487, '3', u'(20)'), (0x2488, 'X'), (0x249C, '3', u'(a)'), (0x249D, '3', u'(b)'), (0x249E, '3', u'(c)'), (0x249F, '3', u'(d)'), (0x24A0, '3', u'(e)'), (0x24A1, '3', u'(f)'), (0x24A2, '3', u'(g)'), (0x24A3, '3', u'(h)'), (0x24A4, '3', u'(i)'), (0x24A5, '3', u'(j)'), (0x24A6, '3', u'(k)'), (0x24A7, '3', u'(l)'), (0x24A8, '3', u'(m)'), (0x24A9, '3', 
u'(n)'), (0x24AA, '3', u'(o)'), (0x24AB, '3', u'(p)'), (0x24AC, '3', u'(q)'), (0x24AD, '3', u'(r)'), (0x24AE, '3', u'(s)'), (0x24AF, '3', u'(t)'), (0x24B0, '3', u'(u)'), (0x24B1, '3', u'(v)'), (0x24B2, '3', u'(w)'), (0x24B3, '3', u'(x)'), (0x24B4, '3', u'(y)'), (0x24B5, '3', u'(z)'), (0x24B6, 'M', u'a'), (0x24B7, 'M', u'b'), (0x24B8, 'M', u'c'), (0x24B9, 'M', u'd'), ] def _seg_24(): return [ (0x24BA, 'M', u'e'), (0x24BB, 'M', u'f'), (0x24BC, 'M', u'g'), (0x24BD, 'M', u'h'), (0x24BE, 'M', u'i'), (0x24BF, 'M', u'j'), (0x24C0, 'M', u'k'), (0x24C1, 'M', u'l'), (0x24C2, 'M', u'm'), (0x24C3, 'M', u'n'), (0x24C4, 'M', u'o'), (0x24C5, 'M', u'p'), (0x24C6, 'M', u'q'), (0x24C7, 'M', u'r'), (0x24C8, 'M', u's'), (0x24C9, 'M', u't'), (0x24CA, 'M', u'u'), (0x24CB, 'M', u'v'), (0x24CC, 'M', u'w'), (0x24CD, 'M', u'x'), (0x24CE, 'M', u'y'), (0x24CF, 'M', u'z'), (0x24D0, 'M', u'a'), (0x24D1, 'M', u'b'), (0x24D2, 'M', u'c'), (0x24D3, 'M', u'd'), (0x24D4, 'M', u'e'), (0x24D5, 'M', u'f'), (0x24D6, 'M', u'g'), (0x24D7, 'M', u'h'), (0x24D8, 'M', u'i'), (0x24D9, 'M', u'j'), (0x24DA, 'M', u'k'), (0x24DB, 'M', u'l'), (0x24DC, 'M', u'm'), (0x24DD, 'M', u'n'), (0x24DE, 'M', u'o'), (0x24DF, 'M', u'p'), (0x24E0, 'M', u'q'), (0x24E1, 'M', u'r'), (0x24E2, 'M', u's'), (0x24E3, 'M', u't'), (0x24E4, 'M', u'u'), (0x24E5, 'M', u'v'), (0x24E6, 'M', u'w'), (0x24E7, 'M', u'x'), (0x24E8, 'M', u'y'), (0x24E9, 'M', u'z'), (0x24EA, 'M', u'0'), (0x24EB, 'V'), (0x2A0C, 'M', u'∫∫∫∫'), (0x2A0D, 'V'), (0x2A74, '3', u'::='), (0x2A75, '3', u'=='), (0x2A76, '3', u'==='), (0x2A77, 'V'), (0x2ADC, 'M', u'⫝̸'), (0x2ADD, 'V'), (0x2B74, 'X'), (0x2B76, 'V'), (0x2B96, 'X'), (0x2B97, 'V'), (0x2C00, 'M', u'ⰰ'), (0x2C01, 'M', u'ⰱ'), (0x2C02, 'M', u'ⰲ'), (0x2C03, 'M', u'ⰳ'), (0x2C04, 'M', u'ⰴ'), (0x2C05, 'M', u'ⰵ'), (0x2C06, 'M', u'ⰶ'), (0x2C07, 'M', u'ⰷ'), (0x2C08, 'M', u'ⰸ'), (0x2C09, 'M', u'ⰹ'), (0x2C0A, 'M', u'ⰺ'), (0x2C0B, 'M', u'ⰻ'), (0x2C0C, 'M', u'ⰼ'), (0x2C0D, 'M', u'ⰽ'), (0x2C0E, 'M', u'ⰾ'), (0x2C0F, 'M', u'ⰿ'), (0x2C10, 'M', u'ⱀ'), (0x2C11, 'M', u'ⱁ'), (0x2C12, 'M', u'ⱂ'), (0x2C13, 'M', u'ⱃ'), (0x2C14, 'M', u'ⱄ'), (0x2C15, 'M', u'ⱅ'), (0x2C16, 'M', u'ⱆ'), (0x2C17, 'M', u'ⱇ'), (0x2C18, 'M', u'ⱈ'), (0x2C19, 'M', u'ⱉ'), (0x2C1A, 'M', u'ⱊ'), (0x2C1B, 'M', u'ⱋ'), (0x2C1C, 'M', u'ⱌ'), (0x2C1D, 'M', u'ⱍ'), (0x2C1E, 'M', u'ⱎ'), (0x2C1F, 'M', u'ⱏ'), (0x2C20, 'M', u'ⱐ'), (0x2C21, 'M', u'ⱑ'), (0x2C22, 'M', u'ⱒ'), (0x2C23, 'M', u'ⱓ'), (0x2C24, 'M', u'ⱔ'), (0x2C25, 'M', u'ⱕ'), ] def _seg_25(): return [ (0x2C26, 'M', u'ⱖ'), (0x2C27, 'M', u'ⱗ'), (0x2C28, 'M', u'ⱘ'), (0x2C29, 'M', u'ⱙ'), (0x2C2A, 'M', u'ⱚ'), (0x2C2B, 'M', u'ⱛ'), (0x2C2C, 'M', u'ⱜ'), (0x2C2D, 'M', u'ⱝ'), (0x2C2E, 'M', u'ⱞ'), (0x2C2F, 'X'), (0x2C30, 'V'), (0x2C5F, 'X'), (0x2C60, 'M', u'ⱡ'), (0x2C61, 'V'), (0x2C62, 'M', u'ɫ'), (0x2C63, 'M', u'ᵽ'), (0x2C64, 'M', u'ɽ'), (0x2C65, 'V'), (0x2C67, 'M', u'ⱨ'), (0x2C68, 'V'), (0x2C69, 'M', u'ⱪ'), (0x2C6A, 'V'), (0x2C6B, 'M', u'ⱬ'), (0x2C6C, 'V'), (0x2C6D, 'M', u'ɑ'), (0x2C6E, 'M', u'ɱ'), (0x2C6F, 'M', u'ɐ'), (0x2C70, 'M', u'ɒ'), (0x2C71, 'V'), (0x2C72, 'M', u'ⱳ'), (0x2C73, 'V'), (0x2C75, 'M', u'ⱶ'), (0x2C76, 'V'), (0x2C7C, 'M', u'j'), (0x2C7D, 'M', u'v'), (0x2C7E, 'M', u'ȿ'), (0x2C7F, 'M', u'ɀ'), (0x2C80, 'M', u'ⲁ'), (0x2C81, 'V'), (0x2C82, 'M', u'ⲃ'), (0x2C83, 'V'), (0x2C84, 'M', u'ⲅ'), (0x2C85, 'V'), (0x2C86, 'M', u'ⲇ'), (0x2C87, 'V'), (0x2C88, 'M', u'ⲉ'), (0x2C89, 'V'), (0x2C8A, 'M', u'ⲋ'), (0x2C8B, 'V'), (0x2C8C, 'M', u'ⲍ'), (0x2C8D, 'V'), (0x2C8E, 'M', u'ⲏ'), (0x2C8F, 'V'), (0x2C90, 'M', u'ⲑ'), (0x2C91, 'V'), (0x2C92, 'M', u'ⲓ'), (0x2C93, 'V'), 
(0x2C94, 'M', u'ⲕ'), (0x2C95, 'V'), (0x2C96, 'M', u'ⲗ'), (0x2C97, 'V'), (0x2C98, 'M', u'ⲙ'), (0x2C99, 'V'), (0x2C9A, 'M', u'ⲛ'), (0x2C9B, 'V'), (0x2C9C, 'M', u'ⲝ'), (0x2C9D, 'V'), (0x2C9E, 'M', u'ⲟ'), (0x2C9F, 'V'), (0x2CA0, 'M', u'ⲡ'), (0x2CA1, 'V'), (0x2CA2, 'M', u'ⲣ'), (0x2CA3, 'V'), (0x2CA4, 'M', u'ⲥ'), (0x2CA5, 'V'), (0x2CA6, 'M', u'ⲧ'), (0x2CA7, 'V'), (0x2CA8, 'M', u'ⲩ'), (0x2CA9, 'V'), (0x2CAA, 'M', u'ⲫ'), (0x2CAB, 'V'), (0x2CAC, 'M', u'ⲭ'), (0x2CAD, 'V'), (0x2CAE, 'M', u'ⲯ'), (0x2CAF, 'V'), (0x2CB0, 'M', u'ⲱ'), (0x2CB1, 'V'), (0x2CB2, 'M', u'ⲳ'), (0x2CB3, 'V'), (0x2CB4, 'M', u'ⲵ'), (0x2CB5, 'V'), (0x2CB6, 'M', u'ⲷ'), (0x2CB7, 'V'), (0x2CB8, 'M', u'ⲹ'), (0x2CB9, 'V'), (0x2CBA, 'M', u'ⲻ'), (0x2CBB, 'V'), (0x2CBC, 'M', u'ⲽ'), (0x2CBD, 'V'), (0x2CBE, 'M', u'ⲿ'), ] def _seg_26(): return [ (0x2CBF, 'V'), (0x2CC0, 'M', u'ⳁ'), (0x2CC1, 'V'), (0x2CC2, 'M', u'ⳃ'), (0x2CC3, 'V'), (0x2CC4, 'M', u'ⳅ'), (0x2CC5, 'V'), (0x2CC6, 'M', u'ⳇ'), (0x2CC7, 'V'), (0x2CC8, 'M', u'ⳉ'), (0x2CC9, 'V'), (0x2CCA, 'M', u'ⳋ'), (0x2CCB, 'V'), (0x2CCC, 'M', u'ⳍ'), (0x2CCD, 'V'), (0x2CCE, 'M', u'ⳏ'), (0x2CCF, 'V'), (0x2CD0, 'M', u'ⳑ'), (0x2CD1, 'V'), (0x2CD2, 'M', u'ⳓ'), (0x2CD3, 'V'), (0x2CD4, 'M', u'ⳕ'), (0x2CD5, 'V'), (0x2CD6, 'M', u'ⳗ'), (0x2CD7, 'V'), (0x2CD8, 'M', u'ⳙ'), (0x2CD9, 'V'), (0x2CDA, 'M', u'ⳛ'), (0x2CDB, 'V'), (0x2CDC, 'M', u'ⳝ'), (0x2CDD, 'V'), (0x2CDE, 'M', u'ⳟ'), (0x2CDF, 'V'), (0x2CE0, 'M', u'ⳡ'), (0x2CE1, 'V'), (0x2CE2, 'M', u'ⳣ'), (0x2CE3, 'V'), (0x2CEB, 'M', u'ⳬ'), (0x2CEC, 'V'), (0x2CED, 'M', u'ⳮ'), (0x2CEE, 'V'), (0x2CF2, 'M', u'ⳳ'), (0x2CF3, 'V'), (0x2CF4, 'X'), (0x2CF9, 'V'), (0x2D26, 'X'), (0x2D27, 'V'), (0x2D28, 'X'), (0x2D2D, 'V'), (0x2D2E, 'X'), (0x2D30, 'V'), (0x2D68, 'X'), (0x2D6F, 'M', u'ⵡ'), (0x2D70, 'V'), (0x2D71, 'X'), (0x2D7F, 'V'), (0x2D97, 'X'), (0x2DA0, 'V'), (0x2DA7, 'X'), (0x2DA8, 'V'), (0x2DAF, 'X'), (0x2DB0, 'V'), (0x2DB7, 'X'), (0x2DB8, 'V'), (0x2DBF, 'X'), (0x2DC0, 'V'), (0x2DC7, 'X'), (0x2DC8, 'V'), (0x2DCF, 'X'), (0x2DD0, 'V'), (0x2DD7, 'X'), (0x2DD8, 'V'), (0x2DDF, 'X'), (0x2DE0, 'V'), (0x2E53, 'X'), (0x2E80, 'V'), (0x2E9A, 'X'), (0x2E9B, 'V'), (0x2E9F, 'M', u'母'), (0x2EA0, 'V'), (0x2EF3, 'M', u'龟'), (0x2EF4, 'X'), (0x2F00, 'M', u'一'), (0x2F01, 'M', u'丨'), (0x2F02, 'M', u'丶'), (0x2F03, 'M', u'丿'), (0x2F04, 'M', u'乙'), (0x2F05, 'M', u'亅'), (0x2F06, 'M', u'二'), (0x2F07, 'M', u'亠'), (0x2F08, 'M', u'人'), (0x2F09, 'M', u'儿'), (0x2F0A, 'M', u'入'), (0x2F0B, 'M', u'八'), (0x2F0C, 'M', u'冂'), (0x2F0D, 'M', u'冖'), (0x2F0E, 'M', u'冫'), (0x2F0F, 'M', u'几'), (0x2F10, 'M', u'凵'), (0x2F11, 'M', u'刀'), ] def _seg_27(): return [ (0x2F12, 'M', u'力'), (0x2F13, 'M', u'勹'), (0x2F14, 'M', u'匕'), (0x2F15, 'M', u'匚'), (0x2F16, 'M', u'匸'), (0x2F17, 'M', u'十'), (0x2F18, 'M', u'卜'), (0x2F19, 'M', u'卩'), (0x2F1A, 'M', u'厂'), (0x2F1B, 'M', u'厶'), (0x2F1C, 'M', u'又'), (0x2F1D, 'M', u'口'), (0x2F1E, 'M', u'囗'), (0x2F1F, 'M', u'土'), (0x2F20, 'M', u'士'), (0x2F21, 'M', u'夂'), (0x2F22, 'M', u'夊'), (0x2F23, 'M', u'夕'), (0x2F24, 'M', u'大'), (0x2F25, 'M', u'女'), (0x2F26, 'M', u'子'), (0x2F27, 'M', u'宀'), (0x2F28, 'M', u'寸'), (0x2F29, 'M', u'小'), (0x2F2A, 'M', u'尢'), (0x2F2B, 'M', u'尸'), (0x2F2C, 'M', u'屮'), (0x2F2D, 'M', u'山'), (0x2F2E, 'M', u'巛'), (0x2F2F, 'M', u'工'), (0x2F30, 'M', u'己'), (0x2F31, 'M', u'巾'), (0x2F32, 'M', u'干'), (0x2F33, 'M', u'幺'), (0x2F34, 'M', u'广'), (0x2F35, 'M', u'廴'), (0x2F36, 'M', u'廾'), (0x2F37, 'M', u'弋'), (0x2F38, 'M', u'弓'), (0x2F39, 'M', u'彐'), (0x2F3A, 'M', u'彡'), (0x2F3B, 'M', u'彳'), (0x2F3C, 'M', u'心'), (0x2F3D, 'M', u'戈'), (0x2F3E, 'M', u'戶'), (0x2F3F, 'M', u'手'), 
(0x2F40, 'M', u'支'), (0x2F41, 'M', u'攴'), (0x2F42, 'M', u'文'), (0x2F43, 'M', u'斗'), (0x2F44, 'M', u'斤'), (0x2F45, 'M', u'方'), (0x2F46, 'M', u'无'), (0x2F47, 'M', u'日'), (0x2F48, 'M', u'曰'), (0x2F49, 'M', u'月'), (0x2F4A, 'M', u'木'), (0x2F4B, 'M', u'欠'), (0x2F4C, 'M', u'止'), (0x2F4D, 'M', u'歹'), (0x2F4E, 'M', u'殳'), (0x2F4F, 'M', u'毋'), (0x2F50, 'M', u'比'), (0x2F51, 'M', u'毛'), (0x2F52, 'M', u'氏'), (0x2F53, 'M', u'气'), (0x2F54, 'M', u'水'), (0x2F55, 'M', u'火'), (0x2F56, 'M', u'爪'), (0x2F57, 'M', u'父'), (0x2F58, 'M', u'爻'), (0x2F59, 'M', u'爿'), (0x2F5A, 'M', u'片'), (0x2F5B, 'M', u'牙'), (0x2F5C, 'M', u'牛'), (0x2F5D, 'M', u'犬'), (0x2F5E, 'M', u'玄'), (0x2F5F, 'M', u'玉'), (0x2F60, 'M', u'瓜'), (0x2F61, 'M', u'瓦'), (0x2F62, 'M', u'甘'), (0x2F63, 'M', u'生'), (0x2F64, 'M', u'用'), (0x2F65, 'M', u'田'), (0x2F66, 'M', u'疋'), (0x2F67, 'M', u'疒'), (0x2F68, 'M', u'癶'), (0x2F69, 'M', u'白'), (0x2F6A, 'M', u'皮'), (0x2F6B, 'M', u'皿'), (0x2F6C, 'M', u'目'), (0x2F6D, 'M', u'矛'), (0x2F6E, 'M', u'矢'), (0x2F6F, 'M', u'石'), (0x2F70, 'M', u'示'), (0x2F71, 'M', u'禸'), (0x2F72, 'M', u'禾'), (0x2F73, 'M', u'穴'), (0x2F74, 'M', u'立'), (0x2F75, 'M', u'竹'), ] def _seg_28(): return [ (0x2F76, 'M', u'米'), (0x2F77, 'M', u'糸'), (0x2F78, 'M', u'缶'), (0x2F79, 'M', u'网'), (0x2F7A, 'M', u'羊'), (0x2F7B, 'M', u'羽'), (0x2F7C, 'M', u'老'), (0x2F7D, 'M', u'而'), (0x2F7E, 'M', u'耒'), (0x2F7F, 'M', u'耳'), (0x2F80, 'M', u'聿'), (0x2F81, 'M', u'肉'), (0x2F82, 'M', u'臣'), (0x2F83, 'M', u'自'), (0x2F84, 'M', u'至'), (0x2F85, 'M', u'臼'), (0x2F86, 'M', u'舌'), (0x2F87, 'M', u'舛'), (0x2F88, 'M', u'舟'), (0x2F89, 'M', u'艮'), (0x2F8A, 'M', u'色'), (0x2F8B, 'M', u'艸'), (0x2F8C, 'M', u'虍'), (0x2F8D, 'M', u'虫'), (0x2F8E, 'M', u'血'), (0x2F8F, 'M', u'行'), (0x2F90, 'M', u'衣'), (0x2F91, 'M', u'襾'), (0x2F92, 'M', u'見'), (0x2F93, 'M', u'角'), (0x2F94, 'M', u'言'), (0x2F95, 'M', u'谷'), (0x2F96, 'M', u'豆'), (0x2F97, 'M', u'豕'), (0x2F98, 'M', u'豸'), (0x2F99, 'M', u'貝'), (0x2F9A, 'M', u'赤'), (0x2F9B, 'M', u'走'), (0x2F9C, 'M', u'足'), (0x2F9D, 'M', u'身'), (0x2F9E, 'M', u'車'), (0x2F9F, 'M', u'辛'), (0x2FA0, 'M', u'辰'), (0x2FA1, 'M', u'辵'), (0x2FA2, 'M', u'邑'), (0x2FA3, 'M', u'酉'), (0x2FA4, 'M', u'釆'), (0x2FA5, 'M', u'里'), (0x2FA6, 'M', u'金'), (0x2FA7, 'M', u'長'), (0x2FA8, 'M', u'門'), (0x2FA9, 'M', u'阜'), (0x2FAA, 'M', u'隶'), (0x2FAB, 'M', u'隹'), (0x2FAC, 'M', u'雨'), (0x2FAD, 'M', u'靑'), (0x2FAE, 'M', u'非'), (0x2FAF, 'M', u'面'), (0x2FB0, 'M', u'革'), (0x2FB1, 'M', u'韋'), (0x2FB2, 'M', u'韭'), (0x2FB3, 'M', u'音'), (0x2FB4, 'M', u'頁'), (0x2FB5, 'M', u'風'), (0x2FB6, 'M', u'飛'), (0x2FB7, 'M', u'食'), (0x2FB8, 'M', u'首'), (0x2FB9, 'M', u'香'), (0x2FBA, 'M', u'馬'), (0x2FBB, 'M', u'骨'), (0x2FBC, 'M', u'高'), (0x2FBD, 'M', u'髟'), (0x2FBE, 'M', u'鬥'), (0x2FBF, 'M', u'鬯'), (0x2FC0, 'M', u'鬲'), (0x2FC1, 'M', u'鬼'), (0x2FC2, 'M', u'魚'), (0x2FC3, 'M', u'鳥'), (0x2FC4, 'M', u'鹵'), (0x2FC5, 'M', u'鹿'), (0x2FC6, 'M', u'麥'), (0x2FC7, 'M', u'麻'), (0x2FC8, 'M', u'黃'), (0x2FC9, 'M', u'黍'), (0x2FCA, 'M', u'黑'), (0x2FCB, 'M', u'黹'), (0x2FCC, 'M', u'黽'), (0x2FCD, 'M', u'鼎'), (0x2FCE, 'M', u'鼓'), (0x2FCF, 'M', u'鼠'), (0x2FD0, 'M', u'鼻'), (0x2FD1, 'M', u'齊'), (0x2FD2, 'M', u'齒'), (0x2FD3, 'M', u'龍'), (0x2FD4, 'M', u'龜'), (0x2FD5, 'M', u'龠'), (0x2FD6, 'X'), (0x3000, '3', u' '), (0x3001, 'V'), (0x3002, 'M', u'.'), ] def _seg_29(): return [ (0x3003, 'V'), (0x3036, 'M', u'〒'), (0x3037, 'V'), (0x3038, 'M', u'十'), (0x3039, 'M', u'卄'), (0x303A, 'M', u'卅'), (0x303B, 'V'), (0x3040, 'X'), (0x3041, 'V'), (0x3097, 'X'), (0x3099, 'V'), (0x309B, '3', u' ゙'), (0x309C, '3', u' ゚'), (0x309D, 'V'), (0x309F, 'M', u'より'), (0x30A0, 
'V'), (0x30FF, 'M', u'コト'), (0x3100, 'X'), (0x3105, 'V'), (0x3130, 'X'), (0x3131, 'M', u'ᄀ'), (0x3132, 'M', u'ᄁ'), (0x3133, 'M', u'ᆪ'), (0x3134, 'M', u'ᄂ'), (0x3135, 'M', u'ᆬ'), (0x3136, 'M', u'ᆭ'), (0x3137, 'M', u'ᄃ'), (0x3138, 'M', u'ᄄ'), (0x3139, 'M', u'ᄅ'), (0x313A, 'M', u'ᆰ'), (0x313B, 'M', u'ᆱ'), (0x313C, 'M', u'ᆲ'), (0x313D, 'M', u'ᆳ'), (0x313E, 'M', u'ᆴ'), (0x313F, 'M', u'ᆵ'), (0x3140, 'M', u'ᄚ'), (0x3141, 'M', u'ᄆ'), (0x3142, 'M', u'ᄇ'), (0x3143, 'M', u'ᄈ'), (0x3144, 'M', u'ᄡ'), (0x3145, 'M', u'ᄉ'), (0x3146, 'M', u'ᄊ'), (0x3147, 'M', u'ᄋ'), (0x3148, 'M', u'ᄌ'), (0x3149, 'M', u'ᄍ'), (0x314A, 'M', u'ᄎ'), (0x314B, 'M', u'ᄏ'), (0x314C, 'M', u'ᄐ'), (0x314D, 'M', u'ᄑ'), (0x314E, 'M', u'ᄒ'), (0x314F, 'M', u'ᅡ'), (0x3150, 'M', u'ᅢ'), (0x3151, 'M', u'ᅣ'), (0x3152, 'M', u'ᅤ'), (0x3153, 'M', u'ᅥ'), (0x3154, 'M', u'ᅦ'), (0x3155, 'M', u'ᅧ'), (0x3156, 'M', u'ᅨ'), (0x3157, 'M', u'ᅩ'), (0x3158, 'M', u'ᅪ'), (0x3159, 'M', u'ᅫ'), (0x315A, 'M', u'ᅬ'), (0x315B, 'M', u'ᅭ'), (0x315C, 'M', u'ᅮ'), (0x315D, 'M', u'ᅯ'), (0x315E, 'M', u'ᅰ'), (0x315F, 'M', u'ᅱ'), (0x3160, 'M', u'ᅲ'), (0x3161, 'M', u'ᅳ'), (0x3162, 'M', u'ᅴ'), (0x3163, 'M', u'ᅵ'), (0x3164, 'X'), (0x3165, 'M', u'ᄔ'), (0x3166, 'M', u'ᄕ'), (0x3167, 'M', u'ᇇ'), (0x3168, 'M', u'ᇈ'), (0x3169, 'M', u'ᇌ'), (0x316A, 'M', u'ᇎ'), (0x316B, 'M', u'ᇓ'), (0x316C, 'M', u'ᇗ'), (0x316D, 'M', u'ᇙ'), (0x316E, 'M', u'ᄜ'), (0x316F, 'M', u'ᇝ'), (0x3170, 'M', u'ᇟ'), (0x3171, 'M', u'ᄝ'), (0x3172, 'M', u'ᄞ'), (0x3173, 'M', u'ᄠ'), (0x3174, 'M', u'ᄢ'), (0x3175, 'M', u'ᄣ'), (0x3176, 'M', u'ᄧ'), (0x3177, 'M', u'ᄩ'), (0x3178, 'M', u'ᄫ'), (0x3179, 'M', u'ᄬ'), (0x317A, 'M', u'ᄭ'), (0x317B, 'M', u'ᄮ'), (0x317C, 'M', u'ᄯ'), (0x317D, 'M', u'ᄲ'), (0x317E, 'M', u'ᄶ'), (0x317F, 'M', u'ᅀ'), (0x3180, 'M', u'ᅇ'), ] def _seg_30(): return [ (0x3181, 'M', u'ᅌ'), (0x3182, 'M', u'ᇱ'), (0x3183, 'M', u'ᇲ'), (0x3184, 'M', u'ᅗ'), (0x3185, 'M', u'ᅘ'), (0x3186, 'M', u'ᅙ'), (0x3187, 'M', u'ᆄ'), (0x3188, 'M', u'ᆅ'), (0x3189, 'M', u'ᆈ'), (0x318A, 'M', u'ᆑ'), (0x318B, 'M', u'ᆒ'), (0x318C, 'M', u'ᆔ'), (0x318D, 'M', u'ᆞ'), (0x318E, 'M', u'ᆡ'), (0x318F, 'X'), (0x3190, 'V'), (0x3192, 'M', u'一'), (0x3193, 'M', u'二'), (0x3194, 'M', u'三'), (0x3195, 'M', u'四'), (0x3196, 'M', u'上'), (0x3197, 'M', u'中'), (0x3198, 'M', u'下'), (0x3199, 'M', u'甲'), (0x319A, 'M', u'乙'), (0x319B, 'M', u'丙'), (0x319C, 'M', u'丁'), (0x319D, 'M', u'天'), (0x319E, 'M', u'地'), (0x319F, 'M', u'人'), (0x31A0, 'V'), (0x31E4, 'X'), (0x31F0, 'V'), (0x3200, '3', u'(ᄀ)'), (0x3201, '3', u'(ᄂ)'), (0x3202, '3', u'(ᄃ)'), (0x3203, '3', u'(ᄅ)'), (0x3204, '3', u'(ᄆ)'), (0x3205, '3', u'(ᄇ)'), (0x3206, '3', u'(ᄉ)'), (0x3207, '3', u'(ᄋ)'), (0x3208, '3', u'(ᄌ)'), (0x3209, '3', u'(ᄎ)'), (0x320A, '3', u'(ᄏ)'), (0x320B, '3', u'(ᄐ)'), (0x320C, '3', u'(ᄑ)'), (0x320D, '3', u'(ᄒ)'), (0x320E, '3', u'(가)'), (0x320F, '3', u'(나)'), (0x3210, '3', u'(다)'), (0x3211, '3', u'(라)'), (0x3212, '3', u'(마)'), (0x3213, '3', u'(바)'), (0x3214, '3', u'(사)'), (0x3215, '3', u'(아)'), (0x3216, '3', u'(자)'), (0x3217, '3', u'(차)'), (0x3218, '3', u'(카)'), (0x3219, '3', u'(타)'), (0x321A, '3', u'(파)'), (0x321B, '3', u'(하)'), (0x321C, '3', u'(주)'), (0x321D, '3', u'(오전)'), (0x321E, '3', u'(오후)'), (0x321F, 'X'), (0x3220, '3', u'(一)'), (0x3221, '3', u'(二)'), (0x3222, '3', u'(三)'), (0x3223, '3', u'(四)'), (0x3224, '3', u'(五)'), (0x3225, '3', u'(六)'), (0x3226, '3', u'(七)'), (0x3227, '3', u'(八)'), (0x3228, '3', u'(九)'), (0x3229, '3', u'(十)'), (0x322A, '3', u'(月)'), (0x322B, '3', u'(火)'), (0x322C, '3', u'(水)'), (0x322D, '3', u'(木)'), (0x322E, '3', u'(金)'), (0x322F, '3', u'(土)'), (0x3230, '3', 
u'(日)'), (0x3231, '3', u'(株)'), (0x3232, '3', u'(有)'), (0x3233, '3', u'(社)'), (0x3234, '3', u'(名)'), (0x3235, '3', u'(特)'), (0x3236, '3', u'(財)'), (0x3237, '3', u'(祝)'), (0x3238, '3', u'(労)'), (0x3239, '3', u'(代)'), (0x323A, '3', u'(呼)'), (0x323B, '3', u'(学)'), (0x323C, '3', u'(監)'), (0x323D, '3', u'(企)'), (0x323E, '3', u'(資)'), (0x323F, '3', u'(協)'), (0x3240, '3', u'(祭)'), (0x3241, '3', u'(休)'), (0x3242, '3', u'(自)'), ] def _seg_31(): return [ (0x3243, '3', u'(至)'), (0x3244, 'M', u'問'), (0x3245, 'M', u'幼'), (0x3246, 'M', u'文'), (0x3247, 'M', u'箏'), (0x3248, 'V'), (0x3250, 'M', u'pte'), (0x3251, 'M', u'21'), (0x3252, 'M', u'22'), (0x3253, 'M', u'23'), (0x3254, 'M', u'24'), (0x3255, 'M', u'25'), (0x3256, 'M', u'26'), (0x3257, 'M', u'27'), (0x3258, 'M', u'28'), (0x3259, 'M', u'29'), (0x325A, 'M', u'30'), (0x325B, 'M', u'31'), (0x325C, 'M', u'32'), (0x325D, 'M', u'33'), (0x325E, 'M', u'34'), (0x325F, 'M', u'35'), (0x3260, 'M', u'ᄀ'), (0x3261, 'M', u'ᄂ'), (0x3262, 'M', u'ᄃ'), (0x3263, 'M', u'ᄅ'), (0x3264, 'M', u'ᄆ'), (0x3265, 'M', u'ᄇ'), (0x3266, 'M', u'ᄉ'), (0x3267, 'M', u'ᄋ'), (0x3268, 'M', u'ᄌ'), (0x3269, 'M', u'ᄎ'), (0x326A, 'M', u'ᄏ'), (0x326B, 'M', u'ᄐ'), (0x326C, 'M', u'ᄑ'), (0x326D, 'M', u'ᄒ'), (0x326E, 'M', u'가'), (0x326F, 'M', u'나'), (0x3270, 'M', u'다'), (0x3271, 'M', u'라'), (0x3272, 'M', u'마'), (0x3273, 'M', u'바'), (0x3274, 'M', u'사'), (0x3275, 'M', u'아'), (0x3276, 'M', u'자'), (0x3277, 'M', u'차'), (0x3278, 'M', u'카'), (0x3279, 'M', u'타'), (0x327A, 'M', u'파'), (0x327B, 'M', u'하'), (0x327C, 'M', u'참고'), (0x327D, 'M', u'주의'), (0x327E, 'M', u'우'), (0x327F, 'V'), (0x3280, 'M', u'一'), (0x3281, 'M', u'二'), (0x3282, 'M', u'三'), (0x3283, 'M', u'四'), (0x3284, 'M', u'五'), (0x3285, 'M', u'六'), (0x3286, 'M', u'七'), (0x3287, 'M', u'八'), (0x3288, 'M', u'九'), (0x3289, 'M', u'十'), (0x328A, 'M', u'月'), (0x328B, 'M', u'火'), (0x328C, 'M', u'水'), (0x328D, 'M', u'木'), (0x328E, 'M', u'金'), (0x328F, 'M', u'土'), (0x3290, 'M', u'日'), (0x3291, 'M', u'株'), (0x3292, 'M', u'有'), (0x3293, 'M', u'社'), (0x3294, 'M', u'名'), (0x3295, 'M', u'特'), (0x3296, 'M', u'財'), (0x3297, 'M', u'祝'), (0x3298, 'M', u'労'), (0x3299, 'M', u'秘'), (0x329A, 'M', u'男'), (0x329B, 'M', u'女'), (0x329C, 'M', u'適'), (0x329D, 'M', u'優'), (0x329E, 'M', u'印'), (0x329F, 'M', u'注'), (0x32A0, 'M', u'項'), (0x32A1, 'M', u'休'), (0x32A2, 'M', u'写'), (0x32A3, 'M', u'正'), (0x32A4, 'M', u'上'), (0x32A5, 'M', u'中'), (0x32A6, 'M', u'下'), (0x32A7, 'M', u'左'), (0x32A8, 'M', u'右'), (0x32A9, 'M', u'医'), (0x32AA, 'M', u'宗'), (0x32AB, 'M', u'学'), (0x32AC, 'M', u'監'), (0x32AD, 'M', u'企'), ] def _seg_32(): return [ (0x32AE, 'M', u'資'), (0x32AF, 'M', u'協'), (0x32B0, 'M', u'夜'), (0x32B1, 'M', u'36'), (0x32B2, 'M', u'37'), (0x32B3, 'M', u'38'), (0x32B4, 'M', u'39'), (0x32B5, 'M', u'40'), (0x32B6, 'M', u'41'), (0x32B7, 'M', u'42'), (0x32B8, 'M', u'43'), (0x32B9, 'M', u'44'), (0x32BA, 'M', u'45'), (0x32BB, 'M', u'46'), (0x32BC, 'M', u'47'), (0x32BD, 'M', u'48'), (0x32BE, 'M', u'49'), (0x32BF, 'M', u'50'), (0x32C0, 'M', u'1月'), (0x32C1, 'M', u'2月'), (0x32C2, 'M', u'3月'), (0x32C3, 'M', u'4月'), (0x32C4, 'M', u'5月'), (0x32C5, 'M', u'6月'), (0x32C6, 'M', u'7月'), (0x32C7, 'M', u'8月'), (0x32C8, 'M', u'9月'), (0x32C9, 'M', u'10月'), (0x32CA, 'M', u'11月'), (0x32CB, 'M', u'12月'), (0x32CC, 'M', u'hg'), (0x32CD, 'M', u'erg'), (0x32CE, 'M', u'ev'), (0x32CF, 'M', u'ltd'), (0x32D0, 'M', u'ア'), (0x32D1, 'M', u'イ'), (0x32D2, 'M', u'ウ'), (0x32D3, 'M', u'エ'), (0x32D4, 'M', u'オ'), (0x32D5, 'M', u'カ'), (0x32D6, 'M', u'キ'), (0x32D7, 'M', u'ク'), (0x32D8, 'M', u'ケ'), (0x32D9, 'M', u'コ'), (0x32DA, 
'M', u'サ'), (0x32DB, 'M', u'シ'), (0x32DC, 'M', u'ス'), (0x32DD, 'M', u'セ'), (0x32DE, 'M', u'ソ'), (0x32DF, 'M', u'タ'), (0x32E0, 'M', u'チ'), (0x32E1, 'M', u'ツ'), (0x32E2, 'M', u'テ'), (0x32E3, 'M', u'ト'), (0x32E4, 'M', u'ナ'), (0x32E5, 'M', u'ニ'), (0x32E6, 'M', u'ヌ'), (0x32E7, 'M', u'ネ'), (0x32E8, 'M', u'ノ'), (0x32E9, 'M', u'ハ'), (0x32EA, 'M', u'ヒ'), (0x32EB, 'M', u'フ'), (0x32EC, 'M', u'ヘ'), (0x32ED, 'M', u'ホ'), (0x32EE, 'M', u'マ'), (0x32EF, 'M', u'ミ'), (0x32F0, 'M', u'ム'), (0x32F1, 'M', u'メ'), (0x32F2, 'M', u'モ'), (0x32F3, 'M', u'ヤ'), (0x32F4, 'M', u'ユ'), (0x32F5, 'M', u'ヨ'), (0x32F6, 'M', u'ラ'), (0x32F7, 'M', u'リ'), (0x32F8, 'M', u'ル'), (0x32F9, 'M', u'レ'), (0x32FA, 'M', u'ロ'), (0x32FB, 'M', u'ワ'), (0x32FC, 'M', u'ヰ'), (0x32FD, 'M', u'ヱ'), (0x32FE, 'M', u'ヲ'), (0x32FF, 'M', u'令和'), (0x3300, 'M', u'アパート'), (0x3301, 'M', u'アルファ'), (0x3302, 'M', u'アンペア'), (0x3303, 'M', u'アール'), (0x3304, 'M', u'イニング'), (0x3305, 'M', u'インチ'), (0x3306, 'M', u'ウォン'), (0x3307, 'M', u'エスクード'), (0x3308, 'M', u'エーカー'), (0x3309, 'M', u'オンス'), (0x330A, 'M', u'オーム'), (0x330B, 'M', u'カイリ'), (0x330C, 'M', u'カラット'), (0x330D, 'M', u'カロリー'), (0x330E, 'M', u'ガロン'), (0x330F, 'M', u'ガンマ'), (0x3310, 'M', u'ギガ'), (0x3311, 'M', u'ギニー'), ] def _seg_33(): return [ (0x3312, 'M', u'キュリー'), (0x3313, 'M', u'ギルダー'), (0x3314, 'M', u'キロ'), (0x3315, 'M', u'キログラム'), (0x3316, 'M', u'キロメートル'), (0x3317, 'M', u'キロワット'), (0x3318, 'M', u'グラム'), (0x3319, 'M', u'グラムトン'), (0x331A, 'M', u'クルゼイロ'), (0x331B, 'M', u'クローネ'), (0x331C, 'M', u'ケース'), (0x331D, 'M', u'コルナ'), (0x331E, 'M', u'コーポ'), (0x331F, 'M', u'サイクル'), (0x3320, 'M', u'サンチーム'), (0x3321, 'M', u'シリング'), (0x3322, 'M', u'センチ'), (0x3323, 'M', u'セント'), (0x3324, 'M', u'ダース'), (0x3325, 'M', u'デシ'), (0x3326, 'M', u'ドル'), (0x3327, 'M', u'トン'), (0x3328, 'M', u'ナノ'), (0x3329, 'M', u'ノット'), (0x332A, 'M', u'ハイツ'), (0x332B, 'M', u'パーセント'), (0x332C, 'M', u'パーツ'), (0x332D, 'M', u'バーレル'), (0x332E, 'M', u'ピアストル'), (0x332F, 'M', u'ピクル'), (0x3330, 'M', u'ピコ'), (0x3331, 'M', u'ビル'), (0x3332, 'M', u'ファラッド'), (0x3333, 'M', u'フィート'), (0x3334, 'M', u'ブッシェル'), (0x3335, 'M', u'フラン'), (0x3336, 'M', u'ヘクタール'), (0x3337, 'M', u'ペソ'), (0x3338, 'M', u'ペニヒ'), (0x3339, 'M', u'ヘルツ'), (0x333A, 'M', u'ペンス'), (0x333B, 'M', u'ページ'), (0x333C, 'M', u'ベータ'), (0x333D, 'M', u'ポイント'), (0x333E, 'M', u'ボルト'), (0x333F, 'M', u'ホン'), (0x3340, 'M', u'ポンド'), (0x3341, 'M', u'ホール'), (0x3342, 'M', u'ホーン'), (0x3343, 'M', u'マイクロ'), (0x3344, 'M', u'マイル'), (0x3345, 'M', u'マッハ'), (0x3346, 'M', u'マルク'), (0x3347, 'M', u'マンション'), (0x3348, 'M', u'ミクロン'), (0x3349, 'M', u'ミリ'), (0x334A, 'M', u'ミリバール'), (0x334B, 'M', u'メガ'), (0x334C, 'M', u'メガトン'), (0x334D, 'M', u'メートル'), (0x334E, 'M', u'ヤード'), (0x334F, 'M', u'ヤール'), (0x3350, 'M', u'ユアン'), (0x3351, 'M', u'リットル'), (0x3352, 'M', u'リラ'), (0x3353, 'M', u'ルピー'), (0x3354, 'M', u'ルーブル'), (0x3355, 'M', u'レム'), (0x3356, 'M', u'レントゲン'), (0x3357, 'M', u'ワット'), (0x3358, 'M', u'0点'), (0x3359, 'M', u'1点'), (0x335A, 'M', u'2点'), (0x335B, 'M', u'3点'), (0x335C, 'M', u'4点'), (0x335D, 'M', u'5点'), (0x335E, 'M', u'6点'), (0x335F, 'M', u'7点'), (0x3360, 'M', u'8点'), (0x3361, 'M', u'9点'), (0x3362, 'M', u'10点'), (0x3363, 'M', u'11点'), (0x3364, 'M', u'12点'), (0x3365, 'M', u'13点'), (0x3366, 'M', u'14点'), (0x3367, 'M', u'15点'), (0x3368, 'M', u'16点'), (0x3369, 'M', u'17点'), (0x336A, 'M', u'18点'), (0x336B, 'M', u'19点'), (0x336C, 'M', u'20点'), (0x336D, 'M', u'21点'), (0x336E, 'M', u'22点'), (0x336F, 'M', u'23点'), (0x3370, 'M', u'24点'), (0x3371, 'M', u'hpa'), (0x3372, 'M', u'da'), (0x3373, 'M', u'au'), (0x3374, 'M', u'bar'), (0x3375, 'M', u'ov'), 
] def _seg_34(): return [ (0x3376, 'M', u'pc'), (0x3377, 'M', u'dm'), (0x3378, 'M', u'dm2'), (0x3379, 'M', u'dm3'), (0x337A, 'M', u'iu'), (0x337B, 'M', u'平成'), (0x337C, 'M', u'昭和'), (0x337D, 'M', u'大正'), (0x337E, 'M', u'明治'), (0x337F, 'M', u'株式会社'), (0x3380, 'M', u'pa'), (0x3381, 'M', u'na'), (0x3382, 'M', u'μa'), (0x3383, 'M', u'ma'), (0x3384, 'M', u'ka'), (0x3385, 'M', u'kb'), (0x3386, 'M', u'mb'), (0x3387, 'M', u'gb'), (0x3388, 'M', u'cal'), (0x3389, 'M', u'kcal'), (0x338A, 'M', u'pf'), (0x338B, 'M', u'nf'), (0x338C, 'M', u'μf'), (0x338D, 'M', u'μg'), (0x338E, 'M', u'mg'), (0x338F, 'M', u'kg'), (0x3390, 'M', u'hz'), (0x3391, 'M', u'khz'), (0x3392, 'M', u'mhz'), (0x3393, 'M', u'ghz'), (0x3394, 'M', u'thz'), (0x3395, 'M', u'μl'), (0x3396, 'M', u'ml'), (0x3397, 'M', u'dl'), (0x3398, 'M', u'kl'), (0x3399, 'M', u'fm'), (0x339A, 'M', u'nm'), (0x339B, 'M', u'μm'), (0x339C, 'M', u'mm'), (0x339D, 'M', u'cm'), (0x339E, 'M', u'km'), (0x339F, 'M', u'mm2'), (0x33A0, 'M', u'cm2'), (0x33A1, 'M', u'm2'), (0x33A2, 'M', u'km2'), (0x33A3, 'M', u'mm3'), (0x33A4, 'M', u'cm3'), (0x33A5, 'M', u'm3'), (0x33A6, 'M', u'km3'), (0x33A7, 'M', u'm∕s'), (0x33A8, 'M', u'm∕s2'), (0x33A9, 'M', u'pa'), (0x33AA, 'M', u'kpa'), (0x33AB, 'M', u'mpa'), (0x33AC, 'M', u'gpa'), (0x33AD, 'M', u'rad'), (0x33AE, 'M', u'rad∕s'), (0x33AF, 'M', u'rad∕s2'), (0x33B0, 'M', u'ps'), (0x33B1, 'M', u'ns'), (0x33B2, 'M', u'μs'), (0x33B3, 'M', u'ms'), (0x33B4, 'M', u'pv'), (0x33B5, 'M', u'nv'), (0x33B6, 'M', u'μv'), (0x33B7, 'M', u'mv'), (0x33B8, 'M', u'kv'), (0x33B9, 'M', u'mv'), (0x33BA, 'M', u'pw'), (0x33BB, 'M', u'nw'), (0x33BC, 'M', u'μw'), (0x33BD, 'M', u'mw'), (0x33BE, 'M', u'kw'), (0x33BF, 'M', u'mw'), (0x33C0, 'M', u'kω'), (0x33C1, 'M', u'mω'), (0x33C2, 'X'), (0x33C3, 'M', u'bq'), (0x33C4, 'M', u'cc'), (0x33C5, 'M', u'cd'), (0x33C6, 'M', u'c∕kg'), (0x33C7, 'X'), (0x33C8, 'M', u'db'), (0x33C9, 'M', u'gy'), (0x33CA, 'M', u'ha'), (0x33CB, 'M', u'hp'), (0x33CC, 'M', u'in'), (0x33CD, 'M', u'kk'), (0x33CE, 'M', u'km'), (0x33CF, 'M', u'kt'), (0x33D0, 'M', u'lm'), (0x33D1, 'M', u'ln'), (0x33D2, 'M', u'log'), (0x33D3, 'M', u'lx'), (0x33D4, 'M', u'mb'), (0x33D5, 'M', u'mil'), (0x33D6, 'M', u'mol'), (0x33D7, 'M', u'ph'), (0x33D8, 'X'), (0x33D9, 'M', u'ppm'), ] def _seg_35(): return [ (0x33DA, 'M', u'pr'), (0x33DB, 'M', u'sr'), (0x33DC, 'M', u'sv'), (0x33DD, 'M', u'wb'), (0x33DE, 'M', u'v∕m'), (0x33DF, 'M', u'a∕m'), (0x33E0, 'M', u'1日'), (0x33E1, 'M', u'2日'), (0x33E2, 'M', u'3日'), (0x33E3, 'M', u'4日'), (0x33E4, 'M', u'5日'), (0x33E5, 'M', u'6日'), (0x33E6, 'M', u'7日'), (0x33E7, 'M', u'8日'), (0x33E8, 'M', u'9日'), (0x33E9, 'M', u'10日'), (0x33EA, 'M', u'11日'), (0x33EB, 'M', u'12日'), (0x33EC, 'M', u'13日'), (0x33ED, 'M', u'14日'), (0x33EE, 'M', u'15日'), (0x33EF, 'M', u'16日'), (0x33F0, 'M', u'17日'), (0x33F1, 'M', u'18日'), (0x33F2, 'M', u'19日'), (0x33F3, 'M', u'20日'), (0x33F4, 'M', u'21日'), (0x33F5, 'M', u'22日'), (0x33F6, 'M', u'23日'), (0x33F7, 'M', u'24日'), (0x33F8, 'M', u'25日'), (0x33F9, 'M', u'26日'), (0x33FA, 'M', u'27日'), (0x33FB, 'M', u'28日'), (0x33FC, 'M', u'29日'), (0x33FD, 'M', u'30日'), (0x33FE, 'M', u'31日'), (0x33FF, 'M', u'gal'), (0x3400, 'V'), (0x9FFD, 'X'), (0xA000, 'V'), (0xA48D, 'X'), (0xA490, 'V'), (0xA4C7, 'X'), (0xA4D0, 'V'), (0xA62C, 'X'), (0xA640, 'M', u'ꙁ'), (0xA641, 'V'), (0xA642, 'M', u'ꙃ'), (0xA643, 'V'), (0xA644, 'M', u'ꙅ'), (0xA645, 'V'), (0xA646, 'M', u'ꙇ'), (0xA647, 'V'), (0xA648, 'M', u'ꙉ'), (0xA649, 'V'), (0xA64A, 'M', u'ꙋ'), (0xA64B, 'V'), (0xA64C, 'M', u'ꙍ'), (0xA64D, 'V'), (0xA64E, 'M', u'ꙏ'), (0xA64F, 'V'), (0xA650, 'M', 
u'ꙑ'), (0xA651, 'V'), (0xA652, 'M', u'ꙓ'), (0xA653, 'V'), (0xA654, 'M', u'ꙕ'), (0xA655, 'V'), (0xA656, 'M', u'ꙗ'), (0xA657, 'V'), (0xA658, 'M', u'ꙙ'), (0xA659, 'V'), (0xA65A, 'M', u'ꙛ'), (0xA65B, 'V'), (0xA65C, 'M', u'ꙝ'), (0xA65D, 'V'), (0xA65E, 'M', u'ꙟ'), (0xA65F, 'V'), (0xA660, 'M', u'ꙡ'), (0xA661, 'V'), (0xA662, 'M', u'ꙣ'), (0xA663, 'V'), (0xA664, 'M', u'ꙥ'), (0xA665, 'V'), (0xA666, 'M', u'ꙧ'), (0xA667, 'V'), (0xA668, 'M', u'ꙩ'), (0xA669, 'V'), (0xA66A, 'M', u'ꙫ'), (0xA66B, 'V'), (0xA66C, 'M', u'ꙭ'), (0xA66D, 'V'), (0xA680, 'M', u'ꚁ'), (0xA681, 'V'), (0xA682, 'M', u'ꚃ'), (0xA683, 'V'), (0xA684, 'M', u'ꚅ'), (0xA685, 'V'), (0xA686, 'M', u'ꚇ'), (0xA687, 'V'), ] def _seg_36(): return [ (0xA688, 'M', u'ꚉ'), (0xA689, 'V'), (0xA68A, 'M', u'ꚋ'), (0xA68B, 'V'), (0xA68C, 'M', u'ꚍ'), (0xA68D, 'V'), (0xA68E, 'M', u'ꚏ'), (0xA68F, 'V'), (0xA690, 'M', u'ꚑ'), (0xA691, 'V'), (0xA692, 'M', u'ꚓ'), (0xA693, 'V'), (0xA694, 'M', u'ꚕ'), (0xA695, 'V'), (0xA696, 'M', u'ꚗ'), (0xA697, 'V'), (0xA698, 'M', u'ꚙ'), (0xA699, 'V'), (0xA69A, 'M', u'ꚛ'), (0xA69B, 'V'), (0xA69C, 'M', u'ъ'), (0xA69D, 'M', u'ь'), (0xA69E, 'V'), (0xA6F8, 'X'), (0xA700, 'V'), (0xA722, 'M', u'ꜣ'), (0xA723, 'V'), (0xA724, 'M', u'ꜥ'), (0xA725, 'V'), (0xA726, 'M', u'ꜧ'), (0xA727, 'V'), (0xA728, 'M', u'ꜩ'), (0xA729, 'V'), (0xA72A, 'M', u'ꜫ'), (0xA72B, 'V'), (0xA72C, 'M', u'ꜭ'), (0xA72D, 'V'), (0xA72E, 'M', u'ꜯ'), (0xA72F, 'V'), (0xA732, 'M', u'ꜳ'), (0xA733, 'V'), (0xA734, 'M', u'ꜵ'), (0xA735, 'V'), (0xA736, 'M', u'ꜷ'), (0xA737, 'V'), (0xA738, 'M', u'ꜹ'), (0xA739, 'V'), (0xA73A, 'M', u'ꜻ'), (0xA73B, 'V'), (0xA73C, 'M', u'ꜽ'), (0xA73D, 'V'), (0xA73E, 'M', u'ꜿ'), (0xA73F, 'V'), (0xA740, 'M', u'ꝁ'), (0xA741, 'V'), (0xA742, 'M', u'ꝃ'), (0xA743, 'V'), (0xA744, 'M', u'ꝅ'), (0xA745, 'V'), (0xA746, 'M', u'ꝇ'), (0xA747, 'V'), (0xA748, 'M', u'ꝉ'), (0xA749, 'V'), (0xA74A, 'M', u'ꝋ'), (0xA74B, 'V'), (0xA74C, 'M', u'ꝍ'), (0xA74D, 'V'), (0xA74E, 'M', u'ꝏ'), (0xA74F, 'V'), (0xA750, 'M', u'ꝑ'), (0xA751, 'V'), (0xA752, 'M', u'ꝓ'), (0xA753, 'V'), (0xA754, 'M', u'ꝕ'), (0xA755, 'V'), (0xA756, 'M', u'ꝗ'), (0xA757, 'V'), (0xA758, 'M', u'ꝙ'), (0xA759, 'V'), (0xA75A, 'M', u'ꝛ'), (0xA75B, 'V'), (0xA75C, 'M', u'ꝝ'), (0xA75D, 'V'), (0xA75E, 'M', u'ꝟ'), (0xA75F, 'V'), (0xA760, 'M', u'ꝡ'), (0xA761, 'V'), (0xA762, 'M', u'ꝣ'), (0xA763, 'V'), (0xA764, 'M', u'ꝥ'), (0xA765, 'V'), (0xA766, 'M', u'ꝧ'), (0xA767, 'V'), (0xA768, 'M', u'ꝩ'), (0xA769, 'V'), (0xA76A, 'M', u'ꝫ'), (0xA76B, 'V'), (0xA76C, 'M', u'ꝭ'), (0xA76D, 'V'), (0xA76E, 'M', u'ꝯ'), ] def _seg_37(): return [ (0xA76F, 'V'), (0xA770, 'M', u'ꝯ'), (0xA771, 'V'), (0xA779, 'M', u'ꝺ'), (0xA77A, 'V'), (0xA77B, 'M', u'ꝼ'), (0xA77C, 'V'), (0xA77D, 'M', u'ᵹ'), (0xA77E, 'M', u'ꝿ'), (0xA77F, 'V'), (0xA780, 'M', u'ꞁ'), (0xA781, 'V'), (0xA782, 'M', u'ꞃ'), (0xA783, 'V'), (0xA784, 'M', u'ꞅ'), (0xA785, 'V'), (0xA786, 'M', u'ꞇ'), (0xA787, 'V'), (0xA78B, 'M', u'ꞌ'), (0xA78C, 'V'), (0xA78D, 'M', u'ɥ'), (0xA78E, 'V'), (0xA790, 'M', u'ꞑ'), (0xA791, 'V'), (0xA792, 'M', u'ꞓ'), (0xA793, 'V'), (0xA796, 'M', u'ꞗ'), (0xA797, 'V'), (0xA798, 'M', u'ꞙ'), (0xA799, 'V'), (0xA79A, 'M', u'ꞛ'), (0xA79B, 'V'), (0xA79C, 'M', u'ꞝ'), (0xA79D, 'V'), (0xA79E, 'M', u'ꞟ'), (0xA79F, 'V'), (0xA7A0, 'M', u'ꞡ'), (0xA7A1, 'V'), (0xA7A2, 'M', u'ꞣ'), (0xA7A3, 'V'), (0xA7A4, 'M', u'ꞥ'), (0xA7A5, 'V'), (0xA7A6, 'M', u'ꞧ'), (0xA7A7, 'V'), (0xA7A8, 'M', u'ꞩ'), (0xA7A9, 'V'), (0xA7AA, 'M', u'ɦ'), (0xA7AB, 'M', u'ɜ'), (0xA7AC, 'M', u'ɡ'), (0xA7AD, 'M', u'ɬ'), (0xA7AE, 'M', u'ɪ'), (0xA7AF, 'V'), (0xA7B0, 'M', u'ʞ'), (0xA7B1, 'M', u'ʇ'), (0xA7B2, 'M', u'ʝ'), (0xA7B3, 'M', u'ꭓ'), 
(0xA7B4, 'M', u'ꞵ'), (0xA7B5, 'V'), (0xA7B6, 'M', u'ꞷ'), (0xA7B7, 'V'), (0xA7B8, 'M', u'ꞹ'), (0xA7B9, 'V'), (0xA7BA, 'M', u'ꞻ'), (0xA7BB, 'V'), (0xA7BC, 'M', u'ꞽ'), (0xA7BD, 'V'), (0xA7BE, 'M', u'ꞿ'), (0xA7BF, 'V'), (0xA7C0, 'X'), (0xA7C2, 'M', u'ꟃ'), (0xA7C3, 'V'), (0xA7C4, 'M', u'ꞔ'), (0xA7C5, 'M', u'ʂ'), (0xA7C6, 'M', u'ᶎ'), (0xA7C7, 'M', u'ꟈ'), (0xA7C8, 'V'), (0xA7C9, 'M', u'ꟊ'), (0xA7CA, 'V'), (0xA7CB, 'X'), (0xA7F5, 'M', u'ꟶ'), (0xA7F6, 'V'), (0xA7F8, 'M', u'ħ'), (0xA7F9, 'M', u'œ'), (0xA7FA, 'V'), (0xA82D, 'X'), (0xA830, 'V'), (0xA83A, 'X'), (0xA840, 'V'), (0xA878, 'X'), (0xA880, 'V'), (0xA8C6, 'X'), (0xA8CE, 'V'), (0xA8DA, 'X'), (0xA8E0, 'V'), (0xA954, 'X'), (0xA95F, 'V'), (0xA97D, 'X'), (0xA980, 'V'), (0xA9CE, 'X'), (0xA9CF, 'V'), ] def _seg_38(): return [ (0xA9DA, 'X'), (0xA9DE, 'V'), (0xA9FF, 'X'), (0xAA00, 'V'), (0xAA37, 'X'), (0xAA40, 'V'), (0xAA4E, 'X'), (0xAA50, 'V'), (0xAA5A, 'X'), (0xAA5C, 'V'), (0xAAC3, 'X'), (0xAADB, 'V'), (0xAAF7, 'X'), (0xAB01, 'V'), (0xAB07, 'X'), (0xAB09, 'V'), (0xAB0F, 'X'), (0xAB11, 'V'), (0xAB17, 'X'), (0xAB20, 'V'), (0xAB27, 'X'), (0xAB28, 'V'), (0xAB2F, 'X'), (0xAB30, 'V'), (0xAB5C, 'M', u'ꜧ'), (0xAB5D, 'M', u'ꬷ'), (0xAB5E, 'M', u'ɫ'), (0xAB5F, 'M', u'ꭒ'), (0xAB60, 'V'), (0xAB69, 'M', u'ʍ'), (0xAB6A, 'V'), (0xAB6C, 'X'), (0xAB70, 'M', u'Ꭰ'), (0xAB71, 'M', u'Ꭱ'), (0xAB72, 'M', u'Ꭲ'), (0xAB73, 'M', u'Ꭳ'), (0xAB74, 'M', u'Ꭴ'), (0xAB75, 'M', u'Ꭵ'), (0xAB76, 'M', u'Ꭶ'), (0xAB77, 'M', u'Ꭷ'), (0xAB78, 'M', u'Ꭸ'), (0xAB79, 'M', u'Ꭹ'), (0xAB7A, 'M', u'Ꭺ'), (0xAB7B, 'M', u'Ꭻ'), (0xAB7C, 'M', u'Ꭼ'), (0xAB7D, 'M', u'Ꭽ'), (0xAB7E, 'M', u'Ꭾ'), (0xAB7F, 'M', u'Ꭿ'), (0xAB80, 'M', u'Ꮀ'), (0xAB81, 'M', u'Ꮁ'), (0xAB82, 'M', u'Ꮂ'), (0xAB83, 'M', u'Ꮃ'), (0xAB84, 'M', u'Ꮄ'), (0xAB85, 'M', u'Ꮅ'), (0xAB86, 'M', u'Ꮆ'), (0xAB87, 'M', u'Ꮇ'), (0xAB88, 'M', u'Ꮈ'), (0xAB89, 'M', u'Ꮉ'), (0xAB8A, 'M', u'Ꮊ'), (0xAB8B, 'M', u'Ꮋ'), (0xAB8C, 'M', u'Ꮌ'), (0xAB8D, 'M', u'Ꮍ'), (0xAB8E, 'M', u'Ꮎ'), (0xAB8F, 'M', u'Ꮏ'), (0xAB90, 'M', u'Ꮐ'), (0xAB91, 'M', u'Ꮑ'), (0xAB92, 'M', u'Ꮒ'), (0xAB93, 'M', u'Ꮓ'), (0xAB94, 'M', u'Ꮔ'), (0xAB95, 'M', u'Ꮕ'), (0xAB96, 'M', u'Ꮖ'), (0xAB97, 'M', u'Ꮗ'), (0xAB98, 'M', u'Ꮘ'), (0xAB99, 'M', u'Ꮙ'), (0xAB9A, 'M', u'Ꮚ'), (0xAB9B, 'M', u'Ꮛ'), (0xAB9C, 'M', u'Ꮜ'), (0xAB9D, 'M', u'Ꮝ'), (0xAB9E, 'M', u'Ꮞ'), (0xAB9F, 'M', u'Ꮟ'), (0xABA0, 'M', u'Ꮠ'), (0xABA1, 'M', u'Ꮡ'), (0xABA2, 'M', u'Ꮢ'), (0xABA3, 'M', u'Ꮣ'), (0xABA4, 'M', u'Ꮤ'), (0xABA5, 'M', u'Ꮥ'), (0xABA6, 'M', u'Ꮦ'), (0xABA7, 'M', u'Ꮧ'), (0xABA8, 'M', u'Ꮨ'), (0xABA9, 'M', u'Ꮩ'), (0xABAA, 'M', u'Ꮪ'), (0xABAB, 'M', u'Ꮫ'), (0xABAC, 'M', u'Ꮬ'), (0xABAD, 'M', u'Ꮭ'), (0xABAE, 'M', u'Ꮮ'), (0xABAF, 'M', u'Ꮯ'), (0xABB0, 'M', u'Ꮰ'), (0xABB1, 'M', u'Ꮱ'), (0xABB2, 'M', u'Ꮲ'), (0xABB3, 'M', u'Ꮳ'), ] def _seg_39(): return [ (0xABB4, 'M', u'Ꮴ'), (0xABB5, 'M', u'Ꮵ'), (0xABB6, 'M', u'Ꮶ'), (0xABB7, 'M', u'Ꮷ'), (0xABB8, 'M', u'Ꮸ'), (0xABB9, 'M', u'Ꮹ'), (0xABBA, 'M', u'Ꮺ'), (0xABBB, 'M', u'Ꮻ'), (0xABBC, 'M', u'Ꮼ'), (0xABBD, 'M', u'Ꮽ'), (0xABBE, 'M', u'Ꮾ'), (0xABBF, 'M', u'Ꮿ'), (0xABC0, 'V'), (0xABEE, 'X'), (0xABF0, 'V'), (0xABFA, 'X'), (0xAC00, 'V'), (0xD7A4, 'X'), (0xD7B0, 'V'), (0xD7C7, 'X'), (0xD7CB, 'V'), (0xD7FC, 'X'), (0xF900, 'M', u'豈'), (0xF901, 'M', u'更'), (0xF902, 'M', u'車'), (0xF903, 'M', u'賈'), (0xF904, 'M', u'滑'), (0xF905, 'M', u'串'), (0xF906, 'M', u'句'), (0xF907, 'M', u'龜'), (0xF909, 'M', u'契'), (0xF90A, 'M', u'金'), (0xF90B, 'M', u'喇'), (0xF90C, 'M', u'奈'), (0xF90D, 'M', u'懶'), (0xF90E, 'M', u'癩'), (0xF90F, 'M', u'羅'), (0xF910, 'M', u'蘿'), (0xF911, 'M', u'螺'), (0xF912, 'M', u'裸'), (0xF913, 'M', u'邏'), (0xF914, 'M', 
u'樂'), (0xF915, 'M', u'洛'), (0xF916, 'M', u'烙'), (0xF917, 'M', u'珞'), (0xF918, 'M', u'落'), (0xF919, 'M', u'酪'), (0xF91A, 'M', u'駱'), (0xF91B, 'M', u'亂'), (0xF91C, 'M', u'卵'), (0xF91D, 'M', u'欄'), (0xF91E, 'M', u'爛'), (0xF91F, 'M', u'蘭'), (0xF920, 'M', u'鸞'), (0xF921, 'M', u'嵐'), (0xF922, 'M', u'濫'), (0xF923, 'M', u'藍'), (0xF924, 'M', u'襤'), (0xF925, 'M', u'拉'), (0xF926, 'M', u'臘'), (0xF927, 'M', u'蠟'), (0xF928, 'M', u'廊'), (0xF929, 'M', u'朗'), (0xF92A, 'M', u'浪'), (0xF92B, 'M', u'狼'), (0xF92C, 'M', u'郎'), (0xF92D, 'M', u'來'), (0xF92E, 'M', u'冷'), (0xF92F, 'M', u'勞'), (0xF930, 'M', u'擄'), (0xF931, 'M', u'櫓'), (0xF932, 'M', u'爐'), (0xF933, 'M', u'盧'), (0xF934, 'M', u'老'), (0xF935, 'M', u'蘆'), (0xF936, 'M', u'虜'), (0xF937, 'M', u'路'), (0xF938, 'M', u'露'), (0xF939, 'M', u'魯'), (0xF93A, 'M', u'鷺'), (0xF93B, 'M', u'碌'), (0xF93C, 'M', u'祿'), (0xF93D, 'M', u'綠'), (0xF93E, 'M', u'菉'), (0xF93F, 'M', u'錄'), (0xF940, 'M', u'鹿'), (0xF941, 'M', u'論'), (0xF942, 'M', u'壟'), (0xF943, 'M', u'弄'), (0xF944, 'M', u'籠'), (0xF945, 'M', u'聾'), (0xF946, 'M', u'牢'), (0xF947, 'M', u'磊'), (0xF948, 'M', u'賂'), (0xF949, 'M', u'雷'), (0xF94A, 'M', u'壘'), (0xF94B, 'M', u'屢'), (0xF94C, 'M', u'樓'), (0xF94D, 'M', u'淚'), (0xF94E, 'M', u'漏'), ] def _seg_40(): return [ (0xF94F, 'M', u'累'), (0xF950, 'M', u'縷'), (0xF951, 'M', u'陋'), (0xF952, 'M', u'勒'), (0xF953, 'M', u'肋'), (0xF954, 'M', u'凜'), (0xF955, 'M', u'凌'), (0xF956, 'M', u'稜'), (0xF957, 'M', u'綾'), (0xF958, 'M', u'菱'), (0xF959, 'M', u'陵'), (0xF95A, 'M', u'讀'), (0xF95B, 'M', u'拏'), (0xF95C, 'M', u'樂'), (0xF95D, 'M', u'諾'), (0xF95E, 'M', u'丹'), (0xF95F, 'M', u'寧'), (0xF960, 'M', u'怒'), (0xF961, 'M', u'率'), (0xF962, 'M', u'異'), (0xF963, 'M', u'北'), (0xF964, 'M', u'磻'), (0xF965, 'M', u'便'), (0xF966, 'M', u'復'), (0xF967, 'M', u'不'), (0xF968, 'M', u'泌'), (0xF969, 'M', u'數'), (0xF96A, 'M', u'索'), (0xF96B, 'M', u'參'), (0xF96C, 'M', u'塞'), (0xF96D, 'M', u'省'), (0xF96E, 'M', u'葉'), (0xF96F, 'M', u'說'), (0xF970, 'M', u'殺'), (0xF971, 'M', u'辰'), (0xF972, 'M', u'沈'), (0xF973, 'M', u'拾'), (0xF974, 'M', u'若'), (0xF975, 'M', u'掠'), (0xF976, 'M', u'略'), (0xF977, 'M', u'亮'), (0xF978, 'M', u'兩'), (0xF979, 'M', u'凉'), (0xF97A, 'M', u'梁'), (0xF97B, 'M', u'糧'), (0xF97C, 'M', u'良'), (0xF97D, 'M', u'諒'), (0xF97E, 'M', u'量'), (0xF97F, 'M', u'勵'), (0xF980, 'M', u'呂'), (0xF981, 'M', u'女'), (0xF982, 'M', u'廬'), (0xF983, 'M', u'旅'), (0xF984, 'M', u'濾'), (0xF985, 'M', u'礪'), (0xF986, 'M', u'閭'), (0xF987, 'M', u'驪'), (0xF988, 'M', u'麗'), (0xF989, 'M', u'黎'), (0xF98A, 'M', u'力'), (0xF98B, 'M', u'曆'), (0xF98C, 'M', u'歷'), (0xF98D, 'M', u'轢'), (0xF98E, 'M', u'年'), (0xF98F, 'M', u'憐'), (0xF990, 'M', u'戀'), (0xF991, 'M', u'撚'), (0xF992, 'M', u'漣'), (0xF993, 'M', u'煉'), (0xF994, 'M', u'璉'), (0xF995, 'M', u'秊'), (0xF996, 'M', u'練'), (0xF997, 'M', u'聯'), (0xF998, 'M', u'輦'), (0xF999, 'M', u'蓮'), (0xF99A, 'M', u'連'), (0xF99B, 'M', u'鍊'), (0xF99C, 'M', u'列'), (0xF99D, 'M', u'劣'), (0xF99E, 'M', u'咽'), (0xF99F, 'M', u'烈'), (0xF9A0, 'M', u'裂'), (0xF9A1, 'M', u'說'), (0xF9A2, 'M', u'廉'), (0xF9A3, 'M', u'念'), (0xF9A4, 'M', u'捻'), (0xF9A5, 'M', u'殮'), (0xF9A6, 'M', u'簾'), (0xF9A7, 'M', u'獵'), (0xF9A8, 'M', u'令'), (0xF9A9, 'M', u'囹'), (0xF9AA, 'M', u'寧'), (0xF9AB, 'M', u'嶺'), (0xF9AC, 'M', u'怜'), (0xF9AD, 'M', u'玲'), (0xF9AE, 'M', u'瑩'), (0xF9AF, 'M', u'羚'), (0xF9B0, 'M', u'聆'), (0xF9B1, 'M', u'鈴'), (0xF9B2, 'M', u'零'), ] def _seg_41(): return [ (0xF9B3, 'M', u'靈'), (0xF9B4, 'M', u'領'), (0xF9B5, 'M', u'例'), (0xF9B6, 'M', u'禮'), (0xF9B7, 'M', u'醴'), (0xF9B8, 'M', u'隸'), (0xF9B9, 'M', u'惡'), (0xF9BA, 'M', u'了'), (0xF9BB, 
'M', u'僚'), (0xF9BC, 'M', u'寮'), (0xF9BD, 'M', u'尿'), (0xF9BE, 'M', u'料'), (0xF9BF, 'M', u'樂'), (0xF9C0, 'M', u'燎'), (0xF9C1, 'M', u'療'), (0xF9C2, 'M', u'蓼'), (0xF9C3, 'M', u'遼'), (0xF9C4, 'M', u'龍'), (0xF9C5, 'M', u'暈'), (0xF9C6, 'M', u'阮'), (0xF9C7, 'M', u'劉'), (0xF9C8, 'M', u'杻'), (0xF9C9, 'M', u'柳'), (0xF9CA, 'M', u'流'), (0xF9CB, 'M', u'溜'), (0xF9CC, 'M', u'琉'), (0xF9CD, 'M', u'留'), (0xF9CE, 'M', u'硫'), (0xF9CF, 'M', u'紐'), (0xF9D0, 'M', u'類'), (0xF9D1, 'M', u'六'), (0xF9D2, 'M', u'戮'), (0xF9D3, 'M', u'陸'), (0xF9D4, 'M', u'倫'), (0xF9D5, 'M', u'崙'), (0xF9D6, 'M', u'淪'), (0xF9D7, 'M', u'輪'), (0xF9D8, 'M', u'律'), (0xF9D9, 'M', u'慄'), (0xF9DA, 'M', u'栗'), (0xF9DB, 'M', u'率'), (0xF9DC, 'M', u'隆'), (0xF9DD, 'M', u'利'), (0xF9DE, 'M', u'吏'), (0xF9DF, 'M', u'履'), (0xF9E0, 'M', u'易'), (0xF9E1, 'M', u'李'), (0xF9E2, 'M', u'梨'), (0xF9E3, 'M', u'泥'), (0xF9E4, 'M', u'理'), (0xF9E5, 'M', u'痢'), (0xF9E6, 'M', u'罹'), (0xF9E7, 'M', u'裏'), (0xF9E8, 'M', u'裡'), (0xF9E9, 'M', u'里'), (0xF9EA, 'M', u'離'), (0xF9EB, 'M', u'匿'), (0xF9EC, 'M', u'溺'), (0xF9ED, 'M', u'吝'), (0xF9EE, 'M', u'燐'), (0xF9EF, 'M', u'璘'), (0xF9F0, 'M', u'藺'), (0xF9F1, 'M', u'隣'), (0xF9F2, 'M', u'鱗'), (0xF9F3, 'M', u'麟'), (0xF9F4, 'M', u'林'), (0xF9F5, 'M', u'淋'), (0xF9F6, 'M', u'臨'), (0xF9F7, 'M', u'立'), (0xF9F8, 'M', u'笠'), (0xF9F9, 'M', u'粒'), (0xF9FA, 'M', u'狀'), (0xF9FB, 'M', u'炙'), (0xF9FC, 'M', u'識'), (0xF9FD, 'M', u'什'), (0xF9FE, 'M', u'茶'), (0xF9FF, 'M', u'刺'), (0xFA00, 'M', u'切'), (0xFA01, 'M', u'度'), (0xFA02, 'M', u'拓'), (0xFA03, 'M', u'糖'), (0xFA04, 'M', u'宅'), (0xFA05, 'M', u'洞'), (0xFA06, 'M', u'暴'), (0xFA07, 'M', u'輻'), (0xFA08, 'M', u'行'), (0xFA09, 'M', u'降'), (0xFA0A, 'M', u'見'), (0xFA0B, 'M', u'廓'), (0xFA0C, 'M', u'兀'), (0xFA0D, 'M', u'嗀'), (0xFA0E, 'V'), (0xFA10, 'M', u'塚'), (0xFA11, 'V'), (0xFA12, 'M', u'晴'), (0xFA13, 'V'), (0xFA15, 'M', u'凞'), (0xFA16, 'M', u'猪'), (0xFA17, 'M', u'益'), (0xFA18, 'M', u'礼'), ] def _seg_42(): return [ (0xFA19, 'M', u'神'), (0xFA1A, 'M', u'祥'), (0xFA1B, 'M', u'福'), (0xFA1C, 'M', u'靖'), (0xFA1D, 'M', u'精'), (0xFA1E, 'M', u'羽'), (0xFA1F, 'V'), (0xFA20, 'M', u'蘒'), (0xFA21, 'V'), (0xFA22, 'M', u'諸'), (0xFA23, 'V'), (0xFA25, 'M', u'逸'), (0xFA26, 'M', u'都'), (0xFA27, 'V'), (0xFA2A, 'M', u'飯'), (0xFA2B, 'M', u'飼'), (0xFA2C, 'M', u'館'), (0xFA2D, 'M', u'鶴'), (0xFA2E, 'M', u'郞'), (0xFA2F, 'M', u'隷'), (0xFA30, 'M', u'侮'), (0xFA31, 'M', u'僧'), (0xFA32, 'M', u'免'), (0xFA33, 'M', u'勉'), (0xFA34, 'M', u'勤'), (0xFA35, 'M', u'卑'), (0xFA36, 'M', u'喝'), (0xFA37, 'M', u'嘆'), (0xFA38, 'M', u'器'), (0xFA39, 'M', u'塀'), (0xFA3A, 'M', u'墨'), (0xFA3B, 'M', u'層'), (0xFA3C, 'M', u'屮'), (0xFA3D, 'M', u'悔'), (0xFA3E, 'M', u'慨'), (0xFA3F, 'M', u'憎'), (0xFA40, 'M', u'懲'), (0xFA41, 'M', u'敏'), (0xFA42, 'M', u'既'), (0xFA43, 'M', u'暑'), (0xFA44, 'M', u'梅'), (0xFA45, 'M', u'海'), (0xFA46, 'M', u'渚'), (0xFA47, 'M', u'漢'), (0xFA48, 'M', u'煮'), (0xFA49, 'M', u'爫'), (0xFA4A, 'M', u'琢'), (0xFA4B, 'M', u'碑'), (0xFA4C, 'M', u'社'), (0xFA4D, 'M', u'祉'), (0xFA4E, 'M', u'祈'), (0xFA4F, 'M', u'祐'), (0xFA50, 'M', u'祖'), (0xFA51, 'M', u'祝'), (0xFA52, 'M', u'禍'), (0xFA53, 'M', u'禎'), (0xFA54, 'M', u'穀'), (0xFA55, 'M', u'突'), (0xFA56, 'M', u'節'), (0xFA57, 'M', u'練'), (0xFA58, 'M', u'縉'), (0xFA59, 'M', u'繁'), (0xFA5A, 'M', u'署'), (0xFA5B, 'M', u'者'), (0xFA5C, 'M', u'臭'), (0xFA5D, 'M', u'艹'), (0xFA5F, 'M', u'著'), (0xFA60, 'M', u'褐'), (0xFA61, 'M', u'視'), (0xFA62, 'M', u'謁'), (0xFA63, 'M', u'謹'), (0xFA64, 'M', u'賓'), (0xFA65, 'M', u'贈'), (0xFA66, 'M', u'辶'), (0xFA67, 'M', u'逸'), (0xFA68, 'M', u'難'), (0xFA69, 'M', u'響'), (0xFA6A, 'M', u'頻'), (0xFA6B, 
'M', u'恵'), (0xFA6C, 'M', u'𤋮'), (0xFA6D, 'M', u'舘'), (0xFA6E, 'X'), (0xFA70, 'M', u'並'), (0xFA71, 'M', u'况'), (0xFA72, 'M', u'全'), (0xFA73, 'M', u'侀'), (0xFA74, 'M', u'充'), (0xFA75, 'M', u'冀'), (0xFA76, 'M', u'勇'), (0xFA77, 'M', u'勺'), (0xFA78, 'M', u'喝'), (0xFA79, 'M', u'啕'), (0xFA7A, 'M', u'喙'), (0xFA7B, 'M', u'嗢'), (0xFA7C, 'M', u'塚'), (0xFA7D, 'M', u'墳'), (0xFA7E, 'M', u'奄'), (0xFA7F, 'M', u'奔'), (0xFA80, 'M', u'婢'), (0xFA81, 'M', u'嬨'), ] def _seg_43(): return [ (0xFA82, 'M', u'廒'), (0xFA83, 'M', u'廙'), (0xFA84, 'M', u'彩'), (0xFA85, 'M', u'徭'), (0xFA86, 'M', u'惘'), (0xFA87, 'M', u'慎'), (0xFA88, 'M', u'愈'), (0xFA89, 'M', u'憎'), (0xFA8A, 'M', u'慠'), (0xFA8B, 'M', u'懲'), (0xFA8C, 'M', u'戴'), (0xFA8D, 'M', u'揄'), (0xFA8E, 'M', u'搜'), (0xFA8F, 'M', u'摒'), (0xFA90, 'M', u'敖'), (0xFA91, 'M', u'晴'), (0xFA92, 'M', u'朗'), (0xFA93, 'M', u'望'), (0xFA94, 'M', u'杖'), (0xFA95, 'M', u'歹'), (0xFA96, 'M', u'殺'), (0xFA97, 'M', u'流'), (0xFA98, 'M', u'滛'), (0xFA99, 'M', u'滋'), (0xFA9A, 'M', u'漢'), (0xFA9B, 'M', u'瀞'), (0xFA9C, 'M', u'煮'), (0xFA9D, 'M', u'瞧'), (0xFA9E, 'M', u'爵'), (0xFA9F, 'M', u'犯'), (0xFAA0, 'M', u'猪'), (0xFAA1, 'M', u'瑱'), (0xFAA2, 'M', u'甆'), (0xFAA3, 'M', u'画'), (0xFAA4, 'M', u'瘝'), (0xFAA5, 'M', u'瘟'), (0xFAA6, 'M', u'益'), (0xFAA7, 'M', u'盛'), (0xFAA8, 'M', u'直'), (0xFAA9, 'M', u'睊'), (0xFAAA, 'M', u'着'), (0xFAAB, 'M', u'磌'), (0xFAAC, 'M', u'窱'), (0xFAAD, 'M', u'節'), (0xFAAE, 'M', u'类'), (0xFAAF, 'M', u'絛'), (0xFAB0, 'M', u'練'), (0xFAB1, 'M', u'缾'), (0xFAB2, 'M', u'者'), (0xFAB3, 'M', u'荒'), (0xFAB4, 'M', u'華'), (0xFAB5, 'M', u'蝹'), (0xFAB6, 'M', u'襁'), (0xFAB7, 'M', u'覆'), (0xFAB8, 'M', u'視'), (0xFAB9, 'M', u'調'), (0xFABA, 'M', u'諸'), (0xFABB, 'M', u'請'), (0xFABC, 'M', u'謁'), (0xFABD, 'M', u'諾'), (0xFABE, 'M', u'諭'), (0xFABF, 'M', u'謹'), (0xFAC0, 'M', u'變'), (0xFAC1, 'M', u'贈'), (0xFAC2, 'M', u'輸'), (0xFAC3, 'M', u'遲'), (0xFAC4, 'M', u'醙'), (0xFAC5, 'M', u'鉶'), (0xFAC6, 'M', u'陼'), (0xFAC7, 'M', u'難'), (0xFAC8, 'M', u'靖'), (0xFAC9, 'M', u'韛'), (0xFACA, 'M', u'響'), (0xFACB, 'M', u'頋'), (0xFACC, 'M', u'頻'), (0xFACD, 'M', u'鬒'), (0xFACE, 'M', u'龜'), (0xFACF, 'M', u'𢡊'), (0xFAD0, 'M', u'𢡄'), (0xFAD1, 'M', u'𣏕'), (0xFAD2, 'M', u'㮝'), (0xFAD3, 'M', u'䀘'), (0xFAD4, 'M', u'䀹'), (0xFAD5, 'M', u'𥉉'), (0xFAD6, 'M', u'𥳐'), (0xFAD7, 'M', u'𧻓'), (0xFAD8, 'M', u'齃'), (0xFAD9, 'M', u'龎'), (0xFADA, 'X'), (0xFB00, 'M', u'ff'), (0xFB01, 'M', u'fi'), (0xFB02, 'M', u'fl'), (0xFB03, 'M', u'ffi'), (0xFB04, 'M', u'ffl'), (0xFB05, 'M', u'st'), (0xFB07, 'X'), (0xFB13, 'M', u'մն'), (0xFB14, 'M', u'մե'), (0xFB15, 'M', u'մի'), (0xFB16, 'M', u'վն'), ] def _seg_44(): return [ (0xFB17, 'M', u'մխ'), (0xFB18, 'X'), (0xFB1D, 'M', u'יִ'), (0xFB1E, 'V'), (0xFB1F, 'M', u'ײַ'), (0xFB20, 'M', u'ע'), (0xFB21, 'M', u'א'), (0xFB22, 'M', u'ד'), (0xFB23, 'M', u'ה'), (0xFB24, 'M', u'כ'), (0xFB25, 'M', u'ל'), (0xFB26, 'M', u'ם'), (0xFB27, 'M', u'ר'), (0xFB28, 'M', u'ת'), (0xFB29, '3', u'+'), (0xFB2A, 'M', u'שׁ'), (0xFB2B, 'M', u'שׂ'), (0xFB2C, 'M', u'שּׁ'), (0xFB2D, 'M', u'שּׂ'), (0xFB2E, 'M', u'אַ'), (0xFB2F, 'M', u'אָ'), (0xFB30, 'M', u'אּ'), (0xFB31, 'M', u'בּ'), (0xFB32, 'M', u'גּ'), (0xFB33, 'M', u'דּ'), (0xFB34, 'M', u'הּ'), (0xFB35, 'M', u'וּ'), (0xFB36, 'M', u'זּ'), (0xFB37, 'X'), (0xFB38, 'M', u'טּ'), (0xFB39, 'M', u'יּ'), (0xFB3A, 'M', u'ךּ'), (0xFB3B, 'M', u'כּ'), (0xFB3C, 'M', u'לּ'), (0xFB3D, 'X'), (0xFB3E, 'M', u'מּ'), (0xFB3F, 'X'), (0xFB40, 'M', u'נּ'), (0xFB41, 'M', u'סּ'), (0xFB42, 'X'), (0xFB43, 'M', u'ףּ'), (0xFB44, 'M', u'פּ'), (0xFB45, 'X'), (0xFB46, 'M', u'צּ'), (0xFB47, 'M', u'קּ'), (0xFB48, 'M', u'רּ'), 
(0xFB49, 'M', u'שּ'), (0xFB4A, 'M', u'תּ'), (0xFB4B, 'M', u'וֹ'), (0xFB4C, 'M', u'בֿ'), (0xFB4D, 'M', u'כֿ'), (0xFB4E, 'M', u'פֿ'), (0xFB4F, 'M', u'אל'), (0xFB50, 'M', u'ٱ'), (0xFB52, 'M', u'ٻ'), (0xFB56, 'M', u'پ'), (0xFB5A, 'M', u'ڀ'), (0xFB5E, 'M', u'ٺ'), (0xFB62, 'M', u'ٿ'), (0xFB66, 'M', u'ٹ'), (0xFB6A, 'M', u'ڤ'), (0xFB6E, 'M', u'ڦ'), (0xFB72, 'M', u'ڄ'), (0xFB76, 'M', u'ڃ'), (0xFB7A, 'M', u'چ'), (0xFB7E, 'M', u'ڇ'), (0xFB82, 'M', u'ڍ'), (0xFB84, 'M', u'ڌ'), (0xFB86, 'M', u'ڎ'), (0xFB88, 'M', u'ڈ'), (0xFB8A, 'M', u'ژ'), (0xFB8C, 'M', u'ڑ'), (0xFB8E, 'M', u'ک'), (0xFB92, 'M', u'گ'), (0xFB96, 'M', u'ڳ'), (0xFB9A, 'M', u'ڱ'), (0xFB9E, 'M', u'ں'), (0xFBA0, 'M', u'ڻ'), (0xFBA4, 'M', u'ۀ'), (0xFBA6, 'M', u'ہ'), (0xFBAA, 'M', u'ھ'), (0xFBAE, 'M', u'ے'), (0xFBB0, 'M', u'ۓ'), (0xFBB2, 'V'), (0xFBC2, 'X'), (0xFBD3, 'M', u'ڭ'), (0xFBD7, 'M', u'ۇ'), (0xFBD9, 'M', u'ۆ'), (0xFBDB, 'M', u'ۈ'), (0xFBDD, 'M', u'ۇٴ'), (0xFBDE, 'M', u'ۋ'), (0xFBE0, 'M', u'ۅ'), (0xFBE2, 'M', u'ۉ'), (0xFBE4, 'M', u'ې'), (0xFBE8, 'M', u'ى'), (0xFBEA, 'M', u'ئا'), (0xFBEC, 'M', u'ئە'), (0xFBEE, 'M', u'ئو'), (0xFBF0, 'M', u'ئۇ'), (0xFBF2, 'M', u'ئۆ'), ] def _seg_45(): return [ (0xFBF4, 'M', u'ئۈ'), (0xFBF6, 'M', u'ئې'), (0xFBF9, 'M', u'ئى'), (0xFBFC, 'M', u'ی'), (0xFC00, 'M', u'ئج'), (0xFC01, 'M', u'ئح'), (0xFC02, 'M', u'ئم'), (0xFC03, 'M', u'ئى'), (0xFC04, 'M', u'ئي'), (0xFC05, 'M', u'بج'), (0xFC06, 'M', u'بح'), (0xFC07, 'M', u'بخ'), (0xFC08, 'M', u'بم'), (0xFC09, 'M', u'بى'), (0xFC0A, 'M', u'بي'), (0xFC0B, 'M', u'تج'), (0xFC0C, 'M', u'تح'), (0xFC0D, 'M', u'تخ'), (0xFC0E, 'M', u'تم'), (0xFC0F, 'M', u'تى'), (0xFC10, 'M', u'تي'), (0xFC11, 'M', u'ثج'), (0xFC12, 'M', u'ثم'), (0xFC13, 'M', u'ثى'), (0xFC14, 'M', u'ثي'), (0xFC15, 'M', u'جح'), (0xFC16, 'M', u'جم'), (0xFC17, 'M', u'حج'), (0xFC18, 'M', u'حم'), (0xFC19, 'M', u'خج'), (0xFC1A, 'M', u'خح'), (0xFC1B, 'M', u'خم'), (0xFC1C, 'M', u'سج'), (0xFC1D, 'M', u'سح'), (0xFC1E, 'M', u'سخ'), (0xFC1F, 'M', u'سم'), (0xFC20, 'M', u'صح'), (0xFC21, 'M', u'صم'), (0xFC22, 'M', u'ضج'), (0xFC23, 'M', u'ضح'), (0xFC24, 'M', u'ضخ'), (0xFC25, 'M', u'ضم'), (0xFC26, 'M', u'طح'), (0xFC27, 'M', u'طم'), (0xFC28, 'M', u'ظم'), (0xFC29, 'M', u'عج'), (0xFC2A, 'M', u'عم'), (0xFC2B, 'M', u'غج'), (0xFC2C, 'M', u'غم'), (0xFC2D, 'M', u'فج'), (0xFC2E, 'M', u'فح'), (0xFC2F, 'M', u'فخ'), (0xFC30, 'M', u'فم'), (0xFC31, 'M', u'فى'), (0xFC32, 'M', u'في'), (0xFC33, 'M', u'قح'), (0xFC34, 'M', u'قم'), (0xFC35, 'M', u'قى'), (0xFC36, 'M', u'قي'), (0xFC37, 'M', u'كا'), (0xFC38, 'M', u'كج'), (0xFC39, 'M', u'كح'), (0xFC3A, 'M', u'كخ'), (0xFC3B, 'M', u'كل'), (0xFC3C, 'M', u'كم'), (0xFC3D, 'M', u'كى'), (0xFC3E, 'M', u'كي'), (0xFC3F, 'M', u'لج'), (0xFC40, 'M', u'لح'), (0xFC41, 'M', u'لخ'), (0xFC42, 'M', u'لم'), (0xFC43, 'M', u'لى'), (0xFC44, 'M', u'لي'), (0xFC45, 'M', u'مج'), (0xFC46, 'M', u'مح'), (0xFC47, 'M', u'مخ'), (0xFC48, 'M', u'مم'), (0xFC49, 'M', u'مى'), (0xFC4A, 'M', u'مي'), (0xFC4B, 'M', u'نج'), (0xFC4C, 'M', u'نح'), (0xFC4D, 'M', u'نخ'), (0xFC4E, 'M', u'نم'), (0xFC4F, 'M', u'نى'), (0xFC50, 'M', u'ني'), (0xFC51, 'M', u'هج'), (0xFC52, 'M', u'هم'), (0xFC53, 'M', u'هى'), (0xFC54, 'M', u'هي'), (0xFC55, 'M', u'يج'), (0xFC56, 'M', u'يح'), (0xFC57, 'M', u'يخ'), (0xFC58, 'M', u'يم'), (0xFC59, 'M', u'يى'), (0xFC5A, 'M', u'يي'), (0xFC5B, 'M', u'ذٰ'), (0xFC5C, 'M', u'رٰ'), (0xFC5D, 'M', u'ىٰ'), (0xFC5E, '3', u' ٌّ'), (0xFC5F, '3', u' ٍّ'), ] def _seg_46(): return [ (0xFC60, '3', u' َّ'), (0xFC61, '3', u' ُّ'), (0xFC62, '3', u' ِّ'), (0xFC63, '3', u' ّٰ'), (0xFC64, 'M', u'ئر'), (0xFC65, 'M', u'ئز'), (0xFC66, 'M', u'ئم'), (0xFC67, 
'M', u'ئن'), (0xFC68, 'M', u'ئى'), (0xFC69, 'M', u'ئي'), (0xFC6A, 'M', u'بر'), (0xFC6B, 'M', u'بز'), (0xFC6C, 'M', u'بم'), (0xFC6D, 'M', u'بن'), (0xFC6E, 'M', u'بى'), (0xFC6F, 'M', u'بي'), (0xFC70, 'M', u'تر'), (0xFC71, 'M', u'تز'), (0xFC72, 'M', u'تم'), (0xFC73, 'M', u'تن'), (0xFC74, 'M', u'تى'), (0xFC75, 'M', u'تي'), (0xFC76, 'M', u'ثر'), (0xFC77, 'M', u'ثز'), (0xFC78, 'M', u'ثم'), (0xFC79, 'M', u'ثن'), (0xFC7A, 'M', u'ثى'), (0xFC7B, 'M', u'ثي'), (0xFC7C, 'M', u'فى'), (0xFC7D, 'M', u'في'), (0xFC7E, 'M', u'قى'), (0xFC7F, 'M', u'قي'), (0xFC80, 'M', u'كا'), (0xFC81, 'M', u'كل'), (0xFC82, 'M', u'كم'), (0xFC83, 'M', u'كى'), (0xFC84, 'M', u'كي'), (0xFC85, 'M', u'لم'), (0xFC86, 'M', u'لى'), (0xFC87, 'M', u'لي'), (0xFC88, 'M', u'ما'), (0xFC89, 'M', u'مم'), (0xFC8A, 'M', u'نر'), (0xFC8B, 'M', u'نز'), (0xFC8C, 'M', u'نم'), (0xFC8D, 'M', u'نن'), (0xFC8E, 'M', u'نى'), (0xFC8F, 'M', u'ني'), (0xFC90, 'M', u'ىٰ'), (0xFC91, 'M', u'ير'), (0xFC92, 'M', u'يز'), (0xFC93, 'M', u'يم'), (0xFC94, 'M', u'ين'), (0xFC95, 'M', u'يى'), (0xFC96, 'M', u'يي'), (0xFC97, 'M', u'ئج'), (0xFC98, 'M', u'ئح'), (0xFC99, 'M', u'ئخ'), (0xFC9A, 'M', u'ئم'), (0xFC9B, 'M', u'ئه'), (0xFC9C, 'M', u'بج'), (0xFC9D, 'M', u'بح'), (0xFC9E, 'M', u'بخ'), (0xFC9F, 'M', u'بم'), (0xFCA0, 'M', u'به'), (0xFCA1, 'M', u'تج'), (0xFCA2, 'M', u'تح'), (0xFCA3, 'M', u'تخ'), (0xFCA4, 'M', u'تم'), (0xFCA5, 'M', u'ته'), (0xFCA6, 'M', u'ثم'), (0xFCA7, 'M', u'جح'), (0xFCA8, 'M', u'جم'), (0xFCA9, 'M', u'حج'), (0xFCAA, 'M', u'حم'), (0xFCAB, 'M', u'خج'), (0xFCAC, 'M', u'خم'), (0xFCAD, 'M', u'سج'), (0xFCAE, 'M', u'سح'), (0xFCAF, 'M', u'سخ'), (0xFCB0, 'M', u'سم'), (0xFCB1, 'M', u'صح'), (0xFCB2, 'M', u'صخ'), (0xFCB3, 'M', u'صم'), (0xFCB4, 'M', u'ضج'), (0xFCB5, 'M', u'ضح'), (0xFCB6, 'M', u'ضخ'), (0xFCB7, 'M', u'ضم'), (0xFCB8, 'M', u'طح'), (0xFCB9, 'M', u'ظم'), (0xFCBA, 'M', u'عج'), (0xFCBB, 'M', u'عم'), (0xFCBC, 'M', u'غج'), (0xFCBD, 'M', u'غم'), (0xFCBE, 'M', u'فج'), (0xFCBF, 'M', u'فح'), (0xFCC0, 'M', u'فخ'), (0xFCC1, 'M', u'فم'), (0xFCC2, 'M', u'قح'), (0xFCC3, 'M', u'قم'), ] def _seg_47(): return [ (0xFCC4, 'M', u'كج'), (0xFCC5, 'M', u'كح'), (0xFCC6, 'M', u'كخ'), (0xFCC7, 'M', u'كل'), (0xFCC8, 'M', u'كم'), (0xFCC9, 'M', u'لج'), (0xFCCA, 'M', u'لح'), (0xFCCB, 'M', u'لخ'), (0xFCCC, 'M', u'لم'), (0xFCCD, 'M', u'له'), (0xFCCE, 'M', u'مج'), (0xFCCF, 'M', u'مح'), (0xFCD0, 'M', u'مخ'), (0xFCD1, 'M', u'مم'), (0xFCD2, 'M', u'نج'), (0xFCD3, 'M', u'نح'), (0xFCD4, 'M', u'نخ'), (0xFCD5, 'M', u'نم'), (0xFCD6, 'M', u'نه'), (0xFCD7, 'M', u'هج'), (0xFCD8, 'M', u'هم'), (0xFCD9, 'M', u'هٰ'), (0xFCDA, 'M', u'يج'), (0xFCDB, 'M', u'يح'), (0xFCDC, 'M', u'يخ'), (0xFCDD, 'M', u'يم'), (0xFCDE, 'M', u'يه'), (0xFCDF, 'M', u'ئم'), (0xFCE0, 'M', u'ئه'), (0xFCE1, 'M', u'بم'), (0xFCE2, 'M', u'به'), (0xFCE3, 'M', u'تم'), (0xFCE4, 'M', u'ته'), (0xFCE5, 'M', u'ثم'), (0xFCE6, 'M', u'ثه'), (0xFCE7, 'M', u'سم'), (0xFCE8, 'M', u'سه'), (0xFCE9, 'M', u'شم'), (0xFCEA, 'M', u'شه'), (0xFCEB, 'M', u'كل'), (0xFCEC, 'M', u'كم'), (0xFCED, 'M', u'لم'), (0xFCEE, 'M', u'نم'), (0xFCEF, 'M', u'نه'), (0xFCF0, 'M', u'يم'), (0xFCF1, 'M', u'يه'), (0xFCF2, 'M', u'ـَّ'), (0xFCF3, 'M', u'ـُّ'), (0xFCF4, 'M', u'ـِّ'), (0xFCF5, 'M', u'طى'), (0xFCF6, 'M', u'طي'), (0xFCF7, 'M', u'عى'), (0xFCF8, 'M', u'عي'), (0xFCF9, 'M', u'غى'), (0xFCFA, 'M', u'غي'), (0xFCFB, 'M', u'سى'), (0xFCFC, 'M', u'سي'), (0xFCFD, 'M', u'شى'), (0xFCFE, 'M', u'شي'), (0xFCFF, 'M', u'حى'), (0xFD00, 'M', u'حي'), (0xFD01, 'M', u'جى'), (0xFD02, 'M', u'جي'), (0xFD03, 'M', u'خى'), (0xFD04, 'M', u'خي'), (0xFD05, 'M', u'صى'), (0xFD06, 'M', u'صي'), (0xFD07, 'M', 
u'ضى'), (0xFD08, 'M', u'ضي'), (0xFD09, 'M', u'شج'), (0xFD0A, 'M', u'شح'), (0xFD0B, 'M', u'شخ'), (0xFD0C, 'M', u'شم'), (0xFD0D, 'M', u'شر'), (0xFD0E, 'M', u'سر'), (0xFD0F, 'M', u'صر'), (0xFD10, 'M', u'ضر'), (0xFD11, 'M', u'طى'), (0xFD12, 'M', u'طي'), (0xFD13, 'M', u'عى'), (0xFD14, 'M', u'عي'), (0xFD15, 'M', u'غى'), (0xFD16, 'M', u'غي'), (0xFD17, 'M', u'سى'), (0xFD18, 'M', u'سي'), (0xFD19, 'M', u'شى'), (0xFD1A, 'M', u'شي'), (0xFD1B, 'M', u'حى'), (0xFD1C, 'M', u'حي'), (0xFD1D, 'M', u'جى'), (0xFD1E, 'M', u'جي'), (0xFD1F, 'M', u'خى'), (0xFD20, 'M', u'خي'), (0xFD21, 'M', u'صى'), (0xFD22, 'M', u'صي'), (0xFD23, 'M', u'ضى'), (0xFD24, 'M', u'ضي'), (0xFD25, 'M', u'شج'), (0xFD26, 'M', u'شح'), (0xFD27, 'M', u'شخ'), ] def _seg_48(): return [ (0xFD28, 'M', u'شم'), (0xFD29, 'M', u'شر'), (0xFD2A, 'M', u'سر'), (0xFD2B, 'M', u'صر'), (0xFD2C, 'M', u'ضر'), (0xFD2D, 'M', u'شج'), (0xFD2E, 'M', u'شح'), (0xFD2F, 'M', u'شخ'), (0xFD30, 'M', u'شم'), (0xFD31, 'M', u'سه'), (0xFD32, 'M', u'شه'), (0xFD33, 'M', u'طم'), (0xFD34, 'M', u'سج'), (0xFD35, 'M', u'سح'), (0xFD36, 'M', u'سخ'), (0xFD37, 'M', u'شج'), (0xFD38, 'M', u'شح'), (0xFD39, 'M', u'شخ'), (0xFD3A, 'M', u'طم'), (0xFD3B, 'M', u'ظم'), (0xFD3C, 'M', u'اً'), (0xFD3E, 'V'), (0xFD40, 'X'), (0xFD50, 'M', u'تجم'), (0xFD51, 'M', u'تحج'), (0xFD53, 'M', u'تحم'), (0xFD54, 'M', u'تخم'), (0xFD55, 'M', u'تمج'), (0xFD56, 'M', u'تمح'), (0xFD57, 'M', u'تمخ'), (0xFD58, 'M', u'جمح'), (0xFD5A, 'M', u'حمي'), (0xFD5B, 'M', u'حمى'), (0xFD5C, 'M', u'سحج'), (0xFD5D, 'M', u'سجح'), (0xFD5E, 'M', u'سجى'), (0xFD5F, 'M', u'سمح'), (0xFD61, 'M', u'سمج'), (0xFD62, 'M', u'سمم'), (0xFD64, 'M', u'صحح'), (0xFD66, 'M', u'صمم'), (0xFD67, 'M', u'شحم'), (0xFD69, 'M', u'شجي'), (0xFD6A, 'M', u'شمخ'), (0xFD6C, 'M', u'شمم'), (0xFD6E, 'M', u'ضحى'), (0xFD6F, 'M', u'ضخم'), (0xFD71, 'M', u'طمح'), (0xFD73, 'M', u'طمم'), (0xFD74, 'M', u'طمي'), (0xFD75, 'M', u'عجم'), (0xFD76, 'M', u'عمم'), (0xFD78, 'M', u'عمى'), (0xFD79, 'M', u'غمم'), (0xFD7A, 'M', u'غمي'), (0xFD7B, 'M', u'غمى'), (0xFD7C, 'M', u'فخم'), (0xFD7E, 'M', u'قمح'), (0xFD7F, 'M', u'قمم'), (0xFD80, 'M', u'لحم'), (0xFD81, 'M', u'لحي'), (0xFD82, 'M', u'لحى'), (0xFD83, 'M', u'لجج'), (0xFD85, 'M', u'لخم'), (0xFD87, 'M', u'لمح'), (0xFD89, 'M', u'محج'), (0xFD8A, 'M', u'محم'), (0xFD8B, 'M', u'محي'), (0xFD8C, 'M', u'مجح'), (0xFD8D, 'M', u'مجم'), (0xFD8E, 'M', u'مخج'), (0xFD8F, 'M', u'مخم'), (0xFD90, 'X'), (0xFD92, 'M', u'مجخ'), (0xFD93, 'M', u'همج'), (0xFD94, 'M', u'همم'), (0xFD95, 'M', u'نحم'), (0xFD96, 'M', u'نحى'), (0xFD97, 'M', u'نجم'), (0xFD99, 'M', u'نجى'), (0xFD9A, 'M', u'نمي'), (0xFD9B, 'M', u'نمى'), (0xFD9C, 'M', u'يمم'), (0xFD9E, 'M', u'بخي'), (0xFD9F, 'M', u'تجي'), (0xFDA0, 'M', u'تجى'), (0xFDA1, 'M', u'تخي'), (0xFDA2, 'M', u'تخى'), (0xFDA3, 'M', u'تمي'), (0xFDA4, 'M', u'تمى'), (0xFDA5, 'M', u'جمي'), (0xFDA6, 'M', u'جحى'), (0xFDA7, 'M', u'جمى'), (0xFDA8, 'M', u'سخى'), (0xFDA9, 'M', u'صحي'), (0xFDAA, 'M', u'شحي'), (0xFDAB, 'M', u'ضحي'), (0xFDAC, 'M', u'لجي'), (0xFDAD, 'M', u'لمي'), (0xFDAE, 'M', u'يحي'), ] def _seg_49(): return [ (0xFDAF, 'M', u'يجي'), (0xFDB0, 'M', u'يمي'), (0xFDB1, 'M', u'ممي'), (0xFDB2, 'M', u'قمي'), (0xFDB3, 'M', u'نحي'), (0xFDB4, 'M', u'قمح'), (0xFDB5, 'M', u'لحم'), (0xFDB6, 'M', u'عمي'), (0xFDB7, 'M', u'كمي'), (0xFDB8, 'M', u'نجح'), (0xFDB9, 'M', u'مخي'), (0xFDBA, 'M', u'لجم'), (0xFDBB, 'M', u'كمم'), (0xFDBC, 'M', u'لجم'), (0xFDBD, 'M', u'نجح'), (0xFDBE, 'M', u'جحي'), (0xFDBF, 'M', u'حجي'), (0xFDC0, 'M', u'مجي'), (0xFDC1, 'M', u'فمي'), (0xFDC2, 'M', u'بحي'), (0xFDC3, 'M', u'كمم'), (0xFDC4, 'M', u'عجم'), (0xFDC5, 'M', u'صمم'), 
(0xFDC6, 'M', u'سخي'), (0xFDC7, 'M', u'نجي'), (0xFDC8, 'X'), (0xFDF0, 'M', u'صلے'), (0xFDF1, 'M', u'قلے'), (0xFDF2, 'M', u'الله'), (0xFDF3, 'M', u'اكبر'), (0xFDF4, 'M', u'محمد'), (0xFDF5, 'M', u'صلعم'), (0xFDF6, 'M', u'رسول'), (0xFDF7, 'M', u'عليه'), (0xFDF8, 'M', u'وسلم'), (0xFDF9, 'M', u'صلى'), (0xFDFA, '3', u'صلى الله عليه وسلم'), (0xFDFB, '3', u'جل جلاله'), (0xFDFC, 'M', u'ریال'), (0xFDFD, 'V'), (0xFDFE, 'X'), (0xFE00, 'I'), (0xFE10, '3', u','), (0xFE11, 'M', u'、'), (0xFE12, 'X'), (0xFE13, '3', u':'), (0xFE14, '3', u';'), (0xFE15, '3', u'!'), (0xFE16, '3', u'?'), (0xFE17, 'M', u'〖'), (0xFE18, 'M', u'〗'), (0xFE19, 'X'), (0xFE20, 'V'), (0xFE30, 'X'), (0xFE31, 'M', u'—'), (0xFE32, 'M', u'–'), (0xFE33, '3', u'_'), (0xFE35, '3', u'('), (0xFE36, '3', u')'), (0xFE37, '3', u'{'), (0xFE38, '3', u'}'), (0xFE39, 'M', u'〔'), (0xFE3A, 'M', u'〕'), (0xFE3B, 'M', u'【'), (0xFE3C, 'M', u'】'), (0xFE3D, 'M', u'《'), (0xFE3E, 'M', u'》'), (0xFE3F, 'M', u'〈'), (0xFE40, 'M', u'〉'), (0xFE41, 'M', u'「'), (0xFE42, 'M', u'」'), (0xFE43, 'M', u'『'), (0xFE44, 'M', u'』'), (0xFE45, 'V'), (0xFE47, '3', u'['), (0xFE48, '3', u']'), (0xFE49, '3', u' ̅'), (0xFE4D, '3', u'_'), (0xFE50, '3', u','), (0xFE51, 'M', u'、'), (0xFE52, 'X'), (0xFE54, '3', u';'), (0xFE55, '3', u':'), (0xFE56, '3', u'?'), (0xFE57, '3', u'!'), (0xFE58, 'M', u'—'), (0xFE59, '3', u'('), (0xFE5A, '3', u')'), (0xFE5B, '3', u'{'), (0xFE5C, '3', u'}'), (0xFE5D, 'M', u'〔'), (0xFE5E, 'M', u'〕'), (0xFE5F, '3', u'#'), (0xFE60, '3', u'&'), (0xFE61, '3', u'*'), (0xFE62, '3', u'+'), (0xFE63, 'M', u'-'), (0xFE64, '3', u'<'), (0xFE65, '3', u'>'), (0xFE66, '3', u'='), ] def _seg_50(): return [ (0xFE67, 'X'), (0xFE68, '3', u'\\'), (0xFE69, '3', u'$'), (0xFE6A, '3', u'%'), (0xFE6B, '3', u'@'), (0xFE6C, 'X'), (0xFE70, '3', u' ً'), (0xFE71, 'M', u'ـً'), (0xFE72, '3', u' ٌ'), (0xFE73, 'V'), (0xFE74, '3', u' ٍ'), (0xFE75, 'X'), (0xFE76, '3', u' َ'), (0xFE77, 'M', u'ـَ'), (0xFE78, '3', u' ُ'), (0xFE79, 'M', u'ـُ'), (0xFE7A, '3', u' ِ'), (0xFE7B, 'M', u'ـِ'), (0xFE7C, '3', u' ّ'), (0xFE7D, 'M', u'ـّ'), (0xFE7E, '3', u' ْ'), (0xFE7F, 'M', u'ـْ'), (0xFE80, 'M', u'ء'), (0xFE81, 'M', u'آ'), (0xFE83, 'M', u'أ'), (0xFE85, 'M', u'ؤ'), (0xFE87, 'M', u'إ'), (0xFE89, 'M', u'ئ'), (0xFE8D, 'M', u'ا'), (0xFE8F, 'M', u'ب'), (0xFE93, 'M', u'ة'), (0xFE95, 'M', u'ت'), (0xFE99, 'M', u'ث'), (0xFE9D, 'M', u'ج'), (0xFEA1, 'M', u'ح'), (0xFEA5, 'M', u'خ'), (0xFEA9, 'M', u'د'), (0xFEAB, 'M', u'ذ'), (0xFEAD, 'M', u'ر'), (0xFEAF, 'M', u'ز'), (0xFEB1, 'M', u'س'), (0xFEB5, 'M', u'ش'), (0xFEB9, 'M', u'ص'), (0xFEBD, 'M', u'ض'), (0xFEC1, 'M', u'ط'), (0xFEC5, 'M', u'ظ'), (0xFEC9, 'M', u'ع'), (0xFECD, 'M', u'غ'), (0xFED1, 'M', u'ف'), (0xFED5, 'M', u'ق'), (0xFED9, 'M', u'ك'), (0xFEDD, 'M', u'ل'), (0xFEE1, 'M', u'م'), (0xFEE5, 'M', u'ن'), (0xFEE9, 'M', u'ه'), (0xFEED, 'M', u'و'), (0xFEEF, 'M', u'ى'), (0xFEF1, 'M', u'ي'), (0xFEF5, 'M', u'لآ'), (0xFEF7, 'M', u'لأ'), (0xFEF9, 'M', u'لإ'), (0xFEFB, 'M', u'لا'), (0xFEFD, 'X'), (0xFEFF, 'I'), (0xFF00, 'X'), (0xFF01, '3', u'!'), (0xFF02, '3', u'"'), (0xFF03, '3', u'#'), (0xFF04, '3', u'$'), (0xFF05, '3', u'%'), (0xFF06, '3', u'&'), (0xFF07, '3', u'\''), (0xFF08, '3', u'('), (0xFF09, '3', u')'), (0xFF0A, '3', u'*'), (0xFF0B, '3', u'+'), (0xFF0C, '3', u','), (0xFF0D, 'M', u'-'), (0xFF0E, 'M', u'.'), (0xFF0F, '3', u'/'), (0xFF10, 'M', u'0'), (0xFF11, 'M', u'1'), (0xFF12, 'M', u'2'), (0xFF13, 'M', u'3'), (0xFF14, 'M', u'4'), (0xFF15, 'M', u'5'), (0xFF16, 'M', u'6'), (0xFF17, 'M', u'7'), (0xFF18, 'M', u'8'), (0xFF19, 'M', u'9'), (0xFF1A, '3', u':'), (0xFF1B, '3', u';'), 
(0xFF1C, '3', u'<'), (0xFF1D, '3', u'='), (0xFF1E, '3', u'>'), (0xFF1F, '3', u'?'), (0xFF20, '3', u'@'), (0xFF21, 'M', u'a'), (0xFF22, 'M', u'b'), (0xFF23, 'M', u'c'), ] def _seg_51(): return [ (0xFF24, 'M', u'd'), (0xFF25, 'M', u'e'), (0xFF26, 'M', u'f'), (0xFF27, 'M', u'g'), (0xFF28, 'M', u'h'), (0xFF29, 'M', u'i'), (0xFF2A, 'M', u'j'), (0xFF2B, 'M', u'k'), (0xFF2C, 'M', u'l'), (0xFF2D, 'M', u'm'), (0xFF2E, 'M', u'n'), (0xFF2F, 'M', u'o'), (0xFF30, 'M', u'p'), (0xFF31, 'M', u'q'), (0xFF32, 'M', u'r'), (0xFF33, 'M', u's'), (0xFF34, 'M', u't'), (0xFF35, 'M', u'u'), (0xFF36, 'M', u'v'), (0xFF37, 'M', u'w'), (0xFF38, 'M', u'x'), (0xFF39, 'M', u'y'), (0xFF3A, 'M', u'z'), (0xFF3B, '3', u'['), (0xFF3C, '3', u'\\'), (0xFF3D, '3', u']'), (0xFF3E, '3', u'^'), (0xFF3F, '3', u'_'), (0xFF40, '3', u'`'), (0xFF41, 'M', u'a'), (0xFF42, 'M', u'b'), (0xFF43, 'M', u'c'), (0xFF44, 'M', u'd'), (0xFF45, 'M', u'e'), (0xFF46, 'M', u'f'), (0xFF47, 'M', u'g'), (0xFF48, 'M', u'h'), (0xFF49, 'M', u'i'), (0xFF4A, 'M', u'j'), (0xFF4B, 'M', u'k'), (0xFF4C, 'M', u'l'), (0xFF4D, 'M', u'm'), (0xFF4E, 'M', u'n'), (0xFF4F, 'M', u'o'), (0xFF50, 'M', u'p'), (0xFF51, 'M', u'q'), (0xFF52, 'M', u'r'), (0xFF53, 'M', u's'), (0xFF54, 'M', u't'), (0xFF55, 'M', u'u'), (0xFF56, 'M', u'v'), (0xFF57, 'M', u'w'), (0xFF58, 'M', u'x'), (0xFF59, 'M', u'y'), (0xFF5A, 'M', u'z'), (0xFF5B, '3', u'{'), (0xFF5C, '3', u'|'), (0xFF5D, '3', u'}'), (0xFF5E, '3', u'~'), (0xFF5F, 'M', u'⦅'), (0xFF60, 'M', u'⦆'), (0xFF61, 'M', u'.'), (0xFF62, 'M', u'「'), (0xFF63, 'M', u'」'), (0xFF64, 'M', u'、'), (0xFF65, 'M', u'・'), (0xFF66, 'M', u'ヲ'), (0xFF67, 'M', u'ァ'), (0xFF68, 'M', u'ィ'), (0xFF69, 'M', u'ゥ'), (0xFF6A, 'M', u'ェ'), (0xFF6B, 'M', u'ォ'), (0xFF6C, 'M', u'ャ'), (0xFF6D, 'M', u'ュ'), (0xFF6E, 'M', u'ョ'), (0xFF6F, 'M', u'ッ'), (0xFF70, 'M', u'ー'), (0xFF71, 'M', u'ア'), (0xFF72, 'M', u'イ'), (0xFF73, 'M', u'ウ'), (0xFF74, 'M', u'エ'), (0xFF75, 'M', u'オ'), (0xFF76, 'M', u'カ'), (0xFF77, 'M', u'キ'), (0xFF78, 'M', u'ク'), (0xFF79, 'M', u'ケ'), (0xFF7A, 'M', u'コ'), (0xFF7B, 'M', u'サ'), (0xFF7C, 'M', u'シ'), (0xFF7D, 'M', u'ス'), (0xFF7E, 'M', u'セ'), (0xFF7F, 'M', u'ソ'), (0xFF80, 'M', u'タ'), (0xFF81, 'M', u'チ'), (0xFF82, 'M', u'ツ'), (0xFF83, 'M', u'テ'), (0xFF84, 'M', u'ト'), (0xFF85, 'M', u'ナ'), (0xFF86, 'M', u'ニ'), (0xFF87, 'M', u'ヌ'), ] def _seg_52(): return [ (0xFF88, 'M', u'ネ'), (0xFF89, 'M', u'ノ'), (0xFF8A, 'M', u'ハ'), (0xFF8B, 'M', u'ヒ'), (0xFF8C, 'M', u'フ'), (0xFF8D, 'M', u'ヘ'), (0xFF8E, 'M', u'ホ'), (0xFF8F, 'M', u'マ'), (0xFF90, 'M', u'ミ'), (0xFF91, 'M', u'ム'), (0xFF92, 'M', u'メ'), (0xFF93, 'M', u'モ'), (0xFF94, 'M', u'ヤ'), (0xFF95, 'M', u'ユ'), (0xFF96, 'M', u'ヨ'), (0xFF97, 'M', u'ラ'), (0xFF98, 'M', u'リ'), (0xFF99, 'M', u'ル'), (0xFF9A, 'M', u'レ'), (0xFF9B, 'M', u'ロ'), (0xFF9C, 'M', u'ワ'), (0xFF9D, 'M', u'ン'), (0xFF9E, 'M', u'゙'), (0xFF9F, 'M', u'゚'), (0xFFA0, 'X'), (0xFFA1, 'M', u'ᄀ'), (0xFFA2, 'M', u'ᄁ'), (0xFFA3, 'M', u'ᆪ'), (0xFFA4, 'M', u'ᄂ'), (0xFFA5, 'M', u'ᆬ'), (0xFFA6, 'M', u'ᆭ'), (0xFFA7, 'M', u'ᄃ'), (0xFFA8, 'M', u'ᄄ'), (0xFFA9, 'M', u'ᄅ'), (0xFFAA, 'M', u'ᆰ'), (0xFFAB, 'M', u'ᆱ'), (0xFFAC, 'M', u'ᆲ'), (0xFFAD, 'M', u'ᆳ'), (0xFFAE, 'M', u'ᆴ'), (0xFFAF, 'M', u'ᆵ'), (0xFFB0, 'M', u'ᄚ'), (0xFFB1, 'M', u'ᄆ'), (0xFFB2, 'M', u'ᄇ'), (0xFFB3, 'M', u'ᄈ'), (0xFFB4, 'M', u'ᄡ'), (0xFFB5, 'M', u'ᄉ'), (0xFFB6, 'M', u'ᄊ'), (0xFFB7, 'M', u'ᄋ'), (0xFFB8, 'M', u'ᄌ'), (0xFFB9, 'M', u'ᄍ'), (0xFFBA, 'M', u'ᄎ'), (0xFFBB, 'M', u'ᄏ'), (0xFFBC, 'M', u'ᄐ'), (0xFFBD, 'M', u'ᄑ'), (0xFFBE, 'M', u'ᄒ'), (0xFFBF, 'X'), (0xFFC2, 'M', u'ᅡ'), (0xFFC3, 'M', u'ᅢ'), (0xFFC4, 'M', u'ᅣ'), 
(0xFFC5, 'M', u'ᅤ'), (0xFFC6, 'M', u'ᅥ'), (0xFFC7, 'M', u'ᅦ'), (0xFFC8, 'X'), (0xFFCA, 'M', u'ᅧ'), (0xFFCB, 'M', u'ᅨ'), (0xFFCC, 'M', u'ᅩ'), (0xFFCD, 'M', u'ᅪ'), (0xFFCE, 'M', u'ᅫ'), (0xFFCF, 'M', u'ᅬ'), (0xFFD0, 'X'), (0xFFD2, 'M', u'ᅭ'), (0xFFD3, 'M', u'ᅮ'), (0xFFD4, 'M', u'ᅯ'), (0xFFD5, 'M', u'ᅰ'), (0xFFD6, 'M', u'ᅱ'), (0xFFD7, 'M', u'ᅲ'), (0xFFD8, 'X'), (0xFFDA, 'M', u'ᅳ'), (0xFFDB, 'M', u'ᅴ'), (0xFFDC, 'M', u'ᅵ'), (0xFFDD, 'X'), (0xFFE0, 'M', u'¢'), (0xFFE1, 'M', u'£'), (0xFFE2, 'M', u'¬'), (0xFFE3, '3', u' ̄'), (0xFFE4, 'M', u'¦'), (0xFFE5, 'M', u'¥'), (0xFFE6, 'M', u'₩'), (0xFFE7, 'X'), (0xFFE8, 'M', u'│'), (0xFFE9, 'M', u'←'), (0xFFEA, 'M', u'↑'), (0xFFEB, 'M', u'→'), (0xFFEC, 'M', u'↓'), (0xFFED, 'M', u'■'), (0xFFEE, 'M', u'○'), (0xFFEF, 'X'), (0x10000, 'V'), (0x1000C, 'X'), (0x1000D, 'V'), ] def _seg_53(): return [ (0x10027, 'X'), (0x10028, 'V'), (0x1003B, 'X'), (0x1003C, 'V'), (0x1003E, 'X'), (0x1003F, 'V'), (0x1004E, 'X'), (0x10050, 'V'), (0x1005E, 'X'), (0x10080, 'V'), (0x100FB, 'X'), (0x10100, 'V'), (0x10103, 'X'), (0x10107, 'V'), (0x10134, 'X'), (0x10137, 'V'), (0x1018F, 'X'), (0x10190, 'V'), (0x1019D, 'X'), (0x101A0, 'V'), (0x101A1, 'X'), (0x101D0, 'V'), (0x101FE, 'X'), (0x10280, 'V'), (0x1029D, 'X'), (0x102A0, 'V'), (0x102D1, 'X'), (0x102E0, 'V'), (0x102FC, 'X'), (0x10300, 'V'), (0x10324, 'X'), (0x1032D, 'V'), (0x1034B, 'X'), (0x10350, 'V'), (0x1037B, 'X'), (0x10380, 'V'), (0x1039E, 'X'), (0x1039F, 'V'), (0x103C4, 'X'), (0x103C8, 'V'), (0x103D6, 'X'), (0x10400, 'M', u'𐐨'), (0x10401, 'M', u'𐐩'), (0x10402, 'M', u'𐐪'), (0x10403, 'M', u'𐐫'), (0x10404, 'M', u'𐐬'), (0x10405, 'M', u'𐐭'), (0x10406, 'M', u'𐐮'), (0x10407, 'M', u'𐐯'), (0x10408, 'M', u'𐐰'), (0x10409, 'M', u'𐐱'), (0x1040A, 'M', u'𐐲'), (0x1040B, 'M', u'𐐳'), (0x1040C, 'M', u'𐐴'), (0x1040D, 'M', u'𐐵'), (0x1040E, 'M', u'𐐶'), (0x1040F, 'M', u'𐐷'), (0x10410, 'M', u'𐐸'), (0x10411, 'M', u'𐐹'), (0x10412, 'M', u'𐐺'), (0x10413, 'M', u'𐐻'), (0x10414, 'M', u'𐐼'), (0x10415, 'M', u'𐐽'), (0x10416, 'M', u'𐐾'), (0x10417, 'M', u'𐐿'), (0x10418, 'M', u'𐑀'), (0x10419, 'M', u'𐑁'), (0x1041A, 'M', u'𐑂'), (0x1041B, 'M', u'𐑃'), (0x1041C, 'M', u'𐑄'), (0x1041D, 'M', u'𐑅'), (0x1041E, 'M', u'𐑆'), (0x1041F, 'M', u'𐑇'), (0x10420, 'M', u'𐑈'), (0x10421, 'M', u'𐑉'), (0x10422, 'M', u'𐑊'), (0x10423, 'M', u'𐑋'), (0x10424, 'M', u'𐑌'), (0x10425, 'M', u'𐑍'), (0x10426, 'M', u'𐑎'), (0x10427, 'M', u'𐑏'), (0x10428, 'V'), (0x1049E, 'X'), (0x104A0, 'V'), (0x104AA, 'X'), (0x104B0, 'M', u'𐓘'), (0x104B1, 'M', u'𐓙'), (0x104B2, 'M', u'𐓚'), (0x104B3, 'M', u'𐓛'), (0x104B4, 'M', u'𐓜'), (0x104B5, 'M', u'𐓝'), (0x104B6, 'M', u'𐓞'), (0x104B7, 'M', u'𐓟'), (0x104B8, 'M', u'𐓠'), (0x104B9, 'M', u'𐓡'), (0x104BA, 'M', u'𐓢'), (0x104BB, 'M', u'𐓣'), (0x104BC, 'M', u'𐓤'), (0x104BD, 'M', u'𐓥'), (0x104BE, 'M', u'𐓦'), ] def _seg_54(): return [ (0x104BF, 'M', u'𐓧'), (0x104C0, 'M', u'𐓨'), (0x104C1, 'M', u'𐓩'), (0x104C2, 'M', u'𐓪'), (0x104C3, 'M', u'𐓫'), (0x104C4, 'M', u'𐓬'), (0x104C5, 'M', u'𐓭'), (0x104C6, 'M', u'𐓮'), (0x104C7, 'M', u'𐓯'), (0x104C8, 'M', u'𐓰'), (0x104C9, 'M', u'𐓱'), (0x104CA, 'M', u'𐓲'), (0x104CB, 'M', u'𐓳'), (0x104CC, 'M', u'𐓴'), (0x104CD, 'M', u'𐓵'), (0x104CE, 'M', u'𐓶'), (0x104CF, 'M', u'𐓷'), (0x104D0, 'M', u'𐓸'), (0x104D1, 'M', u'𐓹'), (0x104D2, 'M', u'𐓺'), (0x104D3, 'M', u'𐓻'), (0x104D4, 'X'), (0x104D8, 'V'), (0x104FC, 'X'), (0x10500, 'V'), (0x10528, 'X'), (0x10530, 'V'), (0x10564, 'X'), (0x1056F, 'V'), (0x10570, 'X'), (0x10600, 'V'), (0x10737, 'X'), (0x10740, 'V'), (0x10756, 'X'), (0x10760, 'V'), (0x10768, 'X'), (0x10800, 'V'), (0x10806, 'X'), (0x10808, 'V'), (0x10809, 
'X'), (0x1080A, 'V'), (0x10836, 'X'), (0x10837, 'V'), (0x10839, 'X'), (0x1083C, 'V'), (0x1083D, 'X'), (0x1083F, 'V'), (0x10856, 'X'), (0x10857, 'V'), (0x1089F, 'X'), (0x108A7, 'V'), (0x108B0, 'X'), (0x108E0, 'V'), (0x108F3, 'X'), (0x108F4, 'V'), (0x108F6, 'X'), (0x108FB, 'V'), (0x1091C, 'X'), (0x1091F, 'V'), (0x1093A, 'X'), (0x1093F, 'V'), (0x10940, 'X'), (0x10980, 'V'), (0x109B8, 'X'), (0x109BC, 'V'), (0x109D0, 'X'), (0x109D2, 'V'), (0x10A04, 'X'), (0x10A05, 'V'), (0x10A07, 'X'), (0x10A0C, 'V'), (0x10A14, 'X'), (0x10A15, 'V'), (0x10A18, 'X'), (0x10A19, 'V'), (0x10A36, 'X'), (0x10A38, 'V'), (0x10A3B, 'X'), (0x10A3F, 'V'), (0x10A49, 'X'), (0x10A50, 'V'), (0x10A59, 'X'), (0x10A60, 'V'), (0x10AA0, 'X'), (0x10AC0, 'V'), (0x10AE7, 'X'), (0x10AEB, 'V'), (0x10AF7, 'X'), (0x10B00, 'V'), (0x10B36, 'X'), (0x10B39, 'V'), (0x10B56, 'X'), (0x10B58, 'V'), (0x10B73, 'X'), (0x10B78, 'V'), (0x10B92, 'X'), (0x10B99, 'V'), (0x10B9D, 'X'), (0x10BA9, 'V'), (0x10BB0, 'X'), ] def _seg_55(): return [ (0x10C00, 'V'), (0x10C49, 'X'), (0x10C80, 'M', u'𐳀'), (0x10C81, 'M', u'𐳁'), (0x10C82, 'M', u'𐳂'), (0x10C83, 'M', u'𐳃'), (0x10C84, 'M', u'𐳄'), (0x10C85, 'M', u'𐳅'), (0x10C86, 'M', u'𐳆'), (0x10C87, 'M', u'𐳇'), (0x10C88, 'M', u'𐳈'), (0x10C89, 'M', u'𐳉'), (0x10C8A, 'M', u'𐳊'), (0x10C8B, 'M', u'𐳋'), (0x10C8C, 'M', u'𐳌'), (0x10C8D, 'M', u'𐳍'), (0x10C8E, 'M', u'𐳎'), (0x10C8F, 'M', u'𐳏'), (0x10C90, 'M', u'𐳐'), (0x10C91, 'M', u'𐳑'), (0x10C92, 'M', u'𐳒'), (0x10C93, 'M', u'𐳓'), (0x10C94, 'M', u'𐳔'), (0x10C95, 'M', u'𐳕'), (0x10C96, 'M', u'𐳖'), (0x10C97, 'M', u'𐳗'), (0x10C98, 'M', u'𐳘'), (0x10C99, 'M', u'𐳙'), (0x10C9A, 'M', u'𐳚'), (0x10C9B, 'M', u'𐳛'), (0x10C9C, 'M', u'𐳜'), (0x10C9D, 'M', u'𐳝'), (0x10C9E, 'M', u'𐳞'), (0x10C9F, 'M', u'𐳟'), (0x10CA0, 'M', u'𐳠'), (0x10CA1, 'M', u'𐳡'), (0x10CA2, 'M', u'𐳢'), (0x10CA3, 'M', u'𐳣'), (0x10CA4, 'M', u'𐳤'), (0x10CA5, 'M', u'𐳥'), (0x10CA6, 'M', u'𐳦'), (0x10CA7, 'M', u'𐳧'), (0x10CA8, 'M', u'𐳨'), (0x10CA9, 'M', u'𐳩'), (0x10CAA, 'M', u'𐳪'), (0x10CAB, 'M', u'𐳫'), (0x10CAC, 'M', u'𐳬'), (0x10CAD, 'M', u'𐳭'), (0x10CAE, 'M', u'𐳮'), (0x10CAF, 'M', u'𐳯'), (0x10CB0, 'M', u'𐳰'), (0x10CB1, 'M', u'𐳱'), (0x10CB2, 'M', u'𐳲'), (0x10CB3, 'X'), (0x10CC0, 'V'), (0x10CF3, 'X'), (0x10CFA, 'V'), (0x10D28, 'X'), (0x10D30, 'V'), (0x10D3A, 'X'), (0x10E60, 'V'), (0x10E7F, 'X'), (0x10E80, 'V'), (0x10EAA, 'X'), (0x10EAB, 'V'), (0x10EAE, 'X'), (0x10EB0, 'V'), (0x10EB2, 'X'), (0x10F00, 'V'), (0x10F28, 'X'), (0x10F30, 'V'), (0x10F5A, 'X'), (0x10FB0, 'V'), (0x10FCC, 'X'), (0x10FE0, 'V'), (0x10FF7, 'X'), (0x11000, 'V'), (0x1104E, 'X'), (0x11052, 'V'), (0x11070, 'X'), (0x1107F, 'V'), (0x110BD, 'X'), (0x110BE, 'V'), (0x110C2, 'X'), (0x110D0, 'V'), (0x110E9, 'X'), (0x110F0, 'V'), (0x110FA, 'X'), (0x11100, 'V'), (0x11135, 'X'), (0x11136, 'V'), (0x11148, 'X'), (0x11150, 'V'), (0x11177, 'X'), (0x11180, 'V'), (0x111E0, 'X'), (0x111E1, 'V'), (0x111F5, 'X'), (0x11200, 'V'), (0x11212, 'X'), ] def _seg_56(): return [ (0x11213, 'V'), (0x1123F, 'X'), (0x11280, 'V'), (0x11287, 'X'), (0x11288, 'V'), (0x11289, 'X'), (0x1128A, 'V'), (0x1128E, 'X'), (0x1128F, 'V'), (0x1129E, 'X'), (0x1129F, 'V'), (0x112AA, 'X'), (0x112B0, 'V'), (0x112EB, 'X'), (0x112F0, 'V'), (0x112FA, 'X'), (0x11300, 'V'), (0x11304, 'X'), (0x11305, 'V'), (0x1130D, 'X'), (0x1130F, 'V'), (0x11311, 'X'), (0x11313, 'V'), (0x11329, 'X'), (0x1132A, 'V'), (0x11331, 'X'), (0x11332, 'V'), (0x11334, 'X'), (0x11335, 'V'), (0x1133A, 'X'), (0x1133B, 'V'), (0x11345, 'X'), (0x11347, 'V'), (0x11349, 'X'), (0x1134B, 'V'), (0x1134E, 'X'), (0x11350, 'V'), (0x11351, 'X'), (0x11357, 'V'), 
(0x11358, 'X'), (0x1135D, 'V'), (0x11364, 'X'), (0x11366, 'V'), (0x1136D, 'X'), (0x11370, 'V'), (0x11375, 'X'), (0x11400, 'V'), (0x1145C, 'X'), (0x1145D, 'V'), (0x11462, 'X'), (0x11480, 'V'), (0x114C8, 'X'), (0x114D0, 'V'), (0x114DA, 'X'), (0x11580, 'V'), (0x115B6, 'X'), (0x115B8, 'V'), (0x115DE, 'X'), (0x11600, 'V'), (0x11645, 'X'), (0x11650, 'V'), (0x1165A, 'X'), (0x11660, 'V'), (0x1166D, 'X'), (0x11680, 'V'), (0x116B9, 'X'), (0x116C0, 'V'), (0x116CA, 'X'), (0x11700, 'V'), (0x1171B, 'X'), (0x1171D, 'V'), (0x1172C, 'X'), (0x11730, 'V'), (0x11740, 'X'), (0x11800, 'V'), (0x1183C, 'X'), (0x118A0, 'M', u'𑣀'), (0x118A1, 'M', u'𑣁'), (0x118A2, 'M', u'𑣂'), (0x118A3, 'M', u'𑣃'), (0x118A4, 'M', u'𑣄'), (0x118A5, 'M', u'𑣅'), (0x118A6, 'M', u'𑣆'), (0x118A7, 'M', u'𑣇'), (0x118A8, 'M', u'𑣈'), (0x118A9, 'M', u'𑣉'), (0x118AA, 'M', u'𑣊'), (0x118AB, 'M', u'𑣋'), (0x118AC, 'M', u'𑣌'), (0x118AD, 'M', u'𑣍'), (0x118AE, 'M', u'𑣎'), (0x118AF, 'M', u'𑣏'), (0x118B0, 'M', u'𑣐'), (0x118B1, 'M', u'𑣑'), (0x118B2, 'M', u'𑣒'), (0x118B3, 'M', u'𑣓'), (0x118B4, 'M', u'𑣔'), (0x118B5, 'M', u'𑣕'), (0x118B6, 'M', u'𑣖'), (0x118B7, 'M', u'𑣗'), ] def _seg_57(): return [ (0x118B8, 'M', u'𑣘'), (0x118B9, 'M', u'𑣙'), (0x118BA, 'M', u'𑣚'), (0x118BB, 'M', u'𑣛'), (0x118BC, 'M', u'𑣜'), (0x118BD, 'M', u'𑣝'), (0x118BE, 'M', u'𑣞'), (0x118BF, 'M', u'𑣟'), (0x118C0, 'V'), (0x118F3, 'X'), (0x118FF, 'V'), (0x11907, 'X'), (0x11909, 'V'), (0x1190A, 'X'), (0x1190C, 'V'), (0x11914, 'X'), (0x11915, 'V'), (0x11917, 'X'), (0x11918, 'V'), (0x11936, 'X'), (0x11937, 'V'), (0x11939, 'X'), (0x1193B, 'V'), (0x11947, 'X'), (0x11950, 'V'), (0x1195A, 'X'), (0x119A0, 'V'), (0x119A8, 'X'), (0x119AA, 'V'), (0x119D8, 'X'), (0x119DA, 'V'), (0x119E5, 'X'), (0x11A00, 'V'), (0x11A48, 'X'), (0x11A50, 'V'), (0x11AA3, 'X'), (0x11AC0, 'V'), (0x11AF9, 'X'), (0x11C00, 'V'), (0x11C09, 'X'), (0x11C0A, 'V'), (0x11C37, 'X'), (0x11C38, 'V'), (0x11C46, 'X'), (0x11C50, 'V'), (0x11C6D, 'X'), (0x11C70, 'V'), (0x11C90, 'X'), (0x11C92, 'V'), (0x11CA8, 'X'), (0x11CA9, 'V'), (0x11CB7, 'X'), (0x11D00, 'V'), (0x11D07, 'X'), (0x11D08, 'V'), (0x11D0A, 'X'), (0x11D0B, 'V'), (0x11D37, 'X'), (0x11D3A, 'V'), (0x11D3B, 'X'), (0x11D3C, 'V'), (0x11D3E, 'X'), (0x11D3F, 'V'), (0x11D48, 'X'), (0x11D50, 'V'), (0x11D5A, 'X'), (0x11D60, 'V'), (0x11D66, 'X'), (0x11D67, 'V'), (0x11D69, 'X'), (0x11D6A, 'V'), (0x11D8F, 'X'), (0x11D90, 'V'), (0x11D92, 'X'), (0x11D93, 'V'), (0x11D99, 'X'), (0x11DA0, 'V'), (0x11DAA, 'X'), (0x11EE0, 'V'), (0x11EF9, 'X'), (0x11FB0, 'V'), (0x11FB1, 'X'), (0x11FC0, 'V'), (0x11FF2, 'X'), (0x11FFF, 'V'), (0x1239A, 'X'), (0x12400, 'V'), (0x1246F, 'X'), (0x12470, 'V'), (0x12475, 'X'), (0x12480, 'V'), (0x12544, 'X'), (0x13000, 'V'), (0x1342F, 'X'), (0x14400, 'V'), (0x14647, 'X'), (0x16800, 'V'), (0x16A39, 'X'), (0x16A40, 'V'), (0x16A5F, 'X'), ] def _seg_58(): return [ (0x16A60, 'V'), (0x16A6A, 'X'), (0x16A6E, 'V'), (0x16A70, 'X'), (0x16AD0, 'V'), (0x16AEE, 'X'), (0x16AF0, 'V'), (0x16AF6, 'X'), (0x16B00, 'V'), (0x16B46, 'X'), (0x16B50, 'V'), (0x16B5A, 'X'), (0x16B5B, 'V'), (0x16B62, 'X'), (0x16B63, 'V'), (0x16B78, 'X'), (0x16B7D, 'V'), (0x16B90, 'X'), (0x16E40, 'M', u'𖹠'), (0x16E41, 'M', u'𖹡'), (0x16E42, 'M', u'𖹢'), (0x16E43, 'M', u'𖹣'), (0x16E44, 'M', u'𖹤'), (0x16E45, 'M', u'𖹥'), (0x16E46, 'M', u'𖹦'), (0x16E47, 'M', u'𖹧'), (0x16E48, 'M', u'𖹨'), (0x16E49, 'M', u'𖹩'), (0x16E4A, 'M', u'𖹪'), (0x16E4B, 'M', u'𖹫'), (0x16E4C, 'M', u'𖹬'), (0x16E4D, 'M', u'𖹭'), (0x16E4E, 'M', u'𖹮'), (0x16E4F, 'M', u'𖹯'), (0x16E50, 'M', u'𖹰'), (0x16E51, 'M', u'𖹱'), (0x16E52, 'M', u'𖹲'), (0x16E53, 'M', u'𖹳'), 
(0x16E54, 'M', u'𖹴'), (0x16E55, 'M', u'𖹵'), (0x16E56, 'M', u'𖹶'), (0x16E57, 'M', u'𖹷'), (0x16E58, 'M', u'𖹸'), (0x16E59, 'M', u'𖹹'), (0x16E5A, 'M', u'𖹺'), (0x16E5B, 'M', u'𖹻'), (0x16E5C, 'M', u'𖹼'), (0x16E5D, 'M', u'𖹽'), (0x16E5E, 'M', u'𖹾'), (0x16E5F, 'M', u'𖹿'), (0x16E60, 'V'), (0x16E9B, 'X'), (0x16F00, 'V'), (0x16F4B, 'X'), (0x16F4F, 'V'), (0x16F88, 'X'), (0x16F8F, 'V'), (0x16FA0, 'X'), (0x16FE0, 'V'), (0x16FE5, 'X'), (0x16FF0, 'V'), (0x16FF2, 'X'), (0x17000, 'V'), (0x187F8, 'X'), (0x18800, 'V'), (0x18CD6, 'X'), (0x18D00, 'V'), (0x18D09, 'X'), (0x1B000, 'V'), (0x1B11F, 'X'), (0x1B150, 'V'), (0x1B153, 'X'), (0x1B164, 'V'), (0x1B168, 'X'), (0x1B170, 'V'), (0x1B2FC, 'X'), (0x1BC00, 'V'), (0x1BC6B, 'X'), (0x1BC70, 'V'), (0x1BC7D, 'X'), (0x1BC80, 'V'), (0x1BC89, 'X'), (0x1BC90, 'V'), (0x1BC9A, 'X'), (0x1BC9C, 'V'), (0x1BCA0, 'I'), (0x1BCA4, 'X'), (0x1D000, 'V'), (0x1D0F6, 'X'), (0x1D100, 'V'), (0x1D127, 'X'), (0x1D129, 'V'), (0x1D15E, 'M', u'𝅗𝅥'), (0x1D15F, 'M', u'𝅘𝅥'), (0x1D160, 'M', u'𝅘𝅥𝅮'), (0x1D161, 'M', u'𝅘𝅥𝅯'), (0x1D162, 'M', u'𝅘𝅥𝅰'), (0x1D163, 'M', u'𝅘𝅥𝅱'), (0x1D164, 'M', u'𝅘𝅥𝅲'), (0x1D165, 'V'), ] def _seg_59(): return [ (0x1D173, 'X'), (0x1D17B, 'V'), (0x1D1BB, 'M', u'𝆹𝅥'), (0x1D1BC, 'M', u'𝆺𝅥'), (0x1D1BD, 'M', u'𝆹𝅥𝅮'), (0x1D1BE, 'M', u'𝆺𝅥𝅮'), (0x1D1BF, 'M', u'𝆹𝅥𝅯'), (0x1D1C0, 'M', u'𝆺𝅥𝅯'), (0x1D1C1, 'V'), (0x1D1E9, 'X'), (0x1D200, 'V'), (0x1D246, 'X'), (0x1D2E0, 'V'), (0x1D2F4, 'X'), (0x1D300, 'V'), (0x1D357, 'X'), (0x1D360, 'V'), (0x1D379, 'X'), (0x1D400, 'M', u'a'), (0x1D401, 'M', u'b'), (0x1D402, 'M', u'c'), (0x1D403, 'M', u'd'), (0x1D404, 'M', u'e'), (0x1D405, 'M', u'f'), (0x1D406, 'M', u'g'), (0x1D407, 'M', u'h'), (0x1D408, 'M', u'i'), (0x1D409, 'M', u'j'), (0x1D40A, 'M', u'k'), (0x1D40B, 'M', u'l'), (0x1D40C, 'M', u'm'), (0x1D40D, 'M', u'n'), (0x1D40E, 'M', u'o'), (0x1D40F, 'M', u'p'), (0x1D410, 'M', u'q'), (0x1D411, 'M', u'r'), (0x1D412, 'M', u's'), (0x1D413, 'M', u't'), (0x1D414, 'M', u'u'), (0x1D415, 'M', u'v'), (0x1D416, 'M', u'w'), (0x1D417, 'M', u'x'), (0x1D418, 'M', u'y'), (0x1D419, 'M', u'z'), (0x1D41A, 'M', u'a'), (0x1D41B, 'M', u'b'), (0x1D41C, 'M', u'c'), (0x1D41D, 'M', u'd'), (0x1D41E, 'M', u'e'), (0x1D41F, 'M', u'f'), (0x1D420, 'M', u'g'), (0x1D421, 'M', u'h'), (0x1D422, 'M', u'i'), (0x1D423, 'M', u'j'), (0x1D424, 'M', u'k'), (0x1D425, 'M', u'l'), (0x1D426, 'M', u'm'), (0x1D427, 'M', u'n'), (0x1D428, 'M', u'o'), (0x1D429, 'M', u'p'), (0x1D42A, 'M', u'q'), (0x1D42B, 'M', u'r'), (0x1D42C, 'M', u's'), (0x1D42D, 'M', u't'), (0x1D42E, 'M', u'u'), (0x1D42F, 'M', u'v'), (0x1D430, 'M', u'w'), (0x1D431, 'M', u'x'), (0x1D432, 'M', u'y'), (0x1D433, 'M', u'z'), (0x1D434, 'M', u'a'), (0x1D435, 'M', u'b'), (0x1D436, 'M', u'c'), (0x1D437, 'M', u'd'), (0x1D438, 'M', u'e'), (0x1D439, 'M', u'f'), (0x1D43A, 'M', u'g'), (0x1D43B, 'M', u'h'), (0x1D43C, 'M', u'i'), (0x1D43D, 'M', u'j'), (0x1D43E, 'M', u'k'), (0x1D43F, 'M', u'l'), (0x1D440, 'M', u'm'), (0x1D441, 'M', u'n'), (0x1D442, 'M', u'o'), (0x1D443, 'M', u'p'), (0x1D444, 'M', u'q'), (0x1D445, 'M', u'r'), (0x1D446, 'M', u's'), (0x1D447, 'M', u't'), (0x1D448, 'M', u'u'), (0x1D449, 'M', u'v'), (0x1D44A, 'M', u'w'), (0x1D44B, 'M', u'x'), (0x1D44C, 'M', u'y'), (0x1D44D, 'M', u'z'), (0x1D44E, 'M', u'a'), (0x1D44F, 'M', u'b'), (0x1D450, 'M', u'c'), (0x1D451, 'M', u'd'), ] def _seg_60(): return [ (0x1D452, 'M', u'e'), (0x1D453, 'M', u'f'), (0x1D454, 'M', u'g'), (0x1D455, 'X'), (0x1D456, 'M', u'i'), (0x1D457, 'M', u'j'), (0x1D458, 'M', u'k'), (0x1D459, 'M', u'l'), (0x1D45A, 'M', u'm'), (0x1D45B, 'M', u'n'), (0x1D45C, 'M', u'o'), (0x1D45D, 
'M', u'p'), (0x1D45E, 'M', u'q'), (0x1D45F, 'M', u'r'), (0x1D460, 'M', u's'), (0x1D461, 'M', u't'), (0x1D462, 'M', u'u'), (0x1D463, 'M', u'v'), (0x1D464, 'M', u'w'), (0x1D465, 'M', u'x'), (0x1D466, 'M', u'y'), (0x1D467, 'M', u'z'), (0x1D468, 'M', u'a'), (0x1D469, 'M', u'b'), (0x1D46A, 'M', u'c'), (0x1D46B, 'M', u'd'), (0x1D46C, 'M', u'e'), (0x1D46D, 'M', u'f'), (0x1D46E, 'M', u'g'), (0x1D46F, 'M', u'h'), (0x1D470, 'M', u'i'), (0x1D471, 'M', u'j'), (0x1D472, 'M', u'k'), (0x1D473, 'M', u'l'), (0x1D474, 'M', u'm'), (0x1D475, 'M', u'n'), (0x1D476, 'M', u'o'), (0x1D477, 'M', u'p'), (0x1D478, 'M', u'q'), (0x1D479, 'M', u'r'), (0x1D47A, 'M', u's'), (0x1D47B, 'M', u't'), (0x1D47C, 'M', u'u'), (0x1D47D, 'M', u'v'), (0x1D47E, 'M', u'w'), (0x1D47F, 'M', u'x'), (0x1D480, 'M', u'y'), (0x1D481, 'M', u'z'), (0x1D482, 'M', u'a'), (0x1D483, 'M', u'b'), (0x1D484, 'M', u'c'), (0x1D485, 'M', u'd'), (0x1D486, 'M', u'e'), (0x1D487, 'M', u'f'), (0x1D488, 'M', u'g'), (0x1D489, 'M', u'h'), (0x1D48A, 'M', u'i'), (0x1D48B, 'M', u'j'), (0x1D48C, 'M', u'k'), (0x1D48D, 'M', u'l'), (0x1D48E, 'M', u'm'), (0x1D48F, 'M', u'n'), (0x1D490, 'M', u'o'), (0x1D491, 'M', u'p'), (0x1D492, 'M', u'q'), (0x1D493, 'M', u'r'), (0x1D494, 'M', u's'), (0x1D495, 'M', u't'), (0x1D496, 'M', u'u'), (0x1D497, 'M', u'v'), (0x1D498, 'M', u'w'), (0x1D499, 'M', u'x'), (0x1D49A, 'M', u'y'), (0x1D49B, 'M', u'z'), (0x1D49C, 'M', u'a'), (0x1D49D, 'X'), (0x1D49E, 'M', u'c'), (0x1D49F, 'M', u'd'), (0x1D4A0, 'X'), (0x1D4A2, 'M', u'g'), (0x1D4A3, 'X'), (0x1D4A5, 'M', u'j'), (0x1D4A6, 'M', u'k'), (0x1D4A7, 'X'), (0x1D4A9, 'M', u'n'), (0x1D4AA, 'M', u'o'), (0x1D4AB, 'M', u'p'), (0x1D4AC, 'M', u'q'), (0x1D4AD, 'X'), (0x1D4AE, 'M', u's'), (0x1D4AF, 'M', u't'), (0x1D4B0, 'M', u'u'), (0x1D4B1, 'M', u'v'), (0x1D4B2, 'M', u'w'), (0x1D4B3, 'M', u'x'), (0x1D4B4, 'M', u'y'), (0x1D4B5, 'M', u'z'), (0x1D4B6, 'M', u'a'), (0x1D4B7, 'M', u'b'), (0x1D4B8, 'M', u'c'), ] def _seg_61(): return [ (0x1D4B9, 'M', u'd'), (0x1D4BA, 'X'), (0x1D4BB, 'M', u'f'), (0x1D4BC, 'X'), (0x1D4BD, 'M', u'h'), (0x1D4BE, 'M', u'i'), (0x1D4BF, 'M', u'j'), (0x1D4C0, 'M', u'k'), (0x1D4C1, 'M', u'l'), (0x1D4C2, 'M', u'm'), (0x1D4C3, 'M', u'n'), (0x1D4C4, 'X'), (0x1D4C5, 'M', u'p'), (0x1D4C6, 'M', u'q'), (0x1D4C7, 'M', u'r'), (0x1D4C8, 'M', u's'), (0x1D4C9, 'M', u't'), (0x1D4CA, 'M', u'u'), (0x1D4CB, 'M', u'v'), (0x1D4CC, 'M', u'w'), (0x1D4CD, 'M', u'x'), (0x1D4CE, 'M', u'y'), (0x1D4CF, 'M', u'z'), (0x1D4D0, 'M', u'a'), (0x1D4D1, 'M', u'b'), (0x1D4D2, 'M', u'c'), (0x1D4D3, 'M', u'd'), (0x1D4D4, 'M', u'e'), (0x1D4D5, 'M', u'f'), (0x1D4D6, 'M', u'g'), (0x1D4D7, 'M', u'h'), (0x1D4D8, 'M', u'i'), (0x1D4D9, 'M', u'j'), (0x1D4DA, 'M', u'k'), (0x1D4DB, 'M', u'l'), (0x1D4DC, 'M', u'm'), (0x1D4DD, 'M', u'n'), (0x1D4DE, 'M', u'o'), (0x1D4DF, 'M', u'p'), (0x1D4E0, 'M', u'q'), (0x1D4E1, 'M', u'r'), (0x1D4E2, 'M', u's'), (0x1D4E3, 'M', u't'), (0x1D4E4, 'M', u'u'), (0x1D4E5, 'M', u'v'), (0x1D4E6, 'M', u'w'), (0x1D4E7, 'M', u'x'), (0x1D4E8, 'M', u'y'), (0x1D4E9, 'M', u'z'), (0x1D4EA, 'M', u'a'), (0x1D4EB, 'M', u'b'), (0x1D4EC, 'M', u'c'), (0x1D4ED, 'M', u'd'), (0x1D4EE, 'M', u'e'), (0x1D4EF, 'M', u'f'), (0x1D4F0, 'M', u'g'), (0x1D4F1, 'M', u'h'), (0x1D4F2, 'M', u'i'), (0x1D4F3, 'M', u'j'), (0x1D4F4, 'M', u'k'), (0x1D4F5, 'M', u'l'), (0x1D4F6, 'M', u'm'), (0x1D4F7, 'M', u'n'), (0x1D4F8, 'M', u'o'), (0x1D4F9, 'M', u'p'), (0x1D4FA, 'M', u'q'), (0x1D4FB, 'M', u'r'), (0x1D4FC, 'M', u's'), (0x1D4FD, 'M', u't'), (0x1D4FE, 'M', u'u'), (0x1D4FF, 'M', u'v'), (0x1D500, 'M', u'w'), (0x1D501, 'M', u'x'), (0x1D502, 'M', u'y'), 
(0x1D503, 'M', u'z'), (0x1D504, 'M', u'a'), (0x1D505, 'M', u'b'), (0x1D506, 'X'), (0x1D507, 'M', u'd'), (0x1D508, 'M', u'e'), (0x1D509, 'M', u'f'), (0x1D50A, 'M', u'g'), (0x1D50B, 'X'), (0x1D50D, 'M', u'j'), (0x1D50E, 'M', u'k'), (0x1D50F, 'M', u'l'), (0x1D510, 'M', u'm'), (0x1D511, 'M', u'n'), (0x1D512, 'M', u'o'), (0x1D513, 'M', u'p'), (0x1D514, 'M', u'q'), (0x1D515, 'X'), (0x1D516, 'M', u's'), (0x1D517, 'M', u't'), (0x1D518, 'M', u'u'), (0x1D519, 'M', u'v'), (0x1D51A, 'M', u'w'), (0x1D51B, 'M', u'x'), (0x1D51C, 'M', u'y'), (0x1D51D, 'X'), ] def _seg_62(): return [ (0x1D51E, 'M', u'a'), (0x1D51F, 'M', u'b'), (0x1D520, 'M', u'c'), (0x1D521, 'M', u'd'), (0x1D522, 'M', u'e'), (0x1D523, 'M', u'f'), (0x1D524, 'M', u'g'), (0x1D525, 'M', u'h'), (0x1D526, 'M', u'i'), (0x1D527, 'M', u'j'), (0x1D528, 'M', u'k'), (0x1D529, 'M', u'l'), (0x1D52A, 'M', u'm'), (0x1D52B, 'M', u'n'), (0x1D52C, 'M', u'o'), (0x1D52D, 'M', u'p'), (0x1D52E, 'M', u'q'), (0x1D52F, 'M', u'r'), (0x1D530, 'M', u's'), (0x1D531, 'M', u't'), (0x1D532, 'M', u'u'), (0x1D533, 'M', u'v'), (0x1D534, 'M', u'w'), (0x1D535, 'M', u'x'), (0x1D536, 'M', u'y'), (0x1D537, 'M', u'z'), (0x1D538, 'M', u'a'), (0x1D539, 'M', u'b'), (0x1D53A, 'X'), (0x1D53B, 'M', u'd'), (0x1D53C, 'M', u'e'), (0x1D53D, 'M', u'f'), (0x1D53E, 'M', u'g'), (0x1D53F, 'X'), (0x1D540, 'M', u'i'), (0x1D541, 'M', u'j'), (0x1D542, 'M', u'k'), (0x1D543, 'M', u'l'), (0x1D544, 'M', u'm'), (0x1D545, 'X'), (0x1D546, 'M', u'o'), (0x1D547, 'X'), (0x1D54A, 'M', u's'), (0x1D54B, 'M', u't'), (0x1D54C, 'M', u'u'), (0x1D54D, 'M', u'v'), (0x1D54E, 'M', u'w'), (0x1D54F, 'M', u'x'), (0x1D550, 'M', u'y'), (0x1D551, 'X'), (0x1D552, 'M', u'a'), (0x1D553, 'M', u'b'), (0x1D554, 'M', u'c'), (0x1D555, 'M', u'd'), (0x1D556, 'M', u'e'), (0x1D557, 'M', u'f'), (0x1D558, 'M', u'g'), (0x1D559, 'M', u'h'), (0x1D55A, 'M', u'i'), (0x1D55B, 'M', u'j'), (0x1D55C, 'M', u'k'), (0x1D55D, 'M', u'l'), (0x1D55E, 'M', u'm'), (0x1D55F, 'M', u'n'), (0x1D560, 'M', u'o'), (0x1D561, 'M', u'p'), (0x1D562, 'M', u'q'), (0x1D563, 'M', u'r'), (0x1D564, 'M', u's'), (0x1D565, 'M', u't'), (0x1D566, 'M', u'u'), (0x1D567, 'M', u'v'), (0x1D568, 'M', u'w'), (0x1D569, 'M', u'x'), (0x1D56A, 'M', u'y'), (0x1D56B, 'M', u'z'), (0x1D56C, 'M', u'a'), (0x1D56D, 'M', u'b'), (0x1D56E, 'M', u'c'), (0x1D56F, 'M', u'd'), (0x1D570, 'M', u'e'), (0x1D571, 'M', u'f'), (0x1D572, 'M', u'g'), (0x1D573, 'M', u'h'), (0x1D574, 'M', u'i'), (0x1D575, 'M', u'j'), (0x1D576, 'M', u'k'), (0x1D577, 'M', u'l'), (0x1D578, 'M', u'm'), (0x1D579, 'M', u'n'), (0x1D57A, 'M', u'o'), (0x1D57B, 'M', u'p'), (0x1D57C, 'M', u'q'), (0x1D57D, 'M', u'r'), (0x1D57E, 'M', u's'), (0x1D57F, 'M', u't'), (0x1D580, 'M', u'u'), (0x1D581, 'M', u'v'), (0x1D582, 'M', u'w'), (0x1D583, 'M', u'x'), ] def _seg_63(): return [ (0x1D584, 'M', u'y'), (0x1D585, 'M', u'z'), (0x1D586, 'M', u'a'), (0x1D587, 'M', u'b'), (0x1D588, 'M', u'c'), (0x1D589, 'M', u'd'), (0x1D58A, 'M', u'e'), (0x1D58B, 'M', u'f'), (0x1D58C, 'M', u'g'), (0x1D58D, 'M', u'h'), (0x1D58E, 'M', u'i'), (0x1D58F, 'M', u'j'), (0x1D590, 'M', u'k'), (0x1D591, 'M', u'l'), (0x1D592, 'M', u'm'), (0x1D593, 'M', u'n'), (0x1D594, 'M', u'o'), (0x1D595, 'M', u'p'), (0x1D596, 'M', u'q'), (0x1D597, 'M', u'r'), (0x1D598, 'M', u's'), (0x1D599, 'M', u't'), (0x1D59A, 'M', u'u'), (0x1D59B, 'M', u'v'), (0x1D59C, 'M', u'w'), (0x1D59D, 'M', u'x'), (0x1D59E, 'M', u'y'), (0x1D59F, 'M', u'z'), (0x1D5A0, 'M', u'a'), (0x1D5A1, 'M', u'b'), (0x1D5A2, 'M', u'c'), (0x1D5A3, 'M', u'd'), (0x1D5A4, 'M', u'e'), (0x1D5A5, 'M', u'f'), (0x1D5A6, 'M', u'g'), (0x1D5A7, 'M', 
u'h'), (0x1D5A8, 'M', u'i'), (0x1D5A9, 'M', u'j'), (0x1D5AA, 'M', u'k'), (0x1D5AB, 'M', u'l'), (0x1D5AC, 'M', u'm'), (0x1D5AD, 'M', u'n'), (0x1D5AE, 'M', u'o'), (0x1D5AF, 'M', u'p'), (0x1D5B0, 'M', u'q'), (0x1D5B1, 'M', u'r'), (0x1D5B2, 'M', u's'), (0x1D5B3, 'M', u't'), (0x1D5B4, 'M', u'u'), (0x1D5B5, 'M', u'v'), (0x1D5B6, 'M', u'w'), (0x1D5B7, 'M', u'x'), (0x1D5B8, 'M', u'y'), (0x1D5B9, 'M', u'z'), (0x1D5BA, 'M', u'a'), (0x1D5BB, 'M', u'b'), (0x1D5BC, 'M', u'c'), (0x1D5BD, 'M', u'd'), (0x1D5BE, 'M', u'e'), (0x1D5BF, 'M', u'f'), (0x1D5C0, 'M', u'g'), (0x1D5C1, 'M', u'h'), (0x1D5C2, 'M', u'i'), (0x1D5C3, 'M', u'j'), (0x1D5C4, 'M', u'k'), (0x1D5C5, 'M', u'l'), (0x1D5C6, 'M', u'm'), (0x1D5C7, 'M', u'n'), (0x1D5C8, 'M', u'o'), (0x1D5C9, 'M', u'p'), (0x1D5CA, 'M', u'q'), (0x1D5CB, 'M', u'r'), (0x1D5CC, 'M', u's'), (0x1D5CD, 'M', u't'), (0x1D5CE, 'M', u'u'), (0x1D5CF, 'M', u'v'), (0x1D5D0, 'M', u'w'), (0x1D5D1, 'M', u'x'), (0x1D5D2, 'M', u'y'), (0x1D5D3, 'M', u'z'), (0x1D5D4, 'M', u'a'), (0x1D5D5, 'M', u'b'), (0x1D5D6, 'M', u'c'), (0x1D5D7, 'M', u'd'), (0x1D5D8, 'M', u'e'), (0x1D5D9, 'M', u'f'), (0x1D5DA, 'M', u'g'), (0x1D5DB, 'M', u'h'), (0x1D5DC, 'M', u'i'), (0x1D5DD, 'M', u'j'), (0x1D5DE, 'M', u'k'), (0x1D5DF, 'M', u'l'), (0x1D5E0, 'M', u'm'), (0x1D5E1, 'M', u'n'), (0x1D5E2, 'M', u'o'), (0x1D5E3, 'M', u'p'), (0x1D5E4, 'M', u'q'), (0x1D5E5, 'M', u'r'), (0x1D5E6, 'M', u's'), (0x1D5E7, 'M', u't'), ] def _seg_64(): return [ (0x1D5E8, 'M', u'u'), (0x1D5E9, 'M', u'v'), (0x1D5EA, 'M', u'w'), (0x1D5EB, 'M', u'x'), (0x1D5EC, 'M', u'y'), (0x1D5ED, 'M', u'z'), (0x1D5EE, 'M', u'a'), (0x1D5EF, 'M', u'b'), (0x1D5F0, 'M', u'c'), (0x1D5F1, 'M', u'd'), (0x1D5F2, 'M', u'e'), (0x1D5F3, 'M', u'f'), (0x1D5F4, 'M', u'g'), (0x1D5F5, 'M', u'h'), (0x1D5F6, 'M', u'i'), (0x1D5F7, 'M', u'j'), (0x1D5F8, 'M', u'k'), (0x1D5F9, 'M', u'l'), (0x1D5FA, 'M', u'm'), (0x1D5FB, 'M', u'n'), (0x1D5FC, 'M', u'o'), (0x1D5FD, 'M', u'p'), (0x1D5FE, 'M', u'q'), (0x1D5FF, 'M', u'r'), (0x1D600, 'M', u's'), (0x1D601, 'M', u't'), (0x1D602, 'M', u'u'), (0x1D603, 'M', u'v'), (0x1D604, 'M', u'w'), (0x1D605, 'M', u'x'), (0x1D606, 'M', u'y'), (0x1D607, 'M', u'z'), (0x1D608, 'M', u'a'), (0x1D609, 'M', u'b'), (0x1D60A, 'M', u'c'), (0x1D60B, 'M', u'd'), (0x1D60C, 'M', u'e'), (0x1D60D, 'M', u'f'), (0x1D60E, 'M', u'g'), (0x1D60F, 'M', u'h'), (0x1D610, 'M', u'i'), (0x1D611, 'M', u'j'), (0x1D612, 'M', u'k'), (0x1D613, 'M', u'l'), (0x1D614, 'M', u'm'), (0x1D615, 'M', u'n'), (0x1D616, 'M', u'o'), (0x1D617, 'M', u'p'), (0x1D618, 'M', u'q'), (0x1D619, 'M', u'r'), (0x1D61A, 'M', u's'), (0x1D61B, 'M', u't'), (0x1D61C, 'M', u'u'), (0x1D61D, 'M', u'v'), (0x1D61E, 'M', u'w'), (0x1D61F, 'M', u'x'), (0x1D620, 'M', u'y'), (0x1D621, 'M', u'z'), (0x1D622, 'M', u'a'), (0x1D623, 'M', u'b'), (0x1D624, 'M', u'c'), (0x1D625, 'M', u'd'), (0x1D626, 'M', u'e'), (0x1D627, 'M', u'f'), (0x1D628, 'M', u'g'), (0x1D629, 'M', u'h'), (0x1D62A, 'M', u'i'), (0x1D62B, 'M', u'j'), (0x1D62C, 'M', u'k'), (0x1D62D, 'M', u'l'), (0x1D62E, 'M', u'm'), (0x1D62F, 'M', u'n'), (0x1D630, 'M', u'o'), (0x1D631, 'M', u'p'), (0x1D632, 'M', u'q'), (0x1D633, 'M', u'r'), (0x1D634, 'M', u's'), (0x1D635, 'M', u't'), (0x1D636, 'M', u'u'), (0x1D637, 'M', u'v'), (0x1D638, 'M', u'w'), (0x1D639, 'M', u'x'), (0x1D63A, 'M', u'y'), (0x1D63B, 'M', u'z'), (0x1D63C, 'M', u'a'), (0x1D63D, 'M', u'b'), (0x1D63E, 'M', u'c'), (0x1D63F, 'M', u'd'), (0x1D640, 'M', u'e'), (0x1D641, 'M', u'f'), (0x1D642, 'M', u'g'), (0x1D643, 'M', u'h'), (0x1D644, 'M', u'i'), (0x1D645, 'M', u'j'), (0x1D646, 'M', u'k'), (0x1D647, 'M', u'l'), 
(0x1D648, 'M', u'm'), (0x1D649, 'M', u'n'), (0x1D64A, 'M', u'o'), (0x1D64B, 'M', u'p'), ] def _seg_65(): return [ (0x1D64C, 'M', u'q'), (0x1D64D, 'M', u'r'), (0x1D64E, 'M', u's'), (0x1D64F, 'M', u't'), (0x1D650, 'M', u'u'), (0x1D651, 'M', u'v'), (0x1D652, 'M', u'w'), (0x1D653, 'M', u'x'), (0x1D654, 'M', u'y'), (0x1D655, 'M', u'z'), (0x1D656, 'M', u'a'), (0x1D657, 'M', u'b'), (0x1D658, 'M', u'c'), (0x1D659, 'M', u'd'), (0x1D65A, 'M', u'e'), (0x1D65B, 'M', u'f'), (0x1D65C, 'M', u'g'), (0x1D65D, 'M', u'h'), (0x1D65E, 'M', u'i'), (0x1D65F, 'M', u'j'), (0x1D660, 'M', u'k'), (0x1D661, 'M', u'l'), (0x1D662, 'M', u'm'), (0x1D663, 'M', u'n'), (0x1D664, 'M', u'o'), (0x1D665, 'M', u'p'), (0x1D666, 'M', u'q'), (0x1D667, 'M', u'r'), (0x1D668, 'M', u's'), (0x1D669, 'M', u't'), (0x1D66A, 'M', u'u'), (0x1D66B, 'M', u'v'), (0x1D66C, 'M', u'w'), (0x1D66D, 'M', u'x'), (0x1D66E, 'M', u'y'), (0x1D66F, 'M', u'z'), (0x1D670, 'M', u'a'), (0x1D671, 'M', u'b'), (0x1D672, 'M', u'c'), (0x1D673, 'M', u'd'), (0x1D674, 'M', u'e'), (0x1D675, 'M', u'f'), (0x1D676, 'M', u'g'), (0x1D677, 'M', u'h'), (0x1D678, 'M', u'i'), (0x1D679, 'M', u'j'), (0x1D67A, 'M', u'k'), (0x1D67B, 'M', u'l'), (0x1D67C, 'M', u'm'), (0x1D67D, 'M', u'n'), (0x1D67E, 'M', u'o'), (0x1D67F, 'M', u'p'), (0x1D680, 'M', u'q'), (0x1D681, 'M', u'r'), (0x1D682, 'M', u's'), (0x1D683, 'M', u't'), (0x1D684, 'M', u'u'), (0x1D685, 'M', u'v'), (0x1D686, 'M', u'w'), (0x1D687, 'M', u'x'), (0x1D688, 'M', u'y'), (0x1D689, 'M', u'z'), (0x1D68A, 'M', u'a'), (0x1D68B, 'M', u'b'), (0x1D68C, 'M', u'c'), (0x1D68D, 'M', u'd'), (0x1D68E, 'M', u'e'), (0x1D68F, 'M', u'f'), (0x1D690, 'M', u'g'), (0x1D691, 'M', u'h'), (0x1D692, 'M', u'i'), (0x1D693, 'M', u'j'), (0x1D694, 'M', u'k'), (0x1D695, 'M', u'l'), (0x1D696, 'M', u'm'), (0x1D697, 'M', u'n'), (0x1D698, 'M', u'o'), (0x1D699, 'M', u'p'), (0x1D69A, 'M', u'q'), (0x1D69B, 'M', u'r'), (0x1D69C, 'M', u's'), (0x1D69D, 'M', u't'), (0x1D69E, 'M', u'u'), (0x1D69F, 'M', u'v'), (0x1D6A0, 'M', u'w'), (0x1D6A1, 'M', u'x'), (0x1D6A2, 'M', u'y'), (0x1D6A3, 'M', u'z'), (0x1D6A4, 'M', u'ı'), (0x1D6A5, 'M', u'ȷ'), (0x1D6A6, 'X'), (0x1D6A8, 'M', u'α'), (0x1D6A9, 'M', u'β'), (0x1D6AA, 'M', u'γ'), (0x1D6AB, 'M', u'δ'), (0x1D6AC, 'M', u'ε'), (0x1D6AD, 'M', u'ζ'), (0x1D6AE, 'M', u'η'), (0x1D6AF, 'M', u'θ'), (0x1D6B0, 'M', u'ι'), ] def _seg_66(): return [ (0x1D6B1, 'M', u'κ'), (0x1D6B2, 'M', u'λ'), (0x1D6B3, 'M', u'μ'), (0x1D6B4, 'M', u'ν'), (0x1D6B5, 'M', u'ξ'), (0x1D6B6, 'M', u'ο'), (0x1D6B7, 'M', u'π'), (0x1D6B8, 'M', u'ρ'), (0x1D6B9, 'M', u'θ'), (0x1D6BA, 'M', u'σ'), (0x1D6BB, 'M', u'τ'), (0x1D6BC, 'M', u'υ'), (0x1D6BD, 'M', u'φ'), (0x1D6BE, 'M', u'χ'), (0x1D6BF, 'M', u'ψ'), (0x1D6C0, 'M', u'ω'), (0x1D6C1, 'M', u'∇'), (0x1D6C2, 'M', u'α'), (0x1D6C3, 'M', u'β'), (0x1D6C4, 'M', u'γ'), (0x1D6C5, 'M', u'δ'), (0x1D6C6, 'M', u'ε'), (0x1D6C7, 'M', u'ζ'), (0x1D6C8, 'M', u'η'), (0x1D6C9, 'M', u'θ'), (0x1D6CA, 'M', u'ι'), (0x1D6CB, 'M', u'κ'), (0x1D6CC, 'M', u'λ'), (0x1D6CD, 'M', u'μ'), (0x1D6CE, 'M', u'ν'), (0x1D6CF, 'M', u'ξ'), (0x1D6D0, 'M', u'ο'), (0x1D6D1, 'M', u'π'), (0x1D6D2, 'M', u'ρ'), (0x1D6D3, 'M', u'σ'), (0x1D6D5, 'M', u'τ'), (0x1D6D6, 'M', u'υ'), (0x1D6D7, 'M', u'φ'), (0x1D6D8, 'M', u'χ'), (0x1D6D9, 'M', u'ψ'), (0x1D6DA, 'M', u'ω'), (0x1D6DB, 'M', u'∂'), (0x1D6DC, 'M', u'ε'), (0x1D6DD, 'M', u'θ'), (0x1D6DE, 'M', u'κ'), (0x1D6DF, 'M', u'φ'), (0x1D6E0, 'M', u'ρ'), (0x1D6E1, 'M', u'π'), (0x1D6E2, 'M', u'α'), (0x1D6E3, 'M', u'β'), (0x1D6E4, 'M', u'γ'), (0x1D6E5, 'M', u'δ'), (0x1D6E6, 'M', u'ε'), (0x1D6E7, 'M', u'ζ'), (0x1D6E8, 'M', u'η'), (0x1D6E9, 
'M', u'θ'), (0x1D6EA, 'M', u'ι'), (0x1D6EB, 'M', u'κ'), (0x1D6EC, 'M', u'λ'), (0x1D6ED, 'M', u'μ'), (0x1D6EE, 'M', u'ν'), (0x1D6EF, 'M', u'ξ'), (0x1D6F0, 'M', u'ο'), (0x1D6F1, 'M', u'π'), (0x1D6F2, 'M', u'ρ'), (0x1D6F3, 'M', u'θ'), (0x1D6F4, 'M', u'σ'), (0x1D6F5, 'M', u'τ'), (0x1D6F6, 'M', u'υ'), (0x1D6F7, 'M', u'φ'), (0x1D6F8, 'M', u'χ'), (0x1D6F9, 'M', u'ψ'), (0x1D6FA, 'M', u'ω'), (0x1D6FB, 'M', u'∇'), (0x1D6FC, 'M', u'α'), (0x1D6FD, 'M', u'β'), (0x1D6FE, 'M', u'γ'), (0x1D6FF, 'M', u'δ'), (0x1D700, 'M', u'ε'), (0x1D701, 'M', u'ζ'), (0x1D702, 'M', u'η'), (0x1D703, 'M', u'θ'), (0x1D704, 'M', u'ι'), (0x1D705, 'M', u'κ'), (0x1D706, 'M', u'λ'), (0x1D707, 'M', u'μ'), (0x1D708, 'M', u'ν'), (0x1D709, 'M', u'ξ'), (0x1D70A, 'M', u'ο'), (0x1D70B, 'M', u'π'), (0x1D70C, 'M', u'ρ'), (0x1D70D, 'M', u'σ'), (0x1D70F, 'M', u'τ'), (0x1D710, 'M', u'υ'), (0x1D711, 'M', u'φ'), (0x1D712, 'M', u'χ'), (0x1D713, 'M', u'ψ'), (0x1D714, 'M', u'ω'), (0x1D715, 'M', u'∂'), (0x1D716, 'M', u'ε'), ] def _seg_67(): return [ (0x1D717, 'M', u'θ'), (0x1D718, 'M', u'κ'), (0x1D719, 'M', u'φ'), (0x1D71A, 'M', u'ρ'), (0x1D71B, 'M', u'π'), (0x1D71C, 'M', u'α'), (0x1D71D, 'M', u'β'), (0x1D71E, 'M', u'γ'), (0x1D71F, 'M', u'δ'), (0x1D720, 'M', u'ε'), (0x1D721, 'M', u'ζ'), (0x1D722, 'M', u'η'), (0x1D723, 'M', u'θ'), (0x1D724, 'M', u'ι'), (0x1D725, 'M', u'κ'), (0x1D726, 'M', u'λ'), (0x1D727, 'M', u'μ'), (0x1D728, 'M', u'ν'), (0x1D729, 'M', u'ξ'), (0x1D72A, 'M', u'ο'), (0x1D72B, 'M', u'π'), (0x1D72C, 'M', u'ρ'), (0x1D72D, 'M', u'θ'), (0x1D72E, 'M', u'σ'), (0x1D72F, 'M', u'τ'), (0x1D730, 'M', u'υ'), (0x1D731, 'M', u'φ'), (0x1D732, 'M', u'χ'), (0x1D733, 'M', u'ψ'), (0x1D734, 'M', u'ω'), (0x1D735, 'M', u'∇'), (0x1D736, 'M', u'α'), (0x1D737, 'M', u'β'), (0x1D738, 'M', u'γ'), (0x1D739, 'M', u'δ'), (0x1D73A, 'M', u'ε'), (0x1D73B, 'M', u'ζ'), (0x1D73C, 'M', u'η'), (0x1D73D, 'M', u'θ'), (0x1D73E, 'M', u'ι'), (0x1D73F, 'M', u'κ'), (0x1D740, 'M', u'λ'), (0x1D741, 'M', u'μ'), (0x1D742, 'M', u'ν'), (0x1D743, 'M', u'ξ'), (0x1D744, 'M', u'ο'), (0x1D745, 'M', u'π'), (0x1D746, 'M', u'ρ'), (0x1D747, 'M', u'σ'), (0x1D749, 'M', u'τ'), (0x1D74A, 'M', u'υ'), (0x1D74B, 'M', u'φ'), (0x1D74C, 'M', u'χ'), (0x1D74D, 'M', u'ψ'), (0x1D74E, 'M', u'ω'), (0x1D74F, 'M', u'∂'), (0x1D750, 'M', u'ε'), (0x1D751, 'M', u'θ'), (0x1D752, 'M', u'κ'), (0x1D753, 'M', u'φ'), (0x1D754, 'M', u'ρ'), (0x1D755, 'M', u'π'), (0x1D756, 'M', u'α'), (0x1D757, 'M', u'β'), (0x1D758, 'M', u'γ'), (0x1D759, 'M', u'δ'), (0x1D75A, 'M', u'ε'), (0x1D75B, 'M', u'ζ'), (0x1D75C, 'M', u'η'), (0x1D75D, 'M', u'θ'), (0x1D75E, 'M', u'ι'), (0x1D75F, 'M', u'κ'), (0x1D760, 'M', u'λ'), (0x1D761, 'M', u'μ'), (0x1D762, 'M', u'ν'), (0x1D763, 'M', u'ξ'), (0x1D764, 'M', u'ο'), (0x1D765, 'M', u'π'), (0x1D766, 'M', u'ρ'), (0x1D767, 'M', u'θ'), (0x1D768, 'M', u'σ'), (0x1D769, 'M', u'τ'), (0x1D76A, 'M', u'υ'), (0x1D76B, 'M', u'φ'), (0x1D76C, 'M', u'χ'), (0x1D76D, 'M', u'ψ'), (0x1D76E, 'M', u'ω'), (0x1D76F, 'M', u'∇'), (0x1D770, 'M', u'α'), (0x1D771, 'M', u'β'), (0x1D772, 'M', u'γ'), (0x1D773, 'M', u'δ'), (0x1D774, 'M', u'ε'), (0x1D775, 'M', u'ζ'), (0x1D776, 'M', u'η'), (0x1D777, 'M', u'θ'), (0x1D778, 'M', u'ι'), (0x1D779, 'M', u'κ'), (0x1D77A, 'M', u'λ'), (0x1D77B, 'M', u'μ'), ] def _seg_68(): return [ (0x1D77C, 'M', u'ν'), (0x1D77D, 'M', u'ξ'), (0x1D77E, 'M', u'ο'), (0x1D77F, 'M', u'π'), (0x1D780, 'M', u'ρ'), (0x1D781, 'M', u'σ'), (0x1D783, 'M', u'τ'), (0x1D784, 'M', u'υ'), (0x1D785, 'M', u'φ'), (0x1D786, 'M', u'χ'), (0x1D787, 'M', u'ψ'), (0x1D788, 'M', u'ω'), (0x1D789, 'M', u'∂'), (0x1D78A, 'M', u'ε'), (0x1D78B, 'M', 
u'θ'), (0x1D78C, 'M', u'κ'), (0x1D78D, 'M', u'φ'), (0x1D78E, 'M', u'ρ'), (0x1D78F, 'M', u'π'), (0x1D790, 'M', u'α'), (0x1D791, 'M', u'β'), (0x1D792, 'M', u'γ'), (0x1D793, 'M', u'δ'), (0x1D794, 'M', u'ε'), (0x1D795, 'M', u'ζ'), (0x1D796, 'M', u'η'), (0x1D797, 'M', u'θ'), (0x1D798, 'M', u'ι'), (0x1D799, 'M', u'κ'), (0x1D79A, 'M', u'λ'), (0x1D79B, 'M', u'μ'), (0x1D79C, 'M', u'ν'), (0x1D79D, 'M', u'ξ'), (0x1D79E, 'M', u'ο'), (0x1D79F, 'M', u'π'), (0x1D7A0, 'M', u'ρ'), (0x1D7A1, 'M', u'θ'), (0x1D7A2, 'M', u'σ'), (0x1D7A3, 'M', u'τ'), (0x1D7A4, 'M', u'υ'), (0x1D7A5, 'M', u'φ'), (0x1D7A6, 'M', u'χ'), (0x1D7A7, 'M', u'ψ'), (0x1D7A8, 'M', u'ω'), (0x1D7A9, 'M', u'∇'), (0x1D7AA, 'M', u'α'), (0x1D7AB, 'M', u'β'), (0x1D7AC, 'M', u'γ'), (0x1D7AD, 'M', u'δ'), (0x1D7AE, 'M', u'ε'), (0x1D7AF, 'M', u'ζ'), (0x1D7B0, 'M', u'η'), (0x1D7B1, 'M', u'θ'), (0x1D7B2, 'M', u'ι'), (0x1D7B3, 'M', u'κ'), (0x1D7B4, 'M', u'λ'), (0x1D7B5, 'M', u'μ'), (0x1D7B6, 'M', u'ν'), (0x1D7B7, 'M', u'ξ'), (0x1D7B8, 'M', u'ο'), (0x1D7B9, 'M', u'π'), (0x1D7BA, 'M', u'ρ'), (0x1D7BB, 'M', u'σ'), (0x1D7BD, 'M', u'τ'), (0x1D7BE, 'M', u'υ'), (0x1D7BF, 'M', u'φ'), (0x1D7C0, 'M', u'χ'), (0x1D7C1, 'M', u'ψ'), (0x1D7C2, 'M', u'ω'), (0x1D7C3, 'M', u'∂'), (0x1D7C4, 'M', u'ε'), (0x1D7C5, 'M', u'θ'), (0x1D7C6, 'M', u'κ'), (0x1D7C7, 'M', u'φ'), (0x1D7C8, 'M', u'ρ'), (0x1D7C9, 'M', u'π'), (0x1D7CA, 'M', u'ϝ'), (0x1D7CC, 'X'), (0x1D7CE, 'M', u'0'), (0x1D7CF, 'M', u'1'), (0x1D7D0, 'M', u'2'), (0x1D7D1, 'M', u'3'), (0x1D7D2, 'M', u'4'), (0x1D7D3, 'M', u'5'), (0x1D7D4, 'M', u'6'), (0x1D7D5, 'M', u'7'), (0x1D7D6, 'M', u'8'), (0x1D7D7, 'M', u'9'), (0x1D7D8, 'M', u'0'), (0x1D7D9, 'M', u'1'), (0x1D7DA, 'M', u'2'), (0x1D7DB, 'M', u'3'), (0x1D7DC, 'M', u'4'), (0x1D7DD, 'M', u'5'), (0x1D7DE, 'M', u'6'), (0x1D7DF, 'M', u'7'), (0x1D7E0, 'M', u'8'), (0x1D7E1, 'M', u'9'), (0x1D7E2, 'M', u'0'), (0x1D7E3, 'M', u'1'), ] def _seg_69(): return [ (0x1D7E4, 'M', u'2'), (0x1D7E5, 'M', u'3'), (0x1D7E6, 'M', u'4'), (0x1D7E7, 'M', u'5'), (0x1D7E8, 'M', u'6'), (0x1D7E9, 'M', u'7'), (0x1D7EA, 'M', u'8'), (0x1D7EB, 'M', u'9'), (0x1D7EC, 'M', u'0'), (0x1D7ED, 'M', u'1'), (0x1D7EE, 'M', u'2'), (0x1D7EF, 'M', u'3'), (0x1D7F0, 'M', u'4'), (0x1D7F1, 'M', u'5'), (0x1D7F2, 'M', u'6'), (0x1D7F3, 'M', u'7'), (0x1D7F4, 'M', u'8'), (0x1D7F5, 'M', u'9'), (0x1D7F6, 'M', u'0'), (0x1D7F7, 'M', u'1'), (0x1D7F8, 'M', u'2'), (0x1D7F9, 'M', u'3'), (0x1D7FA, 'M', u'4'), (0x1D7FB, 'M', u'5'), (0x1D7FC, 'M', u'6'), (0x1D7FD, 'M', u'7'), (0x1D7FE, 'M', u'8'), (0x1D7FF, 'M', u'9'), (0x1D800, 'V'), (0x1DA8C, 'X'), (0x1DA9B, 'V'), (0x1DAA0, 'X'), (0x1DAA1, 'V'), (0x1DAB0, 'X'), (0x1E000, 'V'), (0x1E007, 'X'), (0x1E008, 'V'), (0x1E019, 'X'), (0x1E01B, 'V'), (0x1E022, 'X'), (0x1E023, 'V'), (0x1E025, 'X'), (0x1E026, 'V'), (0x1E02B, 'X'), (0x1E100, 'V'), (0x1E12D, 'X'), (0x1E130, 'V'), (0x1E13E, 'X'), (0x1E140, 'V'), (0x1E14A, 'X'), (0x1E14E, 'V'), (0x1E150, 'X'), (0x1E2C0, 'V'), (0x1E2FA, 'X'), (0x1E2FF, 'V'), (0x1E300, 'X'), (0x1E800, 'V'), (0x1E8C5, 'X'), (0x1E8C7, 'V'), (0x1E8D7, 'X'), (0x1E900, 'M', u'𞤢'), (0x1E901, 'M', u'𞤣'), (0x1E902, 'M', u'𞤤'), (0x1E903, 'M', u'𞤥'), (0x1E904, 'M', u'𞤦'), (0x1E905, 'M', u'𞤧'), (0x1E906, 'M', u'𞤨'), (0x1E907, 'M', u'𞤩'), (0x1E908, 'M', u'𞤪'), (0x1E909, 'M', u'𞤫'), (0x1E90A, 'M', u'𞤬'), (0x1E90B, 'M', u'𞤭'), (0x1E90C, 'M', u'𞤮'), (0x1E90D, 'M', u'𞤯'), (0x1E90E, 'M', u'𞤰'), (0x1E90F, 'M', u'𞤱'), (0x1E910, 'M', u'𞤲'), (0x1E911, 'M', u'𞤳'), (0x1E912, 'M', u'𞤴'), (0x1E913, 'M', u'𞤵'), (0x1E914, 'M', u'𞤶'), (0x1E915, 'M', u'𞤷'), (0x1E916, 'M', u'𞤸'), (0x1E917, 'M', u'𞤹'), 
(0x1E918, 'M', u'𞤺'), (0x1E919, 'M', u'𞤻'), (0x1E91A, 'M', u'𞤼'), (0x1E91B, 'M', u'𞤽'), (0x1E91C, 'M', u'𞤾'), (0x1E91D, 'M', u'𞤿'), (0x1E91E, 'M', u'𞥀'), (0x1E91F, 'M', u'𞥁'), (0x1E920, 'M', u'𞥂'), (0x1E921, 'M', u'𞥃'), (0x1E922, 'V'), (0x1E94C, 'X'), (0x1E950, 'V'), (0x1E95A, 'X'), (0x1E95E, 'V'), (0x1E960, 'X'), ] def _seg_70(): return [ (0x1EC71, 'V'), (0x1ECB5, 'X'), (0x1ED01, 'V'), (0x1ED3E, 'X'), (0x1EE00, 'M', u'ا'), (0x1EE01, 'M', u'ب'), (0x1EE02, 'M', u'ج'), (0x1EE03, 'M', u'د'), (0x1EE04, 'X'), (0x1EE05, 'M', u'و'), (0x1EE06, 'M', u'ز'), (0x1EE07, 'M', u'ح'), (0x1EE08, 'M', u'ط'), (0x1EE09, 'M', u'ي'), (0x1EE0A, 'M', u'ك'), (0x1EE0B, 'M', u'ل'), (0x1EE0C, 'M', u'م'), (0x1EE0D, 'M', u'ن'), (0x1EE0E, 'M', u'س'), (0x1EE0F, 'M', u'ع'), (0x1EE10, 'M', u'ف'), (0x1EE11, 'M', u'ص'), (0x1EE12, 'M', u'ق'), (0x1EE13, 'M', u'ر'), (0x1EE14, 'M', u'ش'), (0x1EE15, 'M', u'ت'), (0x1EE16, 'M', u'ث'), (0x1EE17, 'M', u'خ'), (0x1EE18, 'M', u'ذ'), (0x1EE19, 'M', u'ض'), (0x1EE1A, 'M', u'ظ'), (0x1EE1B, 'M', u'غ'), (0x1EE1C, 'M', u'ٮ'), (0x1EE1D, 'M', u'ں'), (0x1EE1E, 'M', u'ڡ'), (0x1EE1F, 'M', u'ٯ'), (0x1EE20, 'X'), (0x1EE21, 'M', u'ب'), (0x1EE22, 'M', u'ج'), (0x1EE23, 'X'), (0x1EE24, 'M', u'ه'), (0x1EE25, 'X'), (0x1EE27, 'M', u'ح'), (0x1EE28, 'X'), (0x1EE29, 'M', u'ي'), (0x1EE2A, 'M', u'ك'), (0x1EE2B, 'M', u'ل'), (0x1EE2C, 'M', u'م'), (0x1EE2D, 'M', u'ن'), (0x1EE2E, 'M', u'س'), (0x1EE2F, 'M', u'ع'), (0x1EE30, 'M', u'ف'), (0x1EE31, 'M', u'ص'), (0x1EE32, 'M', u'ق'), (0x1EE33, 'X'), (0x1EE34, 'M', u'ش'), (0x1EE35, 'M', u'ت'), (0x1EE36, 'M', u'ث'), (0x1EE37, 'M', u'خ'), (0x1EE38, 'X'), (0x1EE39, 'M', u'ض'), (0x1EE3A, 'X'), (0x1EE3B, 'M', u'غ'), (0x1EE3C, 'X'), (0x1EE42, 'M', u'ج'), (0x1EE43, 'X'), (0x1EE47, 'M', u'ح'), (0x1EE48, 'X'), (0x1EE49, 'M', u'ي'), (0x1EE4A, 'X'), (0x1EE4B, 'M', u'ل'), (0x1EE4C, 'X'), (0x1EE4D, 'M', u'ن'), (0x1EE4E, 'M', u'س'), (0x1EE4F, 'M', u'ع'), (0x1EE50, 'X'), (0x1EE51, 'M', u'ص'), (0x1EE52, 'M', u'ق'), (0x1EE53, 'X'), (0x1EE54, 'M', u'ش'), (0x1EE55, 'X'), (0x1EE57, 'M', u'خ'), (0x1EE58, 'X'), (0x1EE59, 'M', u'ض'), (0x1EE5A, 'X'), (0x1EE5B, 'M', u'غ'), (0x1EE5C, 'X'), (0x1EE5D, 'M', u'ں'), (0x1EE5E, 'X'), (0x1EE5F, 'M', u'ٯ'), (0x1EE60, 'X'), (0x1EE61, 'M', u'ب'), (0x1EE62, 'M', u'ج'), (0x1EE63, 'X'), (0x1EE64, 'M', u'ه'), (0x1EE65, 'X'), (0x1EE67, 'M', u'ح'), (0x1EE68, 'M', u'ط'), (0x1EE69, 'M', u'ي'), (0x1EE6A, 'M', u'ك'), ] def _seg_71(): return [ (0x1EE6B, 'X'), (0x1EE6C, 'M', u'م'), (0x1EE6D, 'M', u'ن'), (0x1EE6E, 'M', u'س'), (0x1EE6F, 'M', u'ع'), (0x1EE70, 'M', u'ف'), (0x1EE71, 'M', u'ص'), (0x1EE72, 'M', u'ق'), (0x1EE73, 'X'), (0x1EE74, 'M', u'ش'), (0x1EE75, 'M', u'ت'), (0x1EE76, 'M', u'ث'), (0x1EE77, 'M', u'خ'), (0x1EE78, 'X'), (0x1EE79, 'M', u'ض'), (0x1EE7A, 'M', u'ظ'), (0x1EE7B, 'M', u'غ'), (0x1EE7C, 'M', u'ٮ'), (0x1EE7D, 'X'), (0x1EE7E, 'M', u'ڡ'), (0x1EE7F, 'X'), (0x1EE80, 'M', u'ا'), (0x1EE81, 'M', u'ب'), (0x1EE82, 'M', u'ج'), (0x1EE83, 'M', u'د'), (0x1EE84, 'M', u'ه'), (0x1EE85, 'M', u'و'), (0x1EE86, 'M', u'ز'), (0x1EE87, 'M', u'ح'), (0x1EE88, 'M', u'ط'), (0x1EE89, 'M', u'ي'), (0x1EE8A, 'X'), (0x1EE8B, 'M', u'ل'), (0x1EE8C, 'M', u'م'), (0x1EE8D, 'M', u'ن'), (0x1EE8E, 'M', u'س'), (0x1EE8F, 'M', u'ع'), (0x1EE90, 'M', u'ف'), (0x1EE91, 'M', u'ص'), (0x1EE92, 'M', u'ق'), (0x1EE93, 'M', u'ر'), (0x1EE94, 'M', u'ش'), (0x1EE95, 'M', u'ت'), (0x1EE96, 'M', u'ث'), (0x1EE97, 'M', u'خ'), (0x1EE98, 'M', u'ذ'), (0x1EE99, 'M', u'ض'), (0x1EE9A, 'M', u'ظ'), (0x1EE9B, 'M', u'غ'), (0x1EE9C, 'X'), (0x1EEA1, 'M', u'ب'), (0x1EEA2, 'M', u'ج'), (0x1EEA3, 'M', u'د'), (0x1EEA4, 'X'), 
(0x1EEA5, 'M', u'و'), (0x1EEA6, 'M', u'ز'), (0x1EEA7, 'M', u'ح'), (0x1EEA8, 'M', u'ط'), (0x1EEA9, 'M', u'ي'), (0x1EEAA, 'X'), (0x1EEAB, 'M', u'ل'), (0x1EEAC, 'M', u'م'), (0x1EEAD, 'M', u'ن'), (0x1EEAE, 'M', u'س'), (0x1EEAF, 'M', u'ع'), (0x1EEB0, 'M', u'ف'), (0x1EEB1, 'M', u'ص'), (0x1EEB2, 'M', u'ق'), (0x1EEB3, 'M', u'ر'), (0x1EEB4, 'M', u'ش'), (0x1EEB5, 'M', u'ت'), (0x1EEB6, 'M', u'ث'), (0x1EEB7, 'M', u'خ'), (0x1EEB8, 'M', u'ذ'), (0x1EEB9, 'M', u'ض'), (0x1EEBA, 'M', u'ظ'), (0x1EEBB, 'M', u'غ'), (0x1EEBC, 'X'), (0x1EEF0, 'V'), (0x1EEF2, 'X'), (0x1F000, 'V'), (0x1F02C, 'X'), (0x1F030, 'V'), (0x1F094, 'X'), (0x1F0A0, 'V'), (0x1F0AF, 'X'), (0x1F0B1, 'V'), (0x1F0C0, 'X'), (0x1F0C1, 'V'), (0x1F0D0, 'X'), (0x1F0D1, 'V'), (0x1F0F6, 'X'), (0x1F101, '3', u'0,'), (0x1F102, '3', u'1,'), (0x1F103, '3', u'2,'), (0x1F104, '3', u'3,'), (0x1F105, '3', u'4,'), (0x1F106, '3', u'5,'), (0x1F107, '3', u'6,'), (0x1F108, '3', u'7,'), ] def _seg_72(): return [ (0x1F109, '3', u'8,'), (0x1F10A, '3', u'9,'), (0x1F10B, 'V'), (0x1F110, '3', u'(a)'), (0x1F111, '3', u'(b)'), (0x1F112, '3', u'(c)'), (0x1F113, '3', u'(d)'), (0x1F114, '3', u'(e)'), (0x1F115, '3', u'(f)'), (0x1F116, '3', u'(g)'), (0x1F117, '3', u'(h)'), (0x1F118, '3', u'(i)'), (0x1F119, '3', u'(j)'), (0x1F11A, '3', u'(k)'), (0x1F11B, '3', u'(l)'), (0x1F11C, '3', u'(m)'), (0x1F11D, '3', u'(n)'), (0x1F11E, '3', u'(o)'), (0x1F11F, '3', u'(p)'), (0x1F120, '3', u'(q)'), (0x1F121, '3', u'(r)'), (0x1F122, '3', u'(s)'), (0x1F123, '3', u'(t)'), (0x1F124, '3', u'(u)'), (0x1F125, '3', u'(v)'), (0x1F126, '3', u'(w)'), (0x1F127, '3', u'(x)'), (0x1F128, '3', u'(y)'), (0x1F129, '3', u'(z)'), (0x1F12A, 'M', u'〔s〕'), (0x1F12B, 'M', u'c'), (0x1F12C, 'M', u'r'), (0x1F12D, 'M', u'cd'), (0x1F12E, 'M', u'wz'), (0x1F12F, 'V'), (0x1F130, 'M', u'a'), (0x1F131, 'M', u'b'), (0x1F132, 'M', u'c'), (0x1F133, 'M', u'd'), (0x1F134, 'M', u'e'), (0x1F135, 'M', u'f'), (0x1F136, 'M', u'g'), (0x1F137, 'M', u'h'), (0x1F138, 'M', u'i'), (0x1F139, 'M', u'j'), (0x1F13A, 'M', u'k'), (0x1F13B, 'M', u'l'), (0x1F13C, 'M', u'm'), (0x1F13D, 'M', u'n'), (0x1F13E, 'M', u'o'), (0x1F13F, 'M', u'p'), (0x1F140, 'M', u'q'), (0x1F141, 'M', u'r'), (0x1F142, 'M', u's'), (0x1F143, 'M', u't'), (0x1F144, 'M', u'u'), (0x1F145, 'M', u'v'), (0x1F146, 'M', u'w'), (0x1F147, 'M', u'x'), (0x1F148, 'M', u'y'), (0x1F149, 'M', u'z'), (0x1F14A, 'M', u'hv'), (0x1F14B, 'M', u'mv'), (0x1F14C, 'M', u'sd'), (0x1F14D, 'M', u'ss'), (0x1F14E, 'M', u'ppv'), (0x1F14F, 'M', u'wc'), (0x1F150, 'V'), (0x1F16A, 'M', u'mc'), (0x1F16B, 'M', u'md'), (0x1F16C, 'M', u'mr'), (0x1F16D, 'V'), (0x1F190, 'M', u'dj'), (0x1F191, 'V'), (0x1F1AE, 'X'), (0x1F1E6, 'V'), (0x1F200, 'M', u'ほか'), (0x1F201, 'M', u'ココ'), (0x1F202, 'M', u'サ'), (0x1F203, 'X'), (0x1F210, 'M', u'手'), (0x1F211, 'M', u'字'), (0x1F212, 'M', u'双'), (0x1F213, 'M', u'デ'), (0x1F214, 'M', u'二'), (0x1F215, 'M', u'多'), (0x1F216, 'M', u'解'), (0x1F217, 'M', u'天'), (0x1F218, 'M', u'交'), (0x1F219, 'M', u'映'), (0x1F21A, 'M', u'無'), (0x1F21B, 'M', u'料'), (0x1F21C, 'M', u'前'), (0x1F21D, 'M', u'後'), (0x1F21E, 'M', u'再'), (0x1F21F, 'M', u'新'), (0x1F220, 'M', u'初'), (0x1F221, 'M', u'終'), (0x1F222, 'M', u'生'), (0x1F223, 'M', u'販'), ] def _seg_73(): return [ (0x1F224, 'M', u'声'), (0x1F225, 'M', u'吹'), (0x1F226, 'M', u'演'), (0x1F227, 'M', u'投'), (0x1F228, 'M', u'捕'), (0x1F229, 'M', u'一'), (0x1F22A, 'M', u'三'), (0x1F22B, 'M', u'遊'), (0x1F22C, 'M', u'左'), (0x1F22D, 'M', u'中'), (0x1F22E, 'M', u'右'), (0x1F22F, 'M', u'指'), (0x1F230, 'M', u'走'), (0x1F231, 'M', u'打'), (0x1F232, 'M', u'禁'), (0x1F233, 'M', u'空'), 
(0x1F234, 'M', u'合'), (0x1F235, 'M', u'満'), (0x1F236, 'M', u'有'), (0x1F237, 'M', u'月'), (0x1F238, 'M', u'申'), (0x1F239, 'M', u'割'), (0x1F23A, 'M', u'営'), (0x1F23B, 'M', u'配'), (0x1F23C, 'X'), (0x1F240, 'M', u'〔本〕'), (0x1F241, 'M', u'〔三〕'), (0x1F242, 'M', u'〔二〕'), (0x1F243, 'M', u'〔安〕'), (0x1F244, 'M', u'〔点〕'), (0x1F245, 'M', u'〔打〕'), (0x1F246, 'M', u'〔盗〕'), (0x1F247, 'M', u'〔勝〕'), (0x1F248, 'M', u'〔敗〕'), (0x1F249, 'X'), (0x1F250, 'M', u'得'), (0x1F251, 'M', u'可'), (0x1F252, 'X'), (0x1F260, 'V'), (0x1F266, 'X'), (0x1F300, 'V'), (0x1F6D8, 'X'), (0x1F6E0, 'V'), (0x1F6ED, 'X'), (0x1F6F0, 'V'), (0x1F6FD, 'X'), (0x1F700, 'V'), (0x1F774, 'X'), (0x1F780, 'V'), (0x1F7D9, 'X'), (0x1F7E0, 'V'), (0x1F7EC, 'X'), (0x1F800, 'V'), (0x1F80C, 'X'), (0x1F810, 'V'), (0x1F848, 'X'), (0x1F850, 'V'), (0x1F85A, 'X'), (0x1F860, 'V'), (0x1F888, 'X'), (0x1F890, 'V'), (0x1F8AE, 'X'), (0x1F8B0, 'V'), (0x1F8B2, 'X'), (0x1F900, 'V'), (0x1F979, 'X'), (0x1F97A, 'V'), (0x1F9CC, 'X'), (0x1F9CD, 'V'), (0x1FA54, 'X'), (0x1FA60, 'V'), (0x1FA6E, 'X'), (0x1FA70, 'V'), (0x1FA75, 'X'), (0x1FA78, 'V'), (0x1FA7B, 'X'), (0x1FA80, 'V'), (0x1FA87, 'X'), (0x1FA90, 'V'), (0x1FAA9, 'X'), (0x1FAB0, 'V'), (0x1FAB7, 'X'), (0x1FAC0, 'V'), (0x1FAC3, 'X'), (0x1FAD0, 'V'), (0x1FAD7, 'X'), (0x1FB00, 'V'), (0x1FB93, 'X'), (0x1FB94, 'V'), (0x1FBCB, 'X'), (0x1FBF0, 'M', u'0'), (0x1FBF1, 'M', u'1'), (0x1FBF2, 'M', u'2'), (0x1FBF3, 'M', u'3'), (0x1FBF4, 'M', u'4'), (0x1FBF5, 'M', u'5'), (0x1FBF6, 'M', u'6'), (0x1FBF7, 'M', u'7'), (0x1FBF8, 'M', u'8'), (0x1FBF9, 'M', u'9'), ] def _seg_74(): return [ (0x1FBFA, 'X'), (0x20000, 'V'), (0x2A6DE, 'X'), (0x2A700, 'V'), (0x2B735, 'X'), (0x2B740, 'V'), (0x2B81E, 'X'), (0x2B820, 'V'), (0x2CEA2, 'X'), (0x2CEB0, 'V'), (0x2EBE1, 'X'), (0x2F800, 'M', u'丽'), (0x2F801, 'M', u'丸'), (0x2F802, 'M', u'乁'), (0x2F803, 'M', u'𠄢'), (0x2F804, 'M', u'你'), (0x2F805, 'M', u'侮'), (0x2F806, 'M', u'侻'), (0x2F807, 'M', u'倂'), (0x2F808, 'M', u'偺'), (0x2F809, 'M', u'備'), (0x2F80A, 'M', u'僧'), (0x2F80B, 'M', u'像'), (0x2F80C, 'M', u'㒞'), (0x2F80D, 'M', u'𠘺'), (0x2F80E, 'M', u'免'), (0x2F80F, 'M', u'兔'), (0x2F810, 'M', u'兤'), (0x2F811, 'M', u'具'), (0x2F812, 'M', u'𠔜'), (0x2F813, 'M', u'㒹'), (0x2F814, 'M', u'內'), (0x2F815, 'M', u'再'), (0x2F816, 'M', u'𠕋'), (0x2F817, 'M', u'冗'), (0x2F818, 'M', u'冤'), (0x2F819, 'M', u'仌'), (0x2F81A, 'M', u'冬'), (0x2F81B, 'M', u'况'), (0x2F81C, 'M', u'𩇟'), (0x2F81D, 'M', u'凵'), (0x2F81E, 'M', u'刃'), (0x2F81F, 'M', u'㓟'), (0x2F820, 'M', u'刻'), (0x2F821, 'M', u'剆'), (0x2F822, 'M', u'割'), (0x2F823, 'M', u'剷'), (0x2F824, 'M', u'㔕'), (0x2F825, 'M', u'勇'), (0x2F826, 'M', u'勉'), (0x2F827, 'M', u'勤'), (0x2F828, 'M', u'勺'), (0x2F829, 'M', u'包'), (0x2F82A, 'M', u'匆'), (0x2F82B, 'M', u'北'), (0x2F82C, 'M', u'卉'), (0x2F82D, 'M', u'卑'), (0x2F82E, 'M', u'博'), (0x2F82F, 'M', u'即'), (0x2F830, 'M', u'卽'), (0x2F831, 'M', u'卿'), (0x2F834, 'M', u'𠨬'), (0x2F835, 'M', u'灰'), (0x2F836, 'M', u'及'), (0x2F837, 'M', u'叟'), (0x2F838, 'M', u'𠭣'), (0x2F839, 'M', u'叫'), (0x2F83A, 'M', u'叱'), (0x2F83B, 'M', u'吆'), (0x2F83C, 'M', u'咞'), (0x2F83D, 'M', u'吸'), (0x2F83E, 'M', u'呈'), (0x2F83F, 'M', u'周'), (0x2F840, 'M', u'咢'), (0x2F841, 'M', u'哶'), (0x2F842, 'M', u'唐'), (0x2F843, 'M', u'啓'), (0x2F844, 'M', u'啣'), (0x2F845, 'M', u'善'), (0x2F847, 'M', u'喙'), (0x2F848, 'M', u'喫'), (0x2F849, 'M', u'喳'), (0x2F84A, 'M', u'嗂'), (0x2F84B, 'M', u'圖'), (0x2F84C, 'M', u'嘆'), (0x2F84D, 'M', u'圗'), (0x2F84E, 'M', u'噑'), (0x2F84F, 'M', u'噴'), (0x2F850, 'M', u'切'), (0x2F851, 'M', u'壮'), (0x2F852, 'M', u'城'), (0x2F853, 'M', u'埴'), (0x2F854, 'M', u'堍'), (0x2F855, 
'M', u'型'), (0x2F856, 'M', u'堲'), (0x2F857, 'M', u'報'), (0x2F858, 'M', u'墬'), (0x2F859, 'M', u'𡓤'), (0x2F85A, 'M', u'売'), (0x2F85B, 'M', u'壷'), ] def _seg_75(): return [ (0x2F85C, 'M', u'夆'), (0x2F85D, 'M', u'多'), (0x2F85E, 'M', u'夢'), (0x2F85F, 'M', u'奢'), (0x2F860, 'M', u'𡚨'), (0x2F861, 'M', u'𡛪'), (0x2F862, 'M', u'姬'), (0x2F863, 'M', u'娛'), (0x2F864, 'M', u'娧'), (0x2F865, 'M', u'姘'), (0x2F866, 'M', u'婦'), (0x2F867, 'M', u'㛮'), (0x2F868, 'X'), (0x2F869, 'M', u'嬈'), (0x2F86A, 'M', u'嬾'), (0x2F86C, 'M', u'𡧈'), (0x2F86D, 'M', u'寃'), (0x2F86E, 'M', u'寘'), (0x2F86F, 'M', u'寧'), (0x2F870, 'M', u'寳'), (0x2F871, 'M', u'𡬘'), (0x2F872, 'M', u'寿'), (0x2F873, 'M', u'将'), (0x2F874, 'X'), (0x2F875, 'M', u'尢'), (0x2F876, 'M', u'㞁'), (0x2F877, 'M', u'屠'), (0x2F878, 'M', u'屮'), (0x2F879, 'M', u'峀'), (0x2F87A, 'M', u'岍'), (0x2F87B, 'M', u'𡷤'), (0x2F87C, 'M', u'嵃'), (0x2F87D, 'M', u'𡷦'), (0x2F87E, 'M', u'嵮'), (0x2F87F, 'M', u'嵫'), (0x2F880, 'M', u'嵼'), (0x2F881, 'M', u'巡'), (0x2F882, 'M', u'巢'), (0x2F883, 'M', u'㠯'), (0x2F884, 'M', u'巽'), (0x2F885, 'M', u'帨'), (0x2F886, 'M', u'帽'), (0x2F887, 'M', u'幩'), (0x2F888, 'M', u'㡢'), (0x2F889, 'M', u'𢆃'), (0x2F88A, 'M', u'㡼'), (0x2F88B, 'M', u'庰'), (0x2F88C, 'M', u'庳'), (0x2F88D, 'M', u'庶'), (0x2F88E, 'M', u'廊'), (0x2F88F, 'M', u'𪎒'), (0x2F890, 'M', u'廾'), (0x2F891, 'M', u'𢌱'), (0x2F893, 'M', u'舁'), (0x2F894, 'M', u'弢'), (0x2F896, 'M', u'㣇'), (0x2F897, 'M', u'𣊸'), (0x2F898, 'M', u'𦇚'), (0x2F899, 'M', u'形'), (0x2F89A, 'M', u'彫'), (0x2F89B, 'M', u'㣣'), (0x2F89C, 'M', u'徚'), (0x2F89D, 'M', u'忍'), (0x2F89E, 'M', u'志'), (0x2F89F, 'M', u'忹'), (0x2F8A0, 'M', u'悁'), (0x2F8A1, 'M', u'㤺'), (0x2F8A2, 'M', u'㤜'), (0x2F8A3, 'M', u'悔'), (0x2F8A4, 'M', u'𢛔'), (0x2F8A5, 'M', u'惇'), (0x2F8A6, 'M', u'慈'), (0x2F8A7, 'M', u'慌'), (0x2F8A8, 'M', u'慎'), (0x2F8A9, 'M', u'慌'), (0x2F8AA, 'M', u'慺'), (0x2F8AB, 'M', u'憎'), (0x2F8AC, 'M', u'憲'), (0x2F8AD, 'M', u'憤'), (0x2F8AE, 'M', u'憯'), (0x2F8AF, 'M', u'懞'), (0x2F8B0, 'M', u'懲'), (0x2F8B1, 'M', u'懶'), (0x2F8B2, 'M', u'成'), (0x2F8B3, 'M', u'戛'), (0x2F8B4, 'M', u'扝'), (0x2F8B5, 'M', u'抱'), (0x2F8B6, 'M', u'拔'), (0x2F8B7, 'M', u'捐'), (0x2F8B8, 'M', u'𢬌'), (0x2F8B9, 'M', u'挽'), (0x2F8BA, 'M', u'拼'), (0x2F8BB, 'M', u'捨'), (0x2F8BC, 'M', u'掃'), (0x2F8BD, 'M', u'揤'), (0x2F8BE, 'M', u'𢯱'), (0x2F8BF, 'M', u'搢'), (0x2F8C0, 'M', u'揅'), (0x2F8C1, 'M', u'掩'), (0x2F8C2, 'M', u'㨮'), ] def _seg_76(): return [ (0x2F8C3, 'M', u'摩'), (0x2F8C4, 'M', u'摾'), (0x2F8C5, 'M', u'撝'), (0x2F8C6, 'M', u'摷'), (0x2F8C7, 'M', u'㩬'), (0x2F8C8, 'M', u'敏'), (0x2F8C9, 'M', u'敬'), (0x2F8CA, 'M', u'𣀊'), (0x2F8CB, 'M', u'旣'), (0x2F8CC, 'M', u'書'), (0x2F8CD, 'M', u'晉'), (0x2F8CE, 'M', u'㬙'), (0x2F8CF, 'M', u'暑'), (0x2F8D0, 'M', u'㬈'), (0x2F8D1, 'M', u'㫤'), (0x2F8D2, 'M', u'冒'), (0x2F8D3, 'M', u'冕'), (0x2F8D4, 'M', u'最'), (0x2F8D5, 'M', u'暜'), (0x2F8D6, 'M', u'肭'), (0x2F8D7, 'M', u'䏙'), (0x2F8D8, 'M', u'朗'), (0x2F8D9, 'M', u'望'), (0x2F8DA, 'M', u'朡'), (0x2F8DB, 'M', u'杞'), (0x2F8DC, 'M', u'杓'), (0x2F8DD, 'M', u'𣏃'), (0x2F8DE, 'M', u'㭉'), (0x2F8DF, 'M', u'柺'), (0x2F8E0, 'M', u'枅'), (0x2F8E1, 'M', u'桒'), (0x2F8E2, 'M', u'梅'), (0x2F8E3, 'M', u'𣑭'), (0x2F8E4, 'M', u'梎'), (0x2F8E5, 'M', u'栟'), (0x2F8E6, 'M', u'椔'), (0x2F8E7, 'M', u'㮝'), (0x2F8E8, 'M', u'楂'), (0x2F8E9, 'M', u'榣'), (0x2F8EA, 'M', u'槪'), (0x2F8EB, 'M', u'檨'), (0x2F8EC, 'M', u'𣚣'), (0x2F8ED, 'M', u'櫛'), (0x2F8EE, 'M', u'㰘'), (0x2F8EF, 'M', u'次'), (0x2F8F0, 'M', u'𣢧'), (0x2F8F1, 'M', u'歔'), (0x2F8F2, 'M', u'㱎'), (0x2F8F3, 'M', u'歲'), (0x2F8F4, 'M', u'殟'), (0x2F8F5, 'M', u'殺'), (0x2F8F6, 'M', u'殻'), (0x2F8F7, 'M', u'𣪍'), 
(0x2F8F8, 'M', u'𡴋'), (0x2F8F9, 'M', u'𣫺'), (0x2F8FA, 'M', u'汎'), (0x2F8FB, 'M', u'𣲼'), (0x2F8FC, 'M', u'沿'), (0x2F8FD, 'M', u'泍'), (0x2F8FE, 'M', u'汧'), (0x2F8FF, 'M', u'洖'), (0x2F900, 'M', u'派'), (0x2F901, 'M', u'海'), (0x2F902, 'M', u'流'), (0x2F903, 'M', u'浩'), (0x2F904, 'M', u'浸'), (0x2F905, 'M', u'涅'), (0x2F906, 'M', u'𣴞'), (0x2F907, 'M', u'洴'), (0x2F908, 'M', u'港'), (0x2F909, 'M', u'湮'), (0x2F90A, 'M', u'㴳'), (0x2F90B, 'M', u'滋'), (0x2F90C, 'M', u'滇'), (0x2F90D, 'M', u'𣻑'), (0x2F90E, 'M', u'淹'), (0x2F90F, 'M', u'潮'), (0x2F910, 'M', u'𣽞'), (0x2F911, 'M', u'𣾎'), (0x2F912, 'M', u'濆'), (0x2F913, 'M', u'瀹'), (0x2F914, 'M', u'瀞'), (0x2F915, 'M', u'瀛'), (0x2F916, 'M', u'㶖'), (0x2F917, 'M', u'灊'), (0x2F918, 'M', u'災'), (0x2F919, 'M', u'灷'), (0x2F91A, 'M', u'炭'), (0x2F91B, 'M', u'𠔥'), (0x2F91C, 'M', u'煅'), (0x2F91D, 'M', u'𤉣'), (0x2F91E, 'M', u'熜'), (0x2F91F, 'X'), (0x2F920, 'M', u'爨'), (0x2F921, 'M', u'爵'), (0x2F922, 'M', u'牐'), (0x2F923, 'M', u'𤘈'), (0x2F924, 'M', u'犀'), (0x2F925, 'M', u'犕'), (0x2F926, 'M', u'𤜵'), ] def _seg_77(): return [ (0x2F927, 'M', u'𤠔'), (0x2F928, 'M', u'獺'), (0x2F929, 'M', u'王'), (0x2F92A, 'M', u'㺬'), (0x2F92B, 'M', u'玥'), (0x2F92C, 'M', u'㺸'), (0x2F92E, 'M', u'瑇'), (0x2F92F, 'M', u'瑜'), (0x2F930, 'M', u'瑱'), (0x2F931, 'M', u'璅'), (0x2F932, 'M', u'瓊'), (0x2F933, 'M', u'㼛'), (0x2F934, 'M', u'甤'), (0x2F935, 'M', u'𤰶'), (0x2F936, 'M', u'甾'), (0x2F937, 'M', u'𤲒'), (0x2F938, 'M', u'異'), (0x2F939, 'M', u'𢆟'), (0x2F93A, 'M', u'瘐'), (0x2F93B, 'M', u'𤾡'), (0x2F93C, 'M', u'𤾸'), (0x2F93D, 'M', u'𥁄'), (0x2F93E, 'M', u'㿼'), (0x2F93F, 'M', u'䀈'), (0x2F940, 'M', u'直'), (0x2F941, 'M', u'𥃳'), (0x2F942, 'M', u'𥃲'), (0x2F943, 'M', u'𥄙'), (0x2F944, 'M', u'𥄳'), (0x2F945, 'M', u'眞'), (0x2F946, 'M', u'真'), (0x2F948, 'M', u'睊'), (0x2F949, 'M', u'䀹'), (0x2F94A, 'M', u'瞋'), (0x2F94B, 'M', u'䁆'), (0x2F94C, 'M', u'䂖'), (0x2F94D, 'M', u'𥐝'), (0x2F94E, 'M', u'硎'), (0x2F94F, 'M', u'碌'), (0x2F950, 'M', u'磌'), (0x2F951, 'M', u'䃣'), (0x2F952, 'M', u'𥘦'), (0x2F953, 'M', u'祖'), (0x2F954, 'M', u'𥚚'), (0x2F955, 'M', u'𥛅'), (0x2F956, 'M', u'福'), (0x2F957, 'M', u'秫'), (0x2F958, 'M', u'䄯'), (0x2F959, 'M', u'穀'), (0x2F95A, 'M', u'穊'), (0x2F95B, 'M', u'穏'), (0x2F95C, 'M', u'𥥼'), (0x2F95D, 'M', u'𥪧'), (0x2F95F, 'X'), (0x2F960, 'M', u'䈂'), (0x2F961, 'M', u'𥮫'), (0x2F962, 'M', u'篆'), (0x2F963, 'M', u'築'), (0x2F964, 'M', u'䈧'), (0x2F965, 'M', u'𥲀'), (0x2F966, 'M', u'糒'), (0x2F967, 'M', u'䊠'), (0x2F968, 'M', u'糨'), (0x2F969, 'M', u'糣'), (0x2F96A, 'M', u'紀'), (0x2F96B, 'M', u'𥾆'), (0x2F96C, 'M', u'絣'), (0x2F96D, 'M', u'䌁'), (0x2F96E, 'M', u'緇'), (0x2F96F, 'M', u'縂'), (0x2F970, 'M', u'繅'), (0x2F971, 'M', u'䌴'), (0x2F972, 'M', u'𦈨'), (0x2F973, 'M', u'𦉇'), (0x2F974, 'M', u'䍙'), (0x2F975, 'M', u'𦋙'), (0x2F976, 'M', u'罺'), (0x2F977, 'M', u'𦌾'), (0x2F978, 'M', u'羕'), (0x2F979, 'M', u'翺'), (0x2F97A, 'M', u'者'), (0x2F97B, 'M', u'𦓚'), (0x2F97C, 'M', u'𦔣'), (0x2F97D, 'M', u'聠'), (0x2F97E, 'M', u'𦖨'), (0x2F97F, 'M', u'聰'), (0x2F980, 'M', u'𣍟'), (0x2F981, 'M', u'䏕'), (0x2F982, 'M', u'育'), (0x2F983, 'M', u'脃'), (0x2F984, 'M', u'䐋'), (0x2F985, 'M', u'脾'), (0x2F986, 'M', u'媵'), (0x2F987, 'M', u'𦞧'), (0x2F988, 'M', u'𦞵'), (0x2F989, 'M', u'𣎓'), (0x2F98A, 'M', u'𣎜'), (0x2F98B, 'M', u'舁'), (0x2F98C, 'M', u'舄'), (0x2F98D, 'M', u'辞'), ] def _seg_78(): return [ (0x2F98E, 'M', u'䑫'), (0x2F98F, 'M', u'芑'), (0x2F990, 'M', u'芋'), (0x2F991, 'M', u'芝'), (0x2F992, 'M', u'劳'), (0x2F993, 'M', u'花'), (0x2F994, 'M', u'芳'), (0x2F995, 'M', u'芽'), (0x2F996, 'M', u'苦'), (0x2F997, 'M', u'𦬼'), (0x2F998, 'M', u'若'), (0x2F999, 'M', u'茝'), (0x2F99A, 'M', 
u'荣'), (0x2F99B, 'M', u'莭'), (0x2F99C, 'M', u'茣'), (0x2F99D, 'M', u'莽'), (0x2F99E, 'M', u'菧'), (0x2F99F, 'M', u'著'), (0x2F9A0, 'M', u'荓'), (0x2F9A1, 'M', u'菊'), (0x2F9A2, 'M', u'菌'), (0x2F9A3, 'M', u'菜'), (0x2F9A4, 'M', u'𦰶'), (0x2F9A5, 'M', u'𦵫'), (0x2F9A6, 'M', u'𦳕'), (0x2F9A7, 'M', u'䔫'), (0x2F9A8, 'M', u'蓱'), (0x2F9A9, 'M', u'蓳'), (0x2F9AA, 'M', u'蔖'), (0x2F9AB, 'M', u'𧏊'), (0x2F9AC, 'M', u'蕤'), (0x2F9AD, 'M', u'𦼬'), (0x2F9AE, 'M', u'䕝'), (0x2F9AF, 'M', u'䕡'), (0x2F9B0, 'M', u'𦾱'), (0x2F9B1, 'M', u'𧃒'), (0x2F9B2, 'M', u'䕫'), (0x2F9B3, 'M', u'虐'), (0x2F9B4, 'M', u'虜'), (0x2F9B5, 'M', u'虧'), (0x2F9B6, 'M', u'虩'), (0x2F9B7, 'M', u'蚩'), (0x2F9B8, 'M', u'蚈'), (0x2F9B9, 'M', u'蜎'), (0x2F9BA, 'M', u'蛢'), (0x2F9BB, 'M', u'蝹'), (0x2F9BC, 'M', u'蜨'), (0x2F9BD, 'M', u'蝫'), (0x2F9BE, 'M', u'螆'), (0x2F9BF, 'X'), (0x2F9C0, 'M', u'蟡'), (0x2F9C1, 'M', u'蠁'), (0x2F9C2, 'M', u'䗹'), (0x2F9C3, 'M', u'衠'), (0x2F9C4, 'M', u'衣'), (0x2F9C5, 'M', u'𧙧'), (0x2F9C6, 'M', u'裗'), (0x2F9C7, 'M', u'裞'), (0x2F9C8, 'M', u'䘵'), (0x2F9C9, 'M', u'裺'), (0x2F9CA, 'M', u'㒻'), (0x2F9CB, 'M', u'𧢮'), (0x2F9CC, 'M', u'𧥦'), (0x2F9CD, 'M', u'䚾'), (0x2F9CE, 'M', u'䛇'), (0x2F9CF, 'M', u'誠'), (0x2F9D0, 'M', u'諭'), (0x2F9D1, 'M', u'變'), (0x2F9D2, 'M', u'豕'), (0x2F9D3, 'M', u'𧲨'), (0x2F9D4, 'M', u'貫'), (0x2F9D5, 'M', u'賁'), (0x2F9D6, 'M', u'贛'), (0x2F9D7, 'M', u'起'), (0x2F9D8, 'M', u'𧼯'), (0x2F9D9, 'M', u'𠠄'), (0x2F9DA, 'M', u'跋'), (0x2F9DB, 'M', u'趼'), (0x2F9DC, 'M', u'跰'), (0x2F9DD, 'M', u'𠣞'), (0x2F9DE, 'M', u'軔'), (0x2F9DF, 'M', u'輸'), (0x2F9E0, 'M', u'𨗒'), (0x2F9E1, 'M', u'𨗭'), (0x2F9E2, 'M', u'邔'), (0x2F9E3, 'M', u'郱'), (0x2F9E4, 'M', u'鄑'), (0x2F9E5, 'M', u'𨜮'), (0x2F9E6, 'M', u'鄛'), (0x2F9E7, 'M', u'鈸'), (0x2F9E8, 'M', u'鋗'), (0x2F9E9, 'M', u'鋘'), (0x2F9EA, 'M', u'鉼'), (0x2F9EB, 'M', u'鏹'), (0x2F9EC, 'M', u'鐕'), (0x2F9ED, 'M', u'𨯺'), (0x2F9EE, 'M', u'開'), (0x2F9EF, 'M', u'䦕'), (0x2F9F0, 'M', u'閷'), (0x2F9F1, 'M', u'𨵷'), ] def _seg_79(): return [ (0x2F9F2, 'M', u'䧦'), (0x2F9F3, 'M', u'雃'), (0x2F9F4, 'M', u'嶲'), (0x2F9F5, 'M', u'霣'), (0x2F9F6, 'M', u'𩅅'), (0x2F9F7, 'M', u'𩈚'), (0x2F9F8, 'M', u'䩮'), (0x2F9F9, 'M', u'䩶'), (0x2F9FA, 'M', u'韠'), (0x2F9FB, 'M', u'𩐊'), (0x2F9FC, 'M', u'䪲'), (0x2F9FD, 'M', u'𩒖'), (0x2F9FE, 'M', u'頋'), (0x2FA00, 'M', u'頩'), (0x2FA01, 'M', u'𩖶'), (0x2FA02, 'M', u'飢'), (0x2FA03, 'M', u'䬳'), (0x2FA04, 'M', u'餩'), (0x2FA05, 'M', u'馧'), (0x2FA06, 'M', u'駂'), (0x2FA07, 'M', u'駾'), (0x2FA08, 'M', u'䯎'), (0x2FA09, 'M', u'𩬰'), (0x2FA0A, 'M', u'鬒'), (0x2FA0B, 'M', u'鱀'), (0x2FA0C, 'M', u'鳽'), (0x2FA0D, 'M', u'䳎'), (0x2FA0E, 'M', u'䳭'), (0x2FA0F, 'M', u'鵧'), (0x2FA10, 'M', u'𪃎'), (0x2FA11, 'M', u'䳸'), (0x2FA12, 'M', u'𪄅'), (0x2FA13, 'M', u'𪈎'), (0x2FA14, 'M', u'𪊑'), (0x2FA15, 'M', u'麻'), (0x2FA16, 'M', u'䵖'), (0x2FA17, 'M', u'黹'), (0x2FA18, 'M', u'黾'), (0x2FA19, 'M', u'鼅'), (0x2FA1A, 'M', u'鼏'), (0x2FA1B, 'M', u'鼖'), (0x2FA1C, 'M', u'鼻'), (0x2FA1D, 'M', u'𪘀'), (0x2FA1E, 'X'), (0x30000, 'V'), (0x3134B, 'X'), (0xE0100, 'I'), (0xE01F0, 'X'), ] uts46data = tuple( _seg_0() + _seg_1() + _seg_2() + _seg_3() + _seg_4() + _seg_5() + _seg_6() + _seg_7() + _seg_8() + _seg_9() + _seg_10() + _seg_11() + _seg_12() + _seg_13() + _seg_14() + _seg_15() + _seg_16() + _seg_17() + _seg_18() + _seg_19() + _seg_20() + _seg_21() + _seg_22() + _seg_23() + _seg_24() + _seg_25() + _seg_26() + _seg_27() + _seg_28() + _seg_29() + _seg_30() + _seg_31() + _seg_32() + _seg_33() + _seg_34() + _seg_35() + _seg_36() + _seg_37() + _seg_38() + _seg_39() + _seg_40() + _seg_41() + _seg_42() + _seg_43() + _seg_44() + _seg_45() + _seg_46() + _seg_47() + 
_seg_48() + _seg_49() + _seg_50() + _seg_51() + _seg_52() + _seg_53() + _seg_54() + _seg_55() + _seg_56() + _seg_57() + _seg_58() + _seg_59() + _seg_60() + _seg_61() + _seg_62() + _seg_63() + _seg_64() + _seg_65() + _seg_66() + _seg_67() + _seg_68() + _seg_69() + _seg_70() + _seg_71() + _seg_72() + _seg_73() + _seg_74() + _seg_75() + _seg_76() + _seg_77() + _seg_78() + _seg_79() )
0
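The uts46data table above is a flat, sorted sequence of tuples where each entry marks the start of a codepoint range: 'V' entries are valid as-is, 'M' entries map to the given replacement string, and 'X' entries are disallowed (other statuses such as '3', 'I', and 'D' also occur). A minimal sketch of how such a table can be queried with the standard-library bisect module follows; the miniature TABLE here is a hypothetical stand-in for the real data, not part of the vendored file.

# Minimal sketch: querying a uts46data-style table with bisect.
# TABLE is a hypothetical miniature stand-in for the real tuple data.
import bisect

TABLE = [
    (0x1D7CE, 'M', u'0'),   # range start: mapped to u'0'
    (0x1D7CF, 'M', u'1'),   # mapped to u'1'
    (0x1E922, 'V'),         # range start: valid as-is
    (0x1E94C, 'X'),         # range start: disallowed
]

def lookup(code_point):
    """Return the table entry governing code_point."""
    # Entries are sorted by starting codepoint and each governs the span
    # up to the next entry.  'Z' sorts after every status letter, so
    # bisect_left lands just past any entry that starts at code_point.
    idx = bisect.bisect_left(TABLE, (code_point, 'Z')) - 1
    return TABLE[idx]

print(lookup(0x1D7CE))  # (0x1D7CE, 'M', u'0')
print(lookup(0x1E930))  # (0x1E922, 'V') -- inside the valid range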
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/cookies.py
# -*- coding: utf-8 -*- """ requests.cookies ~~~~~~~~~~~~~~~~ Compatibility code to be able to use `cookielib.CookieJar` with requests. requests.utils imports from here, so be careful with imports. """ import copy import time import calendar from ._internal_utils import to_native_string from .compat import cookielib, urlparse, urlunparse, Morsel, MutableMapping try: import threading except ImportError: import dummy_threading as threading class MockRequest(object): """Wraps a `requests.Request` to mimic a `urllib2.Request`. The code in `cookielib.CookieJar` expects this interface in order to correctly manage cookie policies, i.e., determine whether a cookie can be set, given the domains of the request and the cookie. The original request object is read-only. The client is responsible for collecting the new headers via `get_new_headers()` and interpreting them appropriately. You probably want `get_cookie_header`, defined below. """ def __init__(self, request): self._r = request self._new_headers = {} self.type = urlparse(self._r.url).scheme def get_type(self): return self.type def get_host(self): return urlparse(self._r.url).netloc def get_origin_req_host(self): return self.get_host() def get_full_url(self): # Only return the response's URL if the user hasn't set the Host # header if not self._r.headers.get('Host'): return self._r.url # If they did set it, retrieve it and reconstruct the expected domain host = to_native_string(self._r.headers['Host'], encoding='utf-8') parsed = urlparse(self._r.url) # Reconstruct the URL as we expect it return urlunparse([ parsed.scheme, host, parsed.path, parsed.params, parsed.query, parsed.fragment ]) def is_unverifiable(self): return True def has_header(self, name): return name in self._r.headers or name in self._new_headers def get_header(self, name, default=None): return self._r.headers.get(name, self._new_headers.get(name, default)) def add_header(self, key, val): """cookielib has no legitimate use for this method; add it back if you find one.""" raise NotImplementedError("Cookie headers should be added with add_unredirected_header()") def add_unredirected_header(self, name, value): self._new_headers[name] = value def get_new_headers(self): return self._new_headers @property def unverifiable(self): return self.is_unverifiable() @property def origin_req_host(self): return self.get_origin_req_host() @property def host(self): return self.get_host() class MockResponse(object): """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`. ...what? Basically, expose the parsed HTTP headers from the server response the way `cookielib` expects to see them. """ def __init__(self, headers): """Make a MockResponse for `cookielib` to read. :param headers: a httplib.HTTPMessage or analogous carrying the headers """ self._headers = headers def info(self): return self._headers def getheaders(self, name): return self._headers.getheaders(name) def extract_cookies_to_jar(jar, request, response): """Extract the cookies from the response into a CookieJar.
:param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar) :param request: our own requests.Request object :param response: urllib3.HTTPResponse object """ if not (hasattr(response, '_original_response') and response._original_response): return # the _original_response field is the wrapped httplib.HTTPResponse object, req = MockRequest(request) # pull out the HTTPMessage with the headers and put it in the mock: res = MockResponse(response._original_response.msg) jar.extract_cookies(res, req) def get_cookie_header(jar, request): """ Produce an appropriate Cookie header string to be sent with `request`, or None. :rtype: str """ r = MockRequest(request) jar.add_cookie_header(r) return r.get_new_headers().get('Cookie') def remove_cookie_by_name(cookiejar, name, domain=None, path=None): """Unsets a cookie by name, by default over all domains and paths. Wraps CookieJar.clear(), is O(n). """ clearables = [] for cookie in cookiejar: if cookie.name != name: continue if domain is not None and domain != cookie.domain: continue if path is not None and path != cookie.path: continue clearables.append((cookie.domain, cookie.path, cookie.name)) for domain, path, name in clearables: cookiejar.clear(domain, path, name) class CookieConflictError(RuntimeError): """There are two cookies that meet the criteria specified in the cookie jar. Use .get and .set and include domain and path args in order to be more specific. """ class RequestsCookieJar(cookielib.CookieJar, MutableMapping): """Compatibility class; is a cookielib.CookieJar, but exposes a dict interface. This is the CookieJar we create by default for requests and sessions that don't specify one, since some clients may expect response.cookies and session.cookies to support dict operations. Requests does not use the dict interface internally; it's just for compatibility with external client code. All requests code should work out of the box with externally provided instances of ``CookieJar``, e.g. ``LWPCookieJar`` and ``FileCookieJar``. Unlike a regular CookieJar, this class is pickleable. .. warning:: dictionary operations that are normally O(1) may be O(n). """ def get(self, name, default=None, domain=None, path=None): """Dict-like get() that also supports optional domain and path args in order to resolve naming collisions from using one cookie jar over multiple domains. .. warning:: operation is O(n), not O(1). """ try: return self._find_no_duplicates(name, domain, path) except KeyError: return default def set(self, name, value, **kwargs): """Dict-like set() that also supports optional domain and path args in order to resolve naming collisions from using one cookie jar over multiple domains. """ # support client code that unsets cookies by assignment of a None value: if value is None: remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path')) return if isinstance(value, Morsel): c = morsel_to_cookie(value) else: c = create_cookie(name, value, **kwargs) self.set_cookie(c) return c def iterkeys(self): """Dict-like iterkeys() that returns an iterator of names of cookies from the jar. .. seealso:: itervalues() and iteritems(). """ for cookie in iter(self): yield cookie.name def keys(self): """Dict-like keys() that returns a list of names of cookies from the jar. .. seealso:: values() and items(). """ return list(self.iterkeys()) def itervalues(self): """Dict-like itervalues() that returns an iterator of values of cookies from the jar. .. seealso:: iterkeys() and iteritems(). 
""" for cookie in iter(self): yield cookie.value def values(self): """Dict-like values() that returns a list of values of cookies from the jar. .. seealso:: keys() and items(). """ return list(self.itervalues()) def iteritems(self): """Dict-like iteritems() that returns an iterator of name-value tuples from the jar. .. seealso:: iterkeys() and itervalues(). """ for cookie in iter(self): yield cookie.name, cookie.value def items(self): """Dict-like items() that returns a list of name-value tuples from the jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a vanilla python dict of key value pairs. .. seealso:: keys() and values(). """ return list(self.iteritems()) def list_domains(self): """Utility method to list all the domains in the jar.""" domains = [] for cookie in iter(self): if cookie.domain not in domains: domains.append(cookie.domain) return domains def list_paths(self): """Utility method to list all the paths in the jar.""" paths = [] for cookie in iter(self): if cookie.path not in paths: paths.append(cookie.path) return paths def multiple_domains(self): """Returns True if there are multiple domains in the jar. Returns False otherwise. :rtype: bool """ domains = [] for cookie in iter(self): if cookie.domain is not None and cookie.domain in domains: return True domains.append(cookie.domain) return False # there is only one domain in jar def get_dict(self, domain=None, path=None): """Takes as an argument an optional domain and path and returns a plain old Python dict of name-value pairs of cookies that meet the requirements. :rtype: dict """ dictionary = {} for cookie in iter(self): if ( (domain is None or cookie.domain == domain) and (path is None or cookie.path == path) ): dictionary[cookie.name] = cookie.value return dictionary def __contains__(self, name): try: return super(RequestsCookieJar, self).__contains__(name) except CookieConflictError: return True def __getitem__(self, name): """Dict-like __getitem__() for compatibility with client code. Throws exception if there are more than one cookie with name. In that case, use the more explicit get() method instead. .. warning:: operation is O(n), not O(1). """ return self._find_no_duplicates(name) def __setitem__(self, name, value): """Dict-like __setitem__ for compatibility with client code. Throws exception if there is already a cookie of that name in the jar. In that case, use the more explicit set() method instead. """ self.set(name, value) def __delitem__(self, name): """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s ``remove_cookie_by_name()``. """ remove_cookie_by_name(self, name) def set_cookie(self, cookie, *args, **kwargs): if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'): cookie.value = cookie.value.replace('\\"', '') return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs) def update(self, other): """Updates this jar with cookies from another CookieJar or dict-like""" if isinstance(other, cookielib.CookieJar): for cookie in other: self.set_cookie(copy.copy(cookie)) else: super(RequestsCookieJar, self).update(other) def _find(self, name, domain=None, path=None): """Requests uses this method internally to get cookie values. If there are conflicting cookies, _find arbitrarily chooses one. See _find_no_duplicates if you want an exception thrown if there are conflicting cookies. 
:param name: a string containing name of cookie :param domain: (optional) string containing domain of cookie :param path: (optional) string containing path of cookie :return: cookie.value """ for cookie in iter(self): if cookie.name == name: if domain is None or cookie.domain == domain: if path is None or cookie.path == path: return cookie.value raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) def _find_no_duplicates(self, name, domain=None, path=None): """Both ``__getitem__`` and ``get`` call this function: it's never used elsewhere in Requests. :param name: a string containing name of cookie :param domain: (optional) string containing domain of cookie :param path: (optional) string containing path of cookie :raises KeyError: if cookie is not found :raises CookieConflictError: if there are multiple cookies that match name and optionally domain and path :return: cookie.value """ toReturn = None for cookie in iter(self): if cookie.name == name: if domain is None or cookie.domain == domain: if path is None or cookie.path == path: if toReturn is not None: # if there are multiple cookies that meet the passed-in criteria raise CookieConflictError('There are multiple cookies with name %r' % (name)) toReturn = cookie.value # we will eventually return this as long as no cookie conflict if toReturn is not None: return toReturn raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) def __getstate__(self): """Unlike a normal CookieJar, this class is pickleable.""" state = self.__dict__.copy() # remove the unpickleable RLock object state.pop('_cookies_lock') return state def __setstate__(self, state): """Unlike a normal CookieJar, this class is pickleable.""" self.__dict__.update(state) if '_cookies_lock' not in self.__dict__: self._cookies_lock = threading.RLock() def copy(self): """Return a copy of this RequestsCookieJar.""" new_cj = RequestsCookieJar() new_cj.set_policy(self.get_policy()) new_cj.update(self) return new_cj def get_policy(self): """Return the CookiePolicy instance used.""" return self._policy def _copy_cookie_jar(jar): if jar is None: return None if hasattr(jar, 'copy'): # We're dealing with an instance of RequestsCookieJar return jar.copy() # We're dealing with a generic CookieJar instance new_jar = copy.copy(jar) new_jar.clear() for cookie in jar: new_jar.set_cookie(copy.copy(cookie)) return new_jar def create_cookie(name, value, **kwargs): """Make a cookie from underspecified parameters. By default, the pair of `name` and `value` will be set for the domain '' and sent on every request (this is sometimes called a "supercookie").
""" result = { 'version': 0, 'name': name, 'value': value, 'port': None, 'domain': '', 'path': '/', 'secure': False, 'expires': None, 'discard': True, 'comment': None, 'comment_url': None, 'rest': {'HttpOnly': None}, 'rfc2109': False, } badargs = set(kwargs) - set(result) if badargs: err = 'create_cookie() got unexpected keyword arguments: %s' raise TypeError(err % list(badargs)) result.update(kwargs) result['port_specified'] = bool(result['port']) result['domain_specified'] = bool(result['domain']) result['domain_initial_dot'] = result['domain'].startswith('.') result['path_specified'] = bool(result['path']) return cookielib.Cookie(**result) def morsel_to_cookie(morsel): """Convert a Morsel object into a Cookie containing the one k/v pair.""" expires = None if morsel['max-age']: try: expires = int(time.time() + int(morsel['max-age'])) except ValueError: raise TypeError('max-age: %s must be integer' % morsel['max-age']) elif morsel['expires']: time_template = '%a, %d-%b-%Y %H:%M:%S GMT' expires = calendar.timegm( time.strptime(morsel['expires'], time_template) ) return create_cookie( comment=morsel['comment'], comment_url=bool(morsel['comment']), discard=False, domain=morsel['domain'], expires=expires, name=morsel.key, path=morsel['path'], port=None, rest={'HttpOnly': morsel['httponly']}, rfc2109=False, secure=bool(morsel['secure']), value=morsel.value, version=morsel['version'] or 0, ) def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True): """Returns a CookieJar from a key/value dictionary. :param cookie_dict: Dict of key/values to insert into CookieJar. :param cookiejar: (optional) A cookiejar to add the cookies to. :param overwrite: (optional) If False, will not replace cookies already in the jar with new ones. :rtype: CookieJar """ if cookiejar is None: cookiejar = RequestsCookieJar() if cookie_dict is not None: names_from_jar = [cookie.name for cookie in cookiejar] for name in cookie_dict: if overwrite or (name not in names_from_jar): cookiejar.set_cookie(create_cookie(name, cookie_dict[name])) return cookiejar def merge_cookies(cookiejar, cookies): """Add cookies to cookiejar and returns a merged CookieJar. :param cookiejar: CookieJar object to add the cookies to. :param cookies: Dictionary or CookieJar object to be added. :rtype: CookieJar """ if not isinstance(cookiejar, cookielib.CookieJar): raise ValueError('You can only merge into CookieJar') if isinstance(cookies, dict): cookiejar = cookiejar_from_dict( cookies, cookiejar=cookiejar, overwrite=False) elif isinstance(cookies, cookielib.CookieJar): try: cookiejar.update(cookies) except AttributeError: for cookie_in_jar in cookies: cookiejar.set_cookie(cookie_in_jar) return cookiejar
0
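A minimal usage sketch of the dict-like jar defined above; the cookie names and values are illustrative only. The set()/get() keyword arguments are forwarded to create_cookie() and _find_no_duplicates() as shown in the source.

# Minimal usage sketch for RequestsCookieJar / cookiejar_from_dict.
from requests.cookies import cookiejar_from_dict

jar = cookiejar_from_dict({'session_id': 'abc123'})      # returns a RequestsCookieJar
jar.set('pref', 'dark', domain='example.com', path='/')  # scoped set(); kwargs go to create_cookie()
print(jar.get('pref', domain='example.com'))             # 'dark'
print(dict(jar))                                         # plain dict with both cookies
del jar['pref']                                          # wraps remove_cookie_by_name()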
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/auth.py
# -*- coding: utf-8 -*- """ requests.auth ~~~~~~~~~~~~~ This module contains the authentication handlers for Requests. """ import os import re import time import hashlib import threading import warnings from base64 import b64encode from .compat import urlparse, str, basestring from .cookies import extract_cookies_to_jar from ._internal_utils import to_native_string from .utils import parse_dict_header CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' CONTENT_TYPE_MULTI_PART = 'multipart/form-data' def _basic_auth_str(username, password): """Returns a Basic Auth string.""" # "I want us to put a big-ol' comment on top of it that # says that this behaviour is dumb but we need to preserve # it because people are relying on it." # - Lukasa # # These are here solely to maintain backwards compatibility # for things like ints. This will be removed in 3.0.0. if not isinstance(username, basestring): warnings.warn( "Non-string usernames will no longer be supported in Requests " "3.0.0. Please convert the object you've passed in ({!r}) to " "a string or bytes object in the near future to avoid " "problems.".format(username), category=DeprecationWarning, ) username = str(username) if not isinstance(password, basestring): warnings.warn( "Non-string passwords will no longer be supported in Requests " "3.0.0. Please convert the object you've passed in ({!r}) to " "a string or bytes object in the near future to avoid " "problems.".format(type(password)), category=DeprecationWarning, ) password = str(password) # -- End Removal -- if isinstance(username, str): username = username.encode('latin1') if isinstance(password, str): password = password.encode('latin1') authstr = 'Basic ' + to_native_string( b64encode(b':'.join((username, password))).strip() ) return authstr class AuthBase(object): """Base class that all auth implementations derive from""" def __call__(self, r): raise NotImplementedError('Auth hooks must be callable.') class HTTPBasicAuth(AuthBase): """Attaches HTTP Basic Authentication to the given Request object.""" def __init__(self, username, password): self.username = username self.password = password def __eq__(self, other): return all([ self.username == getattr(other, 'username', None), self.password == getattr(other, 'password', None) ]) def __ne__(self, other): return not self == other def __call__(self, r): r.headers['Authorization'] = _basic_auth_str(self.username, self.password) return r class HTTPProxyAuth(HTTPBasicAuth): """Attaches HTTP Proxy Authentication to a given Request object.""" def __call__(self, r): r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password) return r class HTTPDigestAuth(AuthBase): """Attaches HTTP Digest Authentication to the given Request object.""" def __init__(self, username, password): self.username = username self.password = password # Keep state in per-thread local storage self._thread_local = threading.local() def init_per_thread_state(self): # Ensure state is initialized just once per-thread if not hasattr(self._thread_local, 'init'): self._thread_local.init = True self._thread_local.last_nonce = '' self._thread_local.nonce_count = 0 self._thread_local.chal = {} self._thread_local.pos = None self._thread_local.num_401_calls = None def build_digest_header(self, method, url): """ :rtype: str """ realm = self._thread_local.chal['realm'] nonce = self._thread_local.chal['nonce'] qop = self._thread_local.chal.get('qop') algorithm = self._thread_local.chal.get('algorithm') opaque = self._thread_local.chal.get('opaque') 
hash_utf8 = None if algorithm is None: _algorithm = 'MD5' else: _algorithm = algorithm.upper() # lambdas assume digest modules are imported at the top level if _algorithm == 'MD5' or _algorithm == 'MD5-SESS': def md5_utf8(x): if isinstance(x, str): x = x.encode('utf-8') return hashlib.md5(x).hexdigest() hash_utf8 = md5_utf8 elif _algorithm == 'SHA': def sha_utf8(x): if isinstance(x, str): x = x.encode('utf-8') return hashlib.sha1(x).hexdigest() hash_utf8 = sha_utf8 elif _algorithm == 'SHA-256': def sha256_utf8(x): if isinstance(x, str): x = x.encode('utf-8') return hashlib.sha256(x).hexdigest() hash_utf8 = sha256_utf8 elif _algorithm == 'SHA-512': def sha512_utf8(x): if isinstance(x, str): x = x.encode('utf-8') return hashlib.sha512(x).hexdigest() hash_utf8 = sha512_utf8 KD = lambda s, d: hash_utf8("%s:%s" % (s, d)) if hash_utf8 is None: return None # XXX not implemented yet entdig = None p_parsed = urlparse(url) #: path is request-uri defined in RFC 2616 which should not be empty path = p_parsed.path or "/" if p_parsed.query: path += '?' + p_parsed.query A1 = '%s:%s:%s' % (self.username, realm, self.password) A2 = '%s:%s' % (method, path) HA1 = hash_utf8(A1) HA2 = hash_utf8(A2) if nonce == self._thread_local.last_nonce: self._thread_local.nonce_count += 1 else: self._thread_local.nonce_count = 1 ncvalue = '%08x' % self._thread_local.nonce_count s = str(self._thread_local.nonce_count).encode('utf-8') s += nonce.encode('utf-8') s += time.ctime().encode('utf-8') s += os.urandom(8) cnonce = (hashlib.sha1(s).hexdigest()[:16]) if _algorithm == 'MD5-SESS': HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce)) if not qop: respdig = KD(HA1, "%s:%s" % (nonce, HA2)) elif qop == 'auth' or 'auth' in qop.split(','): noncebit = "%s:%s:%s:%s:%s" % ( nonce, ncvalue, cnonce, 'auth', HA2 ) respdig = KD(HA1, noncebit) else: # XXX handle auth-int. return None self._thread_local.last_nonce = nonce # XXX should the partial digests be encoded too? base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ 'response="%s"' % (self.username, realm, nonce, path, respdig) if opaque: base += ', opaque="%s"' % opaque if algorithm: base += ', algorithm="%s"' % algorithm if entdig: base += ', digest="%s"' % entdig if qop: base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce) return 'Digest %s' % (base) def handle_redirect(self, r, **kwargs): """Reset num_401_calls counter on redirects.""" if r.is_redirect: self._thread_local.num_401_calls = 1 def handle_401(self, r, **kwargs): """ Takes the given response and tries digest-auth, if needed. :rtype: requests.Response """ # If response is not 4xx, do not auth # See https://github.com/psf/requests/issues/3772 if not 400 <= r.status_code < 500: self._thread_local.num_401_calls = 1 return r if self._thread_local.pos is not None: # Rewind the file position indicator of the body to where # it was to resend the request. r.request.body.seek(self._thread_local.pos) s_auth = r.headers.get('www-authenticate', '') if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2: self._thread_local.num_401_calls += 1 pat = re.compile(r'digest ', flags=re.IGNORECASE) self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1)) # Consume content and release the original connection # to allow our new request to reuse the same one. 
r.content r.close() prep = r.request.copy() extract_cookies_to_jar(prep._cookies, r.request, r.raw) prep.prepare_cookies(prep._cookies) prep.headers['Authorization'] = self.build_digest_header( prep.method, prep.url) _r = r.connection.send(prep, **kwargs) _r.history.append(r) _r.request = prep return _r self._thread_local.num_401_calls = 1 return r def __call__(self, r): # Initialize per-thread state, if needed self.init_per_thread_state() # If we have a saved nonce, skip the 401 if self._thread_local.last_nonce: r.headers['Authorization'] = self.build_digest_header(r.method, r.url) try: self._thread_local.pos = r.body.tell() except AttributeError: # In the case of HTTPDigestAuth being reused and the body of # the previous request was a file-like object, pos has the # file position of the previous body. Ensure it's set to # None. self._thread_local.pos = None r.register_hook('response', self.handle_401) r.register_hook('response', self.handle_redirect) self._thread_local.num_401_calls = 1 return r def __eq__(self, other): return all([ self.username == getattr(other, 'username', None), self.password == getattr(other, 'password', None) ]) def __ne__(self, other): return not self == other
0
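A minimal usage sketch of the two handlers defined above. The httpbin.org endpoints are just a convenient public echo service and are assumed to be reachable; any server offering Basic or Digest challenges would do.

import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth

# Basic auth: __call__ sets the Authorization header up front.
r = requests.get('https://httpbin.org/basic-auth/user/pass',
                 auth=HTTPBasicAuth('user', 'pass'))
print(r.status_code)  # 200 with correct credentials

# Digest auth: the first response is a 401 challenge; the handle_401
# hook parses it, builds the Digest header, and resends once.
r = requests.get('https://httpbin.org/digest-auth/auth/user/pass',
                 auth=HTTPDigestAuth('user', 'pass'))
print(r.status_code)  # 200 after the challenge round-trip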
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/sessions.py
# -*- coding: utf-8 -*- """ requests.session ~~~~~~~~~~~~~~~~ This module provides a Session object to manage and persist settings across requests (cookies, auth, proxies). """ import os import sys import time from datetime import timedelta from collections import OrderedDict from .auth import _basic_auth_str from .compat import cookielib, is_py3, urljoin, urlparse, Mapping from .cookies import ( cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies) from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT from .hooks import default_hooks, dispatch_hook from ._internal_utils import to_native_string from .utils import to_key_val_list, default_headers, DEFAULT_PORTS from .exceptions import ( TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError) from .structures import CaseInsensitiveDict from .adapters import HTTPAdapter from .utils import ( requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies, get_auth_from_url, rewind_body ) from .status_codes import codes # formerly defined here, reexposed here for backward compatibility from .models import REDIRECT_STATI # Preferred clock, based on which one is more accurate on a given system. if sys.platform == 'win32': try: # Python 3.4+ preferred_clock = time.perf_counter except AttributeError: # Earlier than Python 3. preferred_clock = time.clock else: preferred_clock = time.time def merge_setting(request_setting, session_setting, dict_class=OrderedDict): """Determines appropriate setting for a given request, taking into account the explicit setting on that request, and the setting in the session. If a setting is a dictionary, they will be merged together using `dict_class` """ if session_setting is None: return request_setting if request_setting is None: return session_setting # Bypass if not a dictionary (e.g. verify) if not ( isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) ): return request_setting merged_setting = dict_class(to_key_val_list(session_setting)) merged_setting.update(to_key_val_list(request_setting)) # Remove keys that are set to None. Extract keys first to avoid altering # the dictionary during iteration. none_keys = [k for (k, v) in merged_setting.items() if v is None] for key in none_keys: del merged_setting[key] return merged_setting def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): """Properly merges both requests and session hooks. This is necessary because when request_hooks == {'response': []}, the merge breaks Session hooks entirely. """ if session_hooks is None or session_hooks.get('response') == []: return request_hooks if request_hooks is None or request_hooks.get('response') == []: return session_hooks return merge_setting(request_hooks, session_hooks, dict_class) class SessionRedirectMixin(object): def get_redirect_target(self, resp): """Receives a Response. Returns a redirect URI or ``None``""" # Due to the nature of how requests processes redirects this method will # be called at least once upon the original response and at least twice # on each subsequent redirect response (if any). # If a custom mixin is used to handle this logic, it may be advantageous # to cache the redirect location onto the response object as a private # attribute. if resp.is_redirect: location = resp.headers['location'] # Currently the underlying http module on py3 decode headers # in latin1, but empirical evidence suggests that latin1 is very # rarely used with non-ASCII characters in HTTP headers. 
# It is more likely to get UTF8 header rather than latin1. # This causes incorrect handling of UTF8 encoded location headers. # To solve this, we re-encode the location in latin1. if is_py3: location = location.encode('latin1') return to_native_string(location, 'utf8') return None def should_strip_auth(self, old_url, new_url): """Decide whether Authorization header should be removed when redirecting""" old_parsed = urlparse(old_url) new_parsed = urlparse(new_url) if old_parsed.hostname != new_parsed.hostname: return True # Special case: allow http -> https redirect when using the standard # ports. This isn't specified by RFC 7235, but is kept to avoid # breaking backwards compatibility with older versions of requests # that allowed any redirects on the same host. if (old_parsed.scheme == 'http' and old_parsed.port in (80, None) and new_parsed.scheme == 'https' and new_parsed.port in (443, None)): return False # Handle default port usage corresponding to scheme. changed_port = old_parsed.port != new_parsed.port changed_scheme = old_parsed.scheme != new_parsed.scheme default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) if (not changed_scheme and old_parsed.port in default_port and new_parsed.port in default_port): return False # Standard case: root URI must match return changed_port or changed_scheme def resolve_redirects(self, resp, req, stream=False, timeout=None, verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs): """Receives a Response. Returns a generator of Responses or Requests.""" hist = [] # keep track of history url = self.get_redirect_target(resp) previous_fragment = urlparse(req.url).fragment while url: prepared_request = req.copy() # Update history and keep track of redirects. # resp.history must ignore the original request in this loop hist.append(resp) resp.history = hist[1:] try: resp.content # Consume socket so it can be released except (ChunkedEncodingError, ContentDecodingError, RuntimeError): resp.raw.read(decode_content=False) if len(resp.history) >= self.max_redirects: raise TooManyRedirects('Exceeded {} redirects.'.format(self.max_redirects), response=resp) # Release the connection back into the pool. resp.close() # Handle redirection without scheme (see: RFC 1808 Section 4) if url.startswith('//'): parsed_rurl = urlparse(resp.url) url = ':'.join([to_native_string(parsed_rurl.scheme), url]) # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) parsed = urlparse(url) if parsed.fragment == '' and previous_fragment: parsed = parsed._replace(fragment=previous_fragment) elif parsed.fragment: previous_fragment = parsed.fragment url = parsed.geturl() # Facilitate relative 'location' headers, as allowed by RFC 7231. # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') # Compliant with RFC3986, we percent encode the url. if not parsed.netloc: url = urljoin(resp.url, requote_uri(url)) else: url = requote_uri(url) prepared_request.url = to_native_string(url) self.rebuild_method(prepared_request, resp) # https://github.com/psf/requests/issues/1084 if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect): # https://github.com/psf/requests/issues/3490 purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding') for header in purged_headers: prepared_request.headers.pop(header, None) prepared_request.body = None headers = prepared_request.headers headers.pop('Cookie', None) # Extract any cookies sent on the response to the cookiejar # in the new request. 
Because we've mutated our copied prepared # request, use the old one that we haven't yet touched. extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) merge_cookies(prepared_request._cookies, self.cookies) prepared_request.prepare_cookies(prepared_request._cookies) # Rebuild auth and proxy information. proxies = self.rebuild_proxies(prepared_request, proxies) self.rebuild_auth(prepared_request, resp) # A failed tell() sets `_body_position` to `object()`. This non-None # value ensures `rewindable` will be True, allowing us to raise an # UnrewindableBodyError, instead of hanging the connection. rewindable = ( prepared_request._body_position is not None and ('Content-Length' in headers or 'Transfer-Encoding' in headers) ) # Attempt to rewind consumed file-like object. if rewindable: rewind_body(prepared_request) # Override the original request. req = prepared_request if yield_requests: yield req else: resp = self.send( req, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, allow_redirects=False, **adapter_kwargs ) extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) # extract redirect url, if any, for the next loop url = self.get_redirect_target(resp) yield resp def rebuild_auth(self, prepared_request, response): """When being redirected we may want to strip authentication from the request to avoid leaking credentials. This method intelligently removes and reapplies authentication where possible to avoid credential loss. """ headers = prepared_request.headers url = prepared_request.url if 'Authorization' in headers and self.should_strip_auth(response.request.url, url): # If we get redirected to a new host, we should strip out any # authentication headers. del headers['Authorization'] # .netrc might have more auth for us on our new host. new_auth = get_netrc_auth(url) if self.trust_env else None if new_auth is not None: prepared_request.prepare_auth(new_auth) def rebuild_proxies(self, prepared_request, proxies): """This method re-evaluates the proxy configuration by considering the environment variables. If we are redirected to a URL covered by NO_PROXY, we strip the proxy configuration. Otherwise, we set missing proxy keys for this URL (in case they were stripped by a previous redirect). This method also replaces the Proxy-Authorization header where necessary. :rtype: dict """ proxies = proxies if proxies is not None else {} headers = prepared_request.headers url = prepared_request.url scheme = urlparse(url).scheme new_proxies = proxies.copy() no_proxy = proxies.get('no_proxy') bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy) if self.trust_env and not bypass_proxy: environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) proxy = environ_proxies.get(scheme, environ_proxies.get('all')) if proxy: new_proxies.setdefault(scheme, proxy) if 'Proxy-Authorization' in headers: del headers['Proxy-Authorization'] try: username, password = get_auth_from_url(new_proxies[scheme]) except KeyError: username, password = None, None if username and password: headers['Proxy-Authorization'] = _basic_auth_str(username, password) return new_proxies def rebuild_method(self, prepared_request, response): """When being redirected we may want to change the method of the request based on certain specs or browser behavior. """ method = prepared_request.method # https://tools.ietf.org/html/rfc7231#section-6.4.4 if response.status_code == codes.see_other and method != 'HEAD': method = 'GET' # Do what the browsers do, despite standards... 
# First, turn 302s into GETs. if response.status_code == codes.found and method != 'HEAD': method = 'GET' # Second, if a POST is responded to with a 301, turn it into a GET. # This bizarre behaviour is explained in Issue 1704. if response.status_code == codes.moved and method == 'POST': method = 'GET' prepared_request.method = method class Session(SessionRedirectMixin): """A Requests session. Provides cookie persistence, connection-pooling, and configuration. Basic Usage:: >>> import requests >>> s = requests.Session() >>> s.get('https://httpbin.org/get') <Response [200]> Or as a context manager:: >>> with requests.Session() as s: ... s.get('https://httpbin.org/get') <Response [200]> """ __attrs__ = [ 'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify', 'cert', 'adapters', 'stream', 'trust_env', 'max_redirects', ] def __init__(self): #: A case-insensitive dictionary of headers to be sent on each #: :class:`Request <Request>` sent from this #: :class:`Session <Session>`. self.headers = default_headers() #: Default Authentication tuple or object to attach to #: :class:`Request <Request>`. self.auth = None #: Dictionary mapping protocol or protocol and host to the URL of the proxy #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to #: be used on each :class:`Request <Request>`. self.proxies = {} #: Event-handling hooks. self.hooks = default_hooks() #: Dictionary of querystring data to attach to each #: :class:`Request <Request>`. The dictionary values may be lists for #: representing multivalued query parameters. self.params = {} #: Stream response content default. self.stream = False #: SSL Verification default. self.verify = True #: SSL client certificate default, if String, path to ssl client #: cert file (.pem). If Tuple, ('cert', 'key') pair. self.cert = None #: Maximum number of redirects allowed. If the request exceeds this #: limit, a :class:`TooManyRedirects` exception is raised. #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is #: 30. self.max_redirects = DEFAULT_REDIRECT_LIMIT #: Trust environment settings for proxy configuration, default #: authentication and similar. self.trust_env = True #: A CookieJar containing all currently outstanding cookies set on this #: session. By default it is a #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but #: may be any other ``cookielib.CookieJar`` compatible object. self.cookies = cookiejar_from_dict({}) # Default connection adapters. self.adapters = OrderedDict() self.mount('https://', HTTPAdapter()) self.mount('http://', HTTPAdapter()) def __enter__(self): return self def __exit__(self, *args): self.close() def prepare_request(self, request): """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it. The :class:`PreparedRequest` has settings merged from the :class:`Request <Request>` instance and those of the :class:`Session`. :param request: :class:`Request` instance to prepare with this session's settings. :rtype: requests.PreparedRequest """ cookies = request.cookies or {} # Bootstrap CookieJar. if not isinstance(cookies, cookielib.CookieJar): cookies = cookiejar_from_dict(cookies) # Merge with session cookies merged_cookies = merge_cookies( merge_cookies(RequestsCookieJar(), self.cookies), cookies) # Set environment's basic authentication if not explicitly set. 
auth = request.auth if self.trust_env and not auth and not self.auth: auth = get_netrc_auth(request.url) p = PreparedRequest() p.prepare( method=request.method.upper(), url=request.url, files=request.files, data=request.data, json=request.json, headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict), params=merge_setting(request.params, self.params), auth=merge_setting(auth, self.auth), cookies=merged_cookies, hooks=merge_hooks(request.hooks, self.hooks), ) return p def request(self, method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None, verify=None, cert=None, json=None): """Constructs a :class:`Request <Request>`, prepares it and sends it. Returns :class:`Response <Response>` object. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'filename': file-like-objects`` for multipart encoding upload. :param auth: (optional) Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param allow_redirects: (optional) Set to True by default. :type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol or protocol and hostname to the URL of the proxy. :param stream: (optional) whether to immediately download the response content. Defaults to ``False``. :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. :rtype: requests.Response """ # Create the Request. req = Request( method=method.upper(), url=url, headers=headers, files=files, data=data or {}, json=json, params=params or {}, auth=auth, cookies=cookies, hooks=hooks, ) prep = self.prepare_request(req) proxies = proxies or {} settings = self.merge_environment_settings( prep.url, proxies, stream, verify, cert ) # Send the request. send_kwargs = { 'timeout': timeout, 'allow_redirects': allow_redirects, } send_kwargs.update(settings) resp = self.send(prep, **send_kwargs) return resp def get(self, url, **kwargs): r"""Sends a GET request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return self.request('GET', url, **kwargs) def options(self, url, **kwargs): r"""Sends a OPTIONS request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. 
:rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return self.request('OPTIONS', url, **kwargs) def head(self, url, **kwargs): r"""Sends a HEAD request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ kwargs.setdefault('allow_redirects', False) return self.request('HEAD', url, **kwargs) def post(self, url, data=None, json=None, **kwargs): r"""Sends a POST request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request('POST', url, data=data, json=json, **kwargs) def put(self, url, data=None, **kwargs): r"""Sends a PUT request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request('PUT', url, data=data, **kwargs) def patch(self, url, data=None, **kwargs): r"""Sends a PATCH request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request('PATCH', url, data=data, **kwargs) def delete(self, url, **kwargs): r"""Sends a DELETE request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request('DELETE', url, **kwargs) def send(self, request, **kwargs): """Send a given PreparedRequest. :rtype: requests.Response """ # Set defaults that the hooks can utilize to ensure they always have # the correct parameters to reproduce the previous request. kwargs.setdefault('stream', self.stream) kwargs.setdefault('verify', self.verify) kwargs.setdefault('cert', self.cert) kwargs.setdefault('proxies', self.proxies) # It's possible that users might accidentally send a Request object. # Guard against that specific failure case. if isinstance(request, Request): raise ValueError('You can only send PreparedRequests.') # Set up variables needed for resolve_redirects and dispatching of hooks allow_redirects = kwargs.pop('allow_redirects', True) stream = kwargs.get('stream') hooks = request.hooks # Get the appropriate adapter to use adapter = self.get_adapter(url=request.url) # Start time (approximately) of the request start = preferred_clock() # Send the request r = adapter.send(request, **kwargs) # Total elapsed time of the request (approximately) elapsed = preferred_clock() - start r.elapsed = timedelta(seconds=elapsed) # Response manipulation hooks r = dispatch_hook('response', hooks, r, **kwargs) # Persist cookies if r.history: # If the hooks create history then we want those cookies too for resp in r.history: extract_cookies_to_jar(self.cookies, resp.request, resp.raw) extract_cookies_to_jar(self.cookies, request, r.raw) # Resolve redirects if allowed. 
if allow_redirects: # Redirect resolving generator. gen = self.resolve_redirects(r, request, **kwargs) history = [resp for resp in gen] else: history = [] # Shuffle things around if there's history. if history: # Insert the first (original) request at the start history.insert(0, r) # Get the last request made r = history.pop() r.history = history # If redirects aren't being followed, store the response on the Request for Response.next(). if not allow_redirects: try: r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs)) except StopIteration: pass if not stream: r.content return r def merge_environment_settings(self, url, proxies, stream, verify, cert): """ Check the environment and merge it with some settings. :rtype: dict """ # Gather clues from the surrounding environment. if self.trust_env: # Set environment's proxies. no_proxy = proxies.get('no_proxy') if proxies is not None else None env_proxies = get_environ_proxies(url, no_proxy=no_proxy) for (k, v) in env_proxies.items(): proxies.setdefault(k, v) # Look for requests environment configuration and be compatible # with cURL. if verify is True or verify is None: verify = (os.environ.get('REQUESTS_CA_BUNDLE') or os.environ.get('CURL_CA_BUNDLE')) # Merge all the kwargs. proxies = merge_setting(proxies, self.proxies) stream = merge_setting(stream, self.stream) verify = merge_setting(verify, self.verify) cert = merge_setting(cert, self.cert) return {'verify': verify, 'proxies': proxies, 'stream': stream, 'cert': cert} def get_adapter(self, url): """ Returns the appropriate connection adapter for the given URL. :rtype: requests.adapters.BaseAdapter """ for (prefix, adapter) in self.adapters.items(): if url.lower().startswith(prefix.lower()): return adapter # Nothing matches :-/ raise InvalidSchema("No connection adapters were found for {!r}".format(url)) def close(self): """Closes all adapters and as such the session""" for v in self.adapters.values(): v.close() def mount(self, prefix, adapter): """Registers a connection adapter to a prefix. Adapters are sorted in descending order by prefix length. """ self.adapters[prefix] = adapter keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] for key in keys_to_move: self.adapters[key] = self.adapters.pop(key) def __getstate__(self): state = {attr: getattr(self, attr, None) for attr in self.__attrs__} return state def __setstate__(self, state): for attr, value in state.items(): setattr(self, attr, value) def session(): """ Returns a :class:`Session` for context-management. .. deprecated:: 1.0.0 This method has been deprecated since version 1.0.0 and is only kept for backwards compatibility. New code should use :class:`~requests.sessions.Session` to create a session. This may be removed at a future date. :rtype: Session """ return Session()
0
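The redirect plumbing in sessions.py above is easiest to see through `should_strip_auth`, which every redirect passes through before `rebuild_auth` decides whether the `Authorization` header survives. A minimal sketch, assuming pip's vendored tree is importable (a standalone `requests` install exposes the same `Session` API); the URLs are illustrative:

from pip._vendor.requests import Session

s = Session()

# Same host, http -> https on default ports: the special case keeps auth.
print(s.should_strip_auth('http://example.com/a', 'https://example.com/b'))        # False

# Host changes: the Authorization header would be dropped on redirect.
print(s.should_strip_auth('https://example.com/a', 'https://other.example/b'))     # True

# Same host but a non-default port appears: treated as a different origin.
print(s.should_strip_auth('https://example.com/a', 'https://example.com:8443/b'))  # True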
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/hooks.py
# -*- coding: utf-8 -*- """ requests.hooks ~~~~~~~~~~~~~~ This module provides the capabilities for the Requests hooks system. Available hooks: ``response``: The response generated from a Request. """ HOOKS = ['response'] def default_hooks(): return {event: [] for event in HOOKS} # TODO: response is the only one def dispatch_hook(key, hooks, hook_data, **kwargs): """Dispatches a hook dictionary on a given piece of data.""" hooks = hooks or {} hooks = hooks.get(key) if hooks: if hasattr(hooks, '__call__'): hooks = [hooks] for hook in hooks: _hook_data = hook(hook_data, **kwargs) if _hook_data is not None: hook_data = _hook_data return hook_data
0
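A minimal sketch of the dispatch contract in hooks.py: a single callable or a list of callables registered under the `'response'` key is applied in order, and a hook's non-None return value replaces the data handed to the next hook. Import paths assume the vendored copy; `requests.hooks` in a standalone install exposes the same names:

from pip._vendor.requests.hooks import default_hooks, dispatch_hook

def shout(data, **kwargs):
    # Hooks may return None (keep data as-is) or a replacement value.
    return data.upper()

hooks = default_hooks()            # {'response': []}
hooks['response'].append(shout)

print(dispatch_hook('response', hooks, 'ok'))  # OK
print(dispatch_hook('response', {}, 'ok'))     # ok  (no hooks registered)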
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/compat.py
# -*- coding: utf-8 -*- """ requests.compat ~~~~~~~~~~~~~~~ This module handles import compatibility issues between Python 2 and Python 3. """ from pip._vendor import chardet import sys # ------- # Pythons # ------- # Syntax sugar. _ver = sys.version_info #: Python 2.x? is_py2 = (_ver[0] == 2) #: Python 3.x? is_py3 = (_ver[0] == 3) # Note: We've patched out simplejson support in pip because it prevents # upgrading simplejson on Windows. # try: # import simplejson as json # except (ImportError, SyntaxError): # # simplejson does not support Python 3.2, it throws a SyntaxError # # because of u'...' Unicode literals. import json # --------- # Specifics # --------- if is_py2: from urllib import ( quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass, proxy_bypass_environment, getproxies_environment) from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag from urllib2 import parse_http_list import cookielib from Cookie import Morsel from StringIO import StringIO # Keep OrderedDict for backwards compatibility. from collections import Callable, Mapping, MutableMapping, OrderedDict builtin_str = str bytes = str str = unicode basestring = basestring numeric_types = (int, long, float) integer_types = (int, long) elif is_py3: from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag from urllib.request import parse_http_list, getproxies, proxy_bypass, proxy_bypass_environment, getproxies_environment from http import cookiejar as cookielib from http.cookies import Morsel from io import StringIO # Keep OrderedDict for backwards compatibility. from collections import OrderedDict from collections.abc import Callable, Mapping, MutableMapping builtin_str = str str = str bytes = bytes basestring = (str, bytes) numeric_types = (int, float) integer_types = (int,)
0
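On Python 3 the names re-exported by compat.py resolve to the stdlib equivalents from the `is_py3` branch, so the rest of Requests can use one set of names on both interpreters. A quick sketch under the same vendored-path assumption:

from pip._vendor.requests.compat import is_py2, is_py3, urlparse, urljoin

print(is_py2, is_py3)  # False True on any Python 3.x interpreter

parts = urlparse('https://example.com/base/page?q=1')
print(parts.netloc, parts.query)                    # example.com q=1
print(urljoin('https://example.com/base/', 'img'))  # https://example.com/base/img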
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/models.py
# -*- coding: utf-8 -*- """ requests.models ~~~~~~~~~~~~~~~ This module contains the primary objects that power Requests. """ import datetime import sys # Import encoding now, to avoid implicit import later. # Implicit import within threads may cause LookupError when standard library is in a ZIP, # such as in Embedded Python. See https://github.com/psf/requests/issues/3578. import encodings.idna from pip._vendor.urllib3.fields import RequestField from pip._vendor.urllib3.filepost import encode_multipart_formdata from pip._vendor.urllib3.util import parse_url from pip._vendor.urllib3.exceptions import ( DecodeError, ReadTimeoutError, ProtocolError, LocationParseError) from io import UnsupportedOperation from .hooks import default_hooks from .structures import CaseInsensitiveDict from .auth import HTTPBasicAuth from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar from .exceptions import ( HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError, ContentDecodingError, ConnectionError, StreamConsumedError) from ._internal_utils import to_native_string, unicode_is_ascii from .utils import ( guess_filename, get_auth_from_url, requote_uri, stream_decode_response_unicode, to_key_val_list, parse_header_links, iter_slices, guess_json_utf, super_len, check_header_validity) from .compat import ( Callable, Mapping, cookielib, urlunparse, urlsplit, urlencode, str, bytes, is_py2, chardet, builtin_str, basestring) from .compat import json as complexjson from .status_codes import codes #: The set of HTTP status codes that indicate an automatically #: processable redirect. REDIRECT_STATI = ( codes.moved, # 301 codes.found, # 302 codes.other, # 303 codes.temporary_redirect, # 307 codes.permanent_redirect, # 308 ) DEFAULT_REDIRECT_LIMIT = 30 CONTENT_CHUNK_SIZE = 10 * 1024 ITER_CHUNK_SIZE = 512 class RequestEncodingMixin(object): @property def path_url(self): """Build the path URL to use.""" url = [] p = urlsplit(self.url) path = p.path if not path: path = '/' url.append(path) query = p.query if query: url.append('?') url.append(query) return ''.join(url) @staticmethod def _encode_params(data): """Encode parameters in a piece of data. Will successfully encode parameters when passed as a dict or a list of 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary if parameters are supplied as a dict. """ if isinstance(data, (str, bytes)): return data elif hasattr(data, 'read'): return data elif hasattr(data, '__iter__'): result = [] for k, vs in to_key_val_list(data): if isinstance(vs, basestring) or not hasattr(vs, '__iter__'): vs = [vs] for v in vs: if v is not None: result.append( (k.encode('utf-8') if isinstance(k, str) else k, v.encode('utf-8') if isinstance(v, str) else v)) return urlencode(result, doseq=True) else: return data @staticmethod def _encode_files(files, data): """Build the body for a multipart/form-data request. Will successfully encode files when passed as a dict or a list of tuples. Order is retained if data is a list of tuples but arbitrary if parameters are supplied as a dict. The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype) or 4-tuples (filename, fileobj, contentype, custom_headers). 
""" if (not files): raise ValueError("Files must be provided.") elif isinstance(data, basestring): raise ValueError("Data must not be a string.") new_fields = [] fields = to_key_val_list(data or {}) files = to_key_val_list(files or {}) for field, val in fields: if isinstance(val, basestring) or not hasattr(val, '__iter__'): val = [val] for v in val: if v is not None: # Don't call str() on bytestrings: in Py3 it all goes wrong. if not isinstance(v, bytes): v = str(v) new_fields.append( (field.decode('utf-8') if isinstance(field, bytes) else field, v.encode('utf-8') if isinstance(v, str) else v)) for (k, v) in files: # support for explicit filename ft = None fh = None if isinstance(v, (tuple, list)): if len(v) == 2: fn, fp = v elif len(v) == 3: fn, fp, ft = v else: fn, fp, ft, fh = v else: fn = guess_filename(v) or k fp = v if isinstance(fp, (str, bytes, bytearray)): fdata = fp elif hasattr(fp, 'read'): fdata = fp.read() elif fp is None: continue else: fdata = fp rf = RequestField(name=k, data=fdata, filename=fn, headers=fh) rf.make_multipart(content_type=ft) new_fields.append(rf) body, content_type = encode_multipart_formdata(new_fields) return body, content_type class RequestHooksMixin(object): def register_hook(self, event, hook): """Properly register a hook.""" if event not in self.hooks: raise ValueError('Unsupported event specified, with event name "%s"' % (event)) if isinstance(hook, Callable): self.hooks[event].append(hook) elif hasattr(hook, '__iter__'): self.hooks[event].extend(h for h in hook if isinstance(h, Callable)) def deregister_hook(self, event, hook): """Deregister a previously registered hook. Returns True if the hook existed, False if not. """ try: self.hooks[event].remove(hook) return True except ValueError: return False class Request(RequestHooksMixin): """A user-created :class:`Request <Request>` object. Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server. :param method: HTTP method to use. :param url: URL to send. :param headers: dictionary of headers to send. :param files: dictionary of {filename: fileobject} files to multipart upload. :param data: the body to attach to the request. If a dictionary or list of tuples ``[(key, value)]`` is provided, form-encoding will take place. :param json: json for the body to attach to the request (if files or data is not specified). :param params: URL parameters to append to the URL. If a dictionary or list of tuples ``[(key, value)]`` is provided, form-encoding will take place. :param auth: Auth handler or (user, pass) tuple. :param cookies: dictionary or CookieJar of cookies to attach to this request. :param hooks: dictionary of callback hooks, for internal usage. Usage:: >>> import requests >>> req = requests.Request('GET', 'https://httpbin.org/get') >>> req.prepare() <PreparedRequest [GET]> """ def __init__(self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None, json=None): # Default empty dicts for dict params. 
data = [] if data is None else data files = [] if files is None else files headers = {} if headers is None else headers params = {} if params is None else params hooks = {} if hooks is None else hooks self.hooks = default_hooks() for (k, v) in list(hooks.items()): self.register_hook(event=k, hook=v) self.method = method self.url = url self.headers = headers self.files = files self.data = data self.json = json self.params = params self.auth = auth self.cookies = cookies def __repr__(self): return '<Request [%s]>' % (self.method) def prepare(self): """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it.""" p = PreparedRequest() p.prepare( method=self.method, url=self.url, headers=self.headers, files=self.files, data=self.data, json=self.json, params=self.params, auth=self.auth, cookies=self.cookies, hooks=self.hooks, ) return p class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): """The fully mutable :class:`PreparedRequest <PreparedRequest>` object, containing the exact bytes that will be sent to the server. Generated from either a :class:`Request <Request>` object or manually. Usage:: >>> import requests >>> req = requests.Request('GET', 'https://httpbin.org/get') >>> r = req.prepare() >>> r <PreparedRequest [GET]> >>> s = requests.Session() >>> s.send(r) <Response [200]> """ def __init__(self): #: HTTP verb to send to the server. self.method = None #: HTTP URL to send the request to. self.url = None #: dictionary of HTTP headers. self.headers = None # The `CookieJar` used to create the Cookie header will be stored here # after prepare_cookies is called self._cookies = None #: request body to send to the server. self.body = None #: dictionary of callback hooks, for internal usage. self.hooks = default_hooks() #: integer denoting starting position of a readable file-like body. self._body_position = None def prepare(self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None, json=None): """Prepares the entire request with the given parameters.""" self.prepare_method(method) self.prepare_url(url, params) self.prepare_headers(headers) self.prepare_cookies(cookies) self.prepare_body(data, files, json) self.prepare_auth(auth, url) # Note that prepare_auth must be last to enable authentication schemes # such as OAuth to work on a fully prepared request. # This MUST go after prepare_auth. Authenticators could add a hook self.prepare_hooks(hooks) def __repr__(self): return '<PreparedRequest [%s]>' % (self.method) def copy(self): p = PreparedRequest() p.method = self.method p.url = self.url p.headers = self.headers.copy() if self.headers is not None else None p._cookies = _copy_cookie_jar(self._cookies) p.body = self.body p.hooks = self.hooks p._body_position = self._body_position return p def prepare_method(self, method): """Prepares the given HTTP method.""" self.method = method if self.method is not None: self.method = to_native_string(self.method.upper()) @staticmethod def _get_idna_encoded_host(host): from pip._vendor import idna try: host = idna.encode(host, uts46=True).decode('utf-8') except idna.IDNAError: raise UnicodeError return host def prepare_url(self, url, params): """Prepares the given HTTP URL.""" #: Accept objects that have string representations. #: We're unable to blindly call unicode/str functions #: as this will include the bytestring indicator (b'') #: on python 3.x. 
#: https://github.com/psf/requests/pull/2238 if isinstance(url, bytes): url = url.decode('utf8') else: url = unicode(url) if is_py2 else str(url) # Remove leading whitespaces from url url = url.lstrip() # Don't do any URL preparation for non-HTTP schemes like `mailto`, # `data` etc to work around exceptions from `url_parse`, which # handles RFC 3986 only. if ':' in url and not url.lower().startswith('http'): self.url = url return # Support for unicode domain names and paths. try: scheme, auth, host, port, path, query, fragment = parse_url(url) except LocationParseError as e: raise InvalidURL(*e.args) if not scheme: error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?") error = error.format(to_native_string(url, 'utf8')) raise MissingSchema(error) if not host: raise InvalidURL("Invalid URL %r: No host supplied" % url) # In general, we want to try IDNA encoding the hostname if the string contains # non-ASCII characters. This allows users to automatically get the correct IDNA # behaviour. For strings containing only ASCII characters, we need to also verify # it doesn't start with a wildcard (*), before allowing the unencoded hostname. if not unicode_is_ascii(host): try: host = self._get_idna_encoded_host(host) except UnicodeError: raise InvalidURL('URL has an invalid label.') elif host.startswith(u'*'): raise InvalidURL('URL has an invalid label.') # Carefully reconstruct the network location netloc = auth or '' if netloc: netloc += '@' netloc += host if port: netloc += ':' + str(port) # Bare domains aren't valid URLs. if not path: path = '/' if is_py2: if isinstance(scheme, str): scheme = scheme.encode('utf-8') if isinstance(netloc, str): netloc = netloc.encode('utf-8') if isinstance(path, str): path = path.encode('utf-8') if isinstance(query, str): query = query.encode('utf-8') if isinstance(fragment, str): fragment = fragment.encode('utf-8') if isinstance(params, (str, bytes)): params = to_native_string(params) enc_params = self._encode_params(params) if enc_params: if query: query = '%s&%s' % (query, enc_params) else: query = enc_params url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment])) self.url = url def prepare_headers(self, headers): """Prepares the given HTTP headers.""" self.headers = CaseInsensitiveDict() if headers: for header in headers.items(): # Raise exception on invalid header value. check_header_validity(header) name, value = header self.headers[to_native_string(name)] = value def prepare_body(self, data, files, json=None): """Prepares the given HTTP body data.""" # Check if file, fo, generator, iterator. # If not, run through normal process. # Nottin' on you. body = None content_type = None if not data and json is not None: # urllib3 requires a bytes-like body. Python 2's json.dumps # provides this natively, but Python 3 gives a Unicode string. content_type = 'application/json' body = complexjson.dumps(json) if not isinstance(body, bytes): body = body.encode('utf-8') is_stream = all([ hasattr(data, '__iter__'), not isinstance(data, (basestring, list, tuple, Mapping)) ]) if is_stream: try: length = super_len(data) except (TypeError, AttributeError, UnsupportedOperation): length = None body = data if getattr(body, 'tell', None) is not None: # Record the current file position before reading. # This will allow us to rewind a file in the event # of a redirect. 
try: self._body_position = body.tell() except (IOError, OSError): # This differentiates from None, allowing us to catch # a failed `tell()` later when trying to rewind the body self._body_position = object() if files: raise NotImplementedError('Streamed bodies and files are mutually exclusive.') if length: self.headers['Content-Length'] = builtin_str(length) else: self.headers['Transfer-Encoding'] = 'chunked' else: # Multi-part file uploads. if files: (body, content_type) = self._encode_files(files, data) else: if data: body = self._encode_params(data) if isinstance(data, basestring) or hasattr(data, 'read'): content_type = None else: content_type = 'application/x-www-form-urlencoded' self.prepare_content_length(body) # Add content-type if it wasn't explicitly provided. if content_type and ('content-type' not in self.headers): self.headers['Content-Type'] = content_type self.body = body def prepare_content_length(self, body): """Prepare Content-Length header based on request method and body""" if body is not None: length = super_len(body) if length: # If length exists, set it. Otherwise, we fallback # to Transfer-Encoding: chunked. self.headers['Content-Length'] = builtin_str(length) elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None: # Set Content-Length to 0 for methods that can have a body # but don't provide one. (i.e. not GET or HEAD) self.headers['Content-Length'] = '0' def prepare_auth(self, auth, url=''): """Prepares the given HTTP auth data.""" # If no Auth is explicitly provided, extract it from the URL first. if auth is None: url_auth = get_auth_from_url(self.url) auth = url_auth if any(url_auth) else None if auth: if isinstance(auth, tuple) and len(auth) == 2: # special-case basic HTTP auth auth = HTTPBasicAuth(*auth) # Allow auth to make its changes. r = auth(self) # Update self to reflect the auth changes. self.__dict__.update(r.__dict__) # Recompute Content-Length self.prepare_content_length(self.body) def prepare_cookies(self, cookies): """Prepares the given HTTP cookie data. This function eventually generates a ``Cookie`` header from the given cookies using cookielib. Due to cookielib's design, the header will not be regenerated if it already exists, meaning this function can only be called once for the life of the :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls to ``prepare_cookies`` will have no actual effect, unless the "Cookie" header is removed beforehand. """ if isinstance(cookies, cookielib.CookieJar): self._cookies = cookies else: self._cookies = cookiejar_from_dict(cookies) cookie_header = get_cookie_header(self._cookies, self) if cookie_header is not None: self.headers['Cookie'] = cookie_header def prepare_hooks(self, hooks): """Prepares the given hooks.""" # hooks can be passed as None to the prepare method and to this # method. To prevent iterating over None, simply use an empty list # if hooks is False-y hooks = hooks or [] for event in hooks: self.register_hook(event, hooks[event]) class Response(object): """The :class:`Response <Response>` object, which contains a server's response to an HTTP request. """ __attrs__ = [ '_content', 'status_code', 'headers', 'url', 'history', 'encoding', 'reason', 'cookies', 'elapsed', 'request' ] def __init__(self): self._content = False self._content_consumed = False self._next = None #: Integer Code of responded HTTP Status, e.g. 404 or 200. self.status_code = None #: Case-insensitive Dictionary of Response Headers. 
#: For example, ``headers['content-encoding']`` will return the #: value of a ``'Content-Encoding'`` response header. self.headers = CaseInsensitiveDict() #: File-like object representation of response (for advanced usage). #: Use of ``raw`` requires that ``stream=True`` be set on the request. #: This requirement does not apply for use internally to Requests. self.raw = None #: Final URL location of Response. self.url = None #: Encoding to decode with when accessing r.text. self.encoding = None #: A list of :class:`Response <Response>` objects from #: the history of the Request. Any redirect responses will end #: up here. The list is sorted from the oldest to the most recent request. self.history = [] #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK". self.reason = None #: A CookieJar of Cookies the server sent back. self.cookies = cookiejar_from_dict({}) #: The amount of time elapsed between sending the request #: and the arrival of the response (as a timedelta). #: This property specifically measures the time taken between sending #: the first byte of the request and finishing parsing the headers. It #: is therefore unaffected by consuming the response content or the #: value of the ``stream`` keyword argument. self.elapsed = datetime.timedelta(0) #: The :class:`PreparedRequest <PreparedRequest>` object to which this #: is a response. self.request = None def __enter__(self): return self def __exit__(self, *args): self.close() def __getstate__(self): # Consume everything; accessing the content attribute makes # sure the content has been fully read. if not self._content_consumed: self.content return {attr: getattr(self, attr, None) for attr in self.__attrs__} def __setstate__(self, state): for name, value in state.items(): setattr(self, name, value) # pickled objects do not have .raw setattr(self, '_content_consumed', True) setattr(self, 'raw', None) def __repr__(self): return '<Response [%s]>' % (self.status_code) def __bool__(self): """Returns True if :attr:`status_code` is less than 400. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code, is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``. """ return self.ok def __nonzero__(self): """Returns True if :attr:`status_code` is less than 400. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code, is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``. """ return self.ok def __iter__(self): """Allows you to use a response as an iterator.""" return self.iter_content(128) @property def ok(self): """Returns True if :attr:`status_code` is less than 400, False if not. This attribute checks if the status code of the response is between 400 and 600 to see if there was a client error or a server error. If the status code is between 200 and 400, this will return True. This is **not** a check to see if the response code is ``200 OK``. """ try: self.raise_for_status() except HTTPError: return False return True @property def is_redirect(self): """True if this Response is a well-formed HTTP redirect that could have been processed automatically (by :meth:`Session.resolve_redirects`). 
""" return ('location' in self.headers and self.status_code in REDIRECT_STATI) @property def is_permanent_redirect(self): """True if this Response one of the permanent versions of redirect.""" return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect)) @property def next(self): """Returns a PreparedRequest for the next request in a redirect chain, if there is one.""" return self._next @property def apparent_encoding(self): """The apparent encoding, provided by the chardet library.""" return chardet.detect(self.content)['encoding'] def iter_content(self, chunk_size=1, decode_unicode=False): """Iterates over the response data. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. The chunk size is the number of bytes it should read into memory. This is not necessarily the length of each item returned as decoding can take place. chunk_size must be of type int or None. A value of None will function differently depending on the value of `stream`. stream=True will read data as it arrives in whatever size the chunks are received. If stream=False, data is returned as a single chunk. If decode_unicode is True, content will be decoded using the best available encoding based on the response. """ def generate(): # Special case for urllib3. if hasattr(self.raw, 'stream'): try: for chunk in self.raw.stream(chunk_size, decode_content=True): yield chunk except ProtocolError as e: raise ChunkedEncodingError(e) except DecodeError as e: raise ContentDecodingError(e) except ReadTimeoutError as e: raise ConnectionError(e) else: # Standard file-like object. while True: chunk = self.raw.read(chunk_size) if not chunk: break yield chunk self._content_consumed = True if self._content_consumed and isinstance(self._content, bool): raise StreamConsumedError() elif chunk_size is not None and not isinstance(chunk_size, int): raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size)) # simulate reading small chunks of the content reused_chunks = iter_slices(self._content, chunk_size) stream_chunks = generate() chunks = reused_chunks if self._content_consumed else stream_chunks if decode_unicode: chunks = stream_decode_response_unicode(chunks, self) return chunks def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None): """Iterates over the response data, one line at a time. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. .. note:: This method is not reentrant safe. """ pending = None for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode): if pending is not None: chunk = pending + chunk if delimiter: lines = chunk.split(delimiter) else: lines = chunk.splitlines() if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: pending = lines.pop() else: pending = None for line in lines: yield line if pending is not None: yield pending @property def content(self): """Content of the response, in bytes.""" if self._content is False: # Read the contents. if self._content_consumed: raise RuntimeError( 'The content for this response was already consumed') if self.status_code == 0 or self.raw is None: self._content = None else: self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b'' self._content_consumed = True # don't need to release the connection; that's been handled by urllib3 # since we exhausted the data. 
return self._content @property def text(self): """Content of the response, in unicode. If Response.encoding is None, encoding will be guessed using ``chardet``. The encoding of the response content is determined based solely on HTTP headers, following RFC 2616 to the letter. If you can take advantage of non-HTTP knowledge to make a better guess at the encoding, you should set ``r.encoding`` appropriately before accessing this property. """ # Try charset from content-type content = None encoding = self.encoding if not self.content: return str('') # Fallback to auto-detected encoding. if self.encoding is None: encoding = self.apparent_encoding # Decode unicode from given encoding. try: content = str(self.content, encoding, errors='replace') except (LookupError, TypeError): # A LookupError is raised if the encoding was not found which could # indicate a misspelling or similar mistake. # # A TypeError can be raised if encoding is None # # So we try blindly encoding. content = str(self.content, errors='replace') return content def json(self, **kwargs): r"""Returns the json-encoded content of a response, if any. :param \*\*kwargs: Optional arguments that ``json.loads`` takes. :raises ValueError: If the response body does not contain valid json. """ if not self.encoding and self.content and len(self.content) > 3: # No encoding set. JSON RFC 4627 section 3 states we should expect # UTF-8, -16 or -32. Detect which one to use; If the detection or # decoding fails, fall back to `self.text` (using chardet to make # a best guess). encoding = guess_json_utf(self.content) if encoding is not None: try: return complexjson.loads( self.content.decode(encoding), **kwargs ) except UnicodeDecodeError: # Wrong UTF codec detected; usually because it's not UTF-8 # but some other 8-bit codec. This is an RFC violation, # and the server didn't bother to tell us what codec *was* # used. pass return complexjson.loads(self.text, **kwargs) @property def links(self): """Returns the parsed header links of the response, if any.""" header = self.headers.get('link') # l = MultiDict() l = {} if header: links = parse_header_links(header) for link in links: key = link.get('rel') or link.get('url') l[key] = link return l def raise_for_status(self): """Raises :class:`HTTPError`, if one occurred.""" http_error_msg = '' if isinstance(self.reason, bytes): # We attempt to decode utf-8 first because some servers # choose to localize their reason strings. If the string # isn't utf-8, we fall back to iso-8859-1 for all other # encodings. (See PR #3538) try: reason = self.reason.decode('utf-8') except UnicodeDecodeError: reason = self.reason.decode('iso-8859-1') else: reason = self.reason if 400 <= self.status_code < 500: http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url) elif 500 <= self.status_code < 600: http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url) if http_error_msg: raise HTTPError(http_error_msg, response=self) def close(self): """Releases the connection back to the pool. Once this method has been called the underlying ``raw`` object must not be accessed again. *Note: Should not normally need to be called explicitly.* """ if not self._content_consumed: self.raw.close() release_conn = getattr(self.raw, 'release_conn', None) if release_conn is not None: release_conn()
0
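The `Request` to `PreparedRequest` pipeline in models.py can be exercised without any network: `prepare()` runs `prepare_method`, `prepare_url`, `prepare_headers`, `prepare_cookies`, `prepare_body`, and `prepare_auth` in order and yields the exact wire representation. A minimal sketch, assuming the vendored import path; the URL and payload are illustrative:

from pip._vendor.requests import Request

req = Request('POST', 'https://httpbin.org/post',
              params={'q': 'confusing'},
              json={'word': 'ambiguous'})
p = req.prepare()

print(p.method)                   # POST
print(p.url)                      # https://httpbin.org/post?q=confusing
print(p.headers['Content-Type'])  # application/json
print(p.body)                     # b'{"word": "ambiguous"}'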
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/certs.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ requests.certs ~~~~~~~~~~~~~~ This module returns the preferred default CA certificate bundle. There is only one — the one from the certifi package. If you are packaging Requests, e.g., for a Linux distribution or a managed environment, you can change the definition of where() to return a separately packaged CA bundle. """ from pip._vendor.certifi import where if __name__ == '__main__': print(where())
0
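`certs.where()` simply forwards to certifi, so the module doubles as a tiny utility for locating the active CA bundle. A sketch; the printed path depends entirely on the environment:

from pip._vendor.requests import certs

# Forwards to certifi; typically some .../certifi/cacert.pem path.
print(certs.where())

# The __main__ guard above should also make the module runnable directly:
#   python -m pip._vendor.requests.certs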
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/__init__.py
# -*- coding: utf-8 -*- # __ # /__) _ _ _ _ _/ _ # / ( (- (/ (/ (- _) / _) # / """ Requests HTTP Library ~~~~~~~~~~~~~~~~~~~~~ Requests is an HTTP library, written in Python, for human beings. Basic GET usage: >>> import requests >>> r = requests.get('https://www.python.org') >>> r.status_code 200 >>> b'Python is a programming language' in r.content True ... or POST: >>> payload = dict(key1='value1', key2='value2') >>> r = requests.post('https://httpbin.org/post', data=payload) >>> print(r.text) { ... "form": { "key1": "value1", "key2": "value2" }, ... } The other HTTP methods are supported - see `requests.api`. Full documentation is at <https://requests.readthedocs.io>. :copyright: (c) 2017 by Kenneth Reitz. :license: Apache 2.0, see LICENSE for more details. """ from pip._vendor import urllib3 from pip._vendor import chardet import warnings from .exceptions import RequestsDependencyWarning def check_compatibility(urllib3_version, chardet_version): urllib3_version = urllib3_version.split('.') assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git. # Sometimes, urllib3 only reports its version as 16.1. if len(urllib3_version) == 2: urllib3_version.append('0') # Check urllib3 for compatibility. major, minor, patch = urllib3_version # noqa: F811 major, minor, patch = int(major), int(minor), int(patch) # urllib3 >= 1.21.1, <= 1.25 assert major == 1 assert minor >= 21 assert minor <= 25 # Check chardet for compatibility. major, minor, patch = chardet_version.split('.')[:3] major, minor, patch = int(major), int(minor), int(patch) # chardet >= 3.0.2, < 3.1.0 assert major == 3 assert minor < 1 assert patch >= 2 def _check_cryptography(cryptography_version): # cryptography < 1.3.4 try: cryptography_version = list(map(int, cryptography_version.split('.'))) except ValueError: return if cryptography_version < [1, 3, 4]: warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version) warnings.warn(warning, RequestsDependencyWarning) # Check imported dependencies for compatibility. try: check_compatibility(urllib3.__version__, chardet.__version__) except (AssertionError, ValueError): warnings.warn("urllib3 ({}) or chardet ({}) doesn't match a supported " "version!".format(urllib3.__version__, chardet.__version__), RequestsDependencyWarning) # Attempt to enable urllib3's fallback for SNI support # if the standard library doesn't support SNI or the # 'ssl' library isn't available. try: # Note: This logic prevents upgrading cryptography on Windows, if imported # as part of pip. from pip._internal.utils.compat import WINDOWS if not WINDOWS: raise ImportError("pip internals: don't import cryptography on Windows") try: import ssl except ImportError: ssl = None if not getattr(ssl, "HAS_SNI", False): from pip._vendor.urllib3.contrib import pyopenssl pyopenssl.inject_into_urllib3() # Check cryptography version from cryptography import __version__ as cryptography_version _check_cryptography(cryptography_version) except ImportError: pass # urllib3's DependencyWarnings should be silenced. from pip._vendor.urllib3.exceptions import DependencyWarning warnings.simplefilter('ignore', DependencyWarning) from .__version__ import __title__, __description__, __url__, __version__ from .__version__ import __build__, __author__, __author_email__, __license__ from .__version__ import __copyright__, __cake__ from . import utils from . 
import packages from .models import Request, Response, PreparedRequest from .api import request, get, head, post, patch, put, delete, options from .sessions import session, Session from .status_codes import codes from .exceptions import ( RequestException, Timeout, URLRequired, TooManyRedirects, HTTPError, ConnectionError, FileModeWarning, ConnectTimeout, ReadTimeout ) # Set default logging handler to avoid "No handler found" warnings. import logging from logging import NullHandler logging.getLogger(__name__).addHandler(NullHandler()) # FileModeWarnings go off per the default. warnings.simplefilter('default', FileModeWarning, append=True)
0
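`check_compatibility` is the version gate used at import time above: it asserts that urllib3 is a 1.21.x through 1.25.x release and chardet is at least 3.0.2 but below 3.1, and the caller downgrades any `AssertionError` to a `RequestsDependencyWarning`. A sketch with hypothetical version strings:

from pip._vendor.requests import check_compatibility

check_compatibility('1.25.9', '3.0.4')       # passes silently
try:
    check_compatibility('1.26.0', '3.0.4')   # urllib3 minor version too new
except AssertionError:
    print('unsupported urllib3 version')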
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/status_codes.py
# -*- coding: utf-8 -*- r""" The ``codes`` object defines a mapping from common names for HTTP statuses to their numerical codes, accessible either as attributes or as dictionary items. Example:: >>> import requests >>> requests.codes['temporary_redirect'] 307 >>> requests.codes.teapot 418 >>> requests.codes['\o/'] 200 Some codes have multiple names, and both upper- and lower-case versions of the names are allowed. For example, ``codes.ok``, ``codes.OK``, and ``codes.okay`` all correspond to the HTTP status code 200. """ from .structures import LookupDict _codes = { # Informational. 100: ('continue',), 101: ('switching_protocols',), 102: ('processing',), 103: ('checkpoint',), 122: ('uri_too_long', 'request_uri_too_long'), 200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'), 201: ('created',), 202: ('accepted',), 203: ('non_authoritative_info', 'non_authoritative_information'), 204: ('no_content',), 205: ('reset_content', 'reset'), 206: ('partial_content', 'partial'), 207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'), 208: ('already_reported',), 226: ('im_used',), # Redirection. 300: ('multiple_choices',), 301: ('moved_permanently', 'moved', '\\o-'), 302: ('found',), 303: ('see_other', 'other'), 304: ('not_modified',), 305: ('use_proxy',), 306: ('switch_proxy',), 307: ('temporary_redirect', 'temporary_moved', 'temporary'), 308: ('permanent_redirect', 'resume_incomplete', 'resume',), # These 2 to be removed in 3.0 # Client Error. 400: ('bad_request', 'bad'), 401: ('unauthorized',), 402: ('payment_required', 'payment'), 403: ('forbidden',), 404: ('not_found', '-o-'), 405: ('method_not_allowed', 'not_allowed'), 406: ('not_acceptable',), 407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'), 408: ('request_timeout', 'timeout'), 409: ('conflict',), 410: ('gone',), 411: ('length_required',), 412: ('precondition_failed', 'precondition'), 413: ('request_entity_too_large',), 414: ('request_uri_too_large',), 415: ('unsupported_media_type', 'unsupported_media', 'media_type'), 416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'), 417: ('expectation_failed',), 418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'), 421: ('misdirected_request',), 422: ('unprocessable_entity', 'unprocessable'), 423: ('locked',), 424: ('failed_dependency', 'dependency'), 425: ('unordered_collection', 'unordered'), 426: ('upgrade_required', 'upgrade'), 428: ('precondition_required', 'precondition'), 429: ('too_many_requests', 'too_many'), 431: ('header_fields_too_large', 'fields_too_large'), 444: ('no_response', 'none'), 449: ('retry_with', 'retry'), 450: ('blocked_by_windows_parental_controls', 'parental_controls'), 451: ('unavailable_for_legal_reasons', 'legal_reasons'), 499: ('client_closed_request',), # Server Error. 
500: ('internal_server_error', 'server_error', '/o\\', '✗'), 501: ('not_implemented',), 502: ('bad_gateway',), 503: ('service_unavailable', 'unavailable'), 504: ('gateway_timeout',), 505: ('http_version_not_supported', 'http_version'), 506: ('variant_also_negotiates',), 507: ('insufficient_storage',), 509: ('bandwidth_limit_exceeded', 'bandwidth'), 510: ('not_extended',), 511: ('network_authentication_required', 'network_auth', 'network_authentication'), } codes = LookupDict(name='status_codes') def _init(): for code, titles in _codes.items(): for title in titles: setattr(codes, title, code) if not title.startswith(('\\', '/')): setattr(codes, title.upper(), code) def doc(code): names = ', '.join('``%s``' % n for n in _codes[code]) return '* %d: %s' % (code, names) global __doc__ __doc__ = (__doc__ + '\n' + '\n'.join(doc(code) for code in sorted(_codes)) if __doc__ is not None else None) _init()
0
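`_init()` registers every alias on the `LookupDict` three ways: as a lowercase attribute, as an item, and (for names not starting with `\` or `/`) as an uppercase attribute, all mapping to the same integer. A quick sketch under the vendored path:

from pip._vendor.requests import codes

print(codes.ok, codes.OK, codes['okay'])       # 200 200 200
print(codes.temporary_redirect, codes.teapot)  # 307 418
print(codes.get('no_such_alias'))              # None (LookupDict default)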
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/packages.py
import sys # This code exists for backwards compatibility reasons. # I don't like it either. Just look the other way. :) for package in ('urllib3', 'idna', 'chardet'): vendored_package = "pip._vendor." + package locals()[package] = __import__(vendored_package) # This traversal is apparently necessary such that the identities are # preserved (requests.packages.urllib3.* is urllib3.*) for mod in list(sys.modules): if mod == vendored_package or mod.startswith(vendored_package + '.'): unprefixed_mod = mod[len("pip._vendor."):] sys.modules['pip._vendor.requests.packages.' + unprefixed_mod] = sys.modules[mod] # Kinda cool, though, right?
0
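The effect of the aliasing loop is that the legacy `requests.packages.*` module paths and the vendored modules are literally the same objects in `sys.modules`; a sketch:

import sys

import pip._vendor.requests.packages  # executes the aliasing loop above

print(sys.modules['pip._vendor.requests.packages.urllib3']
      is sys.modules['pip._vendor.urllib3'])  # True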
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/__version__.py
# .-. .-. .-. . . .-. .-. .-. .-. # |( |- |.| | | |- `-. | `-. # ' ' `-' `-`.`-' `-' `-' ' `-' __title__ = 'requests' __description__ = 'Python HTTP for Humans.' __url__ = 'https://requests.readthedocs.io' __version__ = '2.24.0' __build__ = 0x022400 __author__ = 'Kenneth Reitz' __author_email__ = 'me@kennethreitz.org' __license__ = 'Apache 2.0' __copyright__ = 'Copyright 2020 Kenneth Reitz' __cake__ = u'\u2728 \U0001f370 \u2728'
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/api.py
# -*- coding: utf-8 -*- """ requests.api ~~~~~~~~~~~~ This module implements the Requests API. :copyright: (c) 2012 by Kenneth Reitz. :license: Apache2, see LICENSE for more details. """ from . import sessions def request(method, url, **kwargs): """Constructs and sends a :class:`Request <Request>`. :param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary, list of tuples or bytes to send in the query string for the :class:`Request`. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')`` or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers to add for the file. :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How many seconds to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``. :type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``. :param stream: (optional) if ``False``, the response content will be immediately downloaded. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. :return: :class:`Response <Response>` object :rtype: requests.Response Usage:: >>> import requests >>> req = requests.request('GET', 'https://httpbin.org/get') >>> req <Response [200]> """ # By using the 'with' statement we are sure the session is closed, thus we # avoid leaving sockets open which can trigger a ResourceWarning in some # cases, and look like a memory leak in others. with sessions.Session() as session: return session.request(method=method, url=url, **kwargs) def get(url, params=None, **kwargs): r"""Sends a GET request. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary, list of tuples or bytes to send in the query string for the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return request('get', url, params=params, **kwargs) def options(url, **kwargs): r"""Sends an OPTIONS request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. 
:return: :class:`Response <Response>` object :rtype: requests.Response """ kwargs.setdefault('allow_redirects', True) return request('options', url, **kwargs) def head(url, **kwargs): r"""Sends a HEAD request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. If `allow_redirects` is not provided, it will be set to `False` (as opposed to the default :meth:`request` behavior). :return: :class:`Response <Response>` object :rtype: requests.Response """ kwargs.setdefault('allow_redirects', False) return request('head', url, **kwargs) def post(url, data=None, json=None, **kwargs): r"""Sends a POST request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request('post', url, data=data, json=json, **kwargs) def put(url, data=None, **kwargs): r"""Sends a PUT request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request('put', url, data=data, **kwargs) def patch(url, data=None, **kwargs): r"""Sends a PATCH request. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json data to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request('patch', url, data=data, **kwargs) def delete(url, **kwargs): r"""Sends a DELETE request. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :return: :class:`Response <Response>` object :rtype: requests.Response """ return request('delete', url, **kwargs)
0
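A short usage sketch for the helpers defined in api.py above. It assumes the real, top-level requests package is installed (the vendored pip._vendor copy is not meant to be imported directly) and, like the module's own docstring, uses https://httpbin.org purely as an illustrative endpoint.

import requests

# Each helper creates and closes a throwaway Session internally, exactly
# as request() does with its 'with sessions.Session()' block above.
resp = requests.get('https://httpbin.org/get', params={'q': 'demo'})
print(resp.status_code, resp.url)

resp = requests.post('https://httpbin.org/post', json={'key': 'value'})
print(resp.json()['json'])

# When issuing many requests to one host, reuse a single Session so the
# underlying connection pool is shared instead of rebuilt on every call.
with requests.Session() as session:
    for _ in range(3):
        session.get('https://httpbin.org/get')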
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/_internal_utils.py
# -*- coding: utf-8 -*- """ requests._internal_utils ~~~~~~~~~~~~~~ Provides utility functions that are consumed internally by Requests which depend on extremely few external helpers (such as compat) """ from .compat import is_py2, builtin_str, str def to_native_string(string, encoding='ascii'): """Given a string object, regardless of type, returns a representation of that string in the native string type, encoding and decoding where necessary. This assumes ASCII unless told otherwise. """ if isinstance(string, builtin_str): out = string else: if is_py2: out = string.encode(encoding) else: out = string.decode(encoding) return out def unicode_is_ascii(u_string): """Determine if unicode string only contains ASCII characters. :param str u_string: unicode string to check. Must be unicode and not Python 2 `str`. :rtype: bool """ assert isinstance(u_string, str) try: u_string.encode('ascii') return True except UnicodeEncodeError: return False
0
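A minimal Python 3 sketch of the check implemented by unicode_is_ascii() above; since _internal_utils is an internal, vendored module, the logic is re-created here rather than imported.

def unicode_is_ascii(u_string: str) -> bool:
    # encode('ascii') succeeds only when every character is ASCII,
    # mirroring the try/except in _internal_utils.unicode_is_ascii().
    try:
        u_string.encode('ascii')
        return True
    except UnicodeEncodeError:
        return False

assert unicode_is_ascii('plain-header-value') is True
assert unicode_is_ascii('na\u00efve') is False  # contains a non-ASCII char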
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/utils.py
# -*- coding: utf-8 -*- """ requests.utils ~~~~~~~~~~~~~~ This module provides utility functions that are used within Requests that are also useful for external consumption. """ import codecs import contextlib import io import os import re import socket import struct import sys import tempfile import warnings import zipfile from collections import OrderedDict from .__version__ import __version__ from . import certs # to_native_string is unused here, but imported here for backwards compatibility from ._internal_utils import to_native_string from .compat import parse_http_list as _parse_list_header from .compat import ( quote, urlparse, bytes, str, unquote, getproxies, proxy_bypass, urlunparse, basestring, integer_types, is_py3, proxy_bypass_environment, getproxies_environment, Mapping) from .cookies import cookiejar_from_dict from .structures import CaseInsensitiveDict from .exceptions import ( InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError) NETRC_FILES = ('.netrc', '_netrc') DEFAULT_CA_BUNDLE_PATH = certs.where() DEFAULT_PORTS = {'http': 80, 'https': 443} if sys.platform == 'win32': # provide a proxy_bypass version on Windows without DNS lookups def proxy_bypass_registry(host): try: if is_py3: import winreg else: import _winreg as winreg except ImportError: return False try: internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it proxyEnable = int(winreg.QueryValueEx(internetSettings, 'ProxyEnable')[0]) # ProxyOverride is almost always a string proxyOverride = winreg.QueryValueEx(internetSettings, 'ProxyOverride')[0] except OSError: return False if not proxyEnable or not proxyOverride: return False # make a check value list from the registry entry: replace the # '<local>' string by the localhost entry and the corresponding # canonical entry. proxyOverride = proxyOverride.split(';') # now check if we match one of the registry values. for test in proxyOverride: if test == '<local>': if '.' not in host: return True test = test.replace(".", r"\.") # mask dots test = test.replace("*", r".*") # change glob sequence test = test.replace("?", r".") # change glob char if re.match(test, host, re.I): return True return False def proxy_bypass(host): # noqa """Return True, if the host should be bypassed. Checks proxy settings gathered from the environment, if specified, or the registry. """ if getproxies_environment(): return proxy_bypass_environment(host) else: return proxy_bypass_registry(host) def dict_to_sequence(d): """Returns an internal sequence dictionary update.""" if hasattr(d, 'items'): d = d.items() return d def super_len(o): total_length = None current_position = 0 if hasattr(o, '__len__'): total_length = len(o) elif hasattr(o, 'len'): total_length = o.len elif hasattr(o, 'fileno'): try: fileno = o.fileno() except io.UnsupportedOperation: pass else: total_length = os.fstat(fileno).st_size # Having used fstat to determine the file length, we need to # confirm that this file was opened up in binary mode. if 'b' not in o.mode: warnings.warn(( "Requests has determined the content-length for this " "request using the binary size of the file: however, the " "file has been opened in text mode (i.e. without the 'b' " "flag in the mode). This may lead to an incorrect " "content-length. 
In Requests 3.0, support will be removed " "for files in text mode."), FileModeWarning ) if hasattr(o, 'tell'): try: current_position = o.tell() except (OSError, IOError): # This can happen in some weird situations, such as when the file # is actually a special file descriptor like stdin. In this # instance, we don't know what the length is, so set it to zero and # let requests chunk it instead. if total_length is not None: current_position = total_length else: if hasattr(o, 'seek') and total_length is None: # StringIO and BytesIO have seek but no useable fileno try: # seek to end of file o.seek(0, 2) total_length = o.tell() # seek back to current position to support # partially read file-like objects o.seek(current_position or 0) except (OSError, IOError): total_length = 0 if total_length is None: total_length = 0 return max(0, total_length - current_position) def get_netrc_auth(url, raise_errors=False): """Returns the Requests tuple auth for a given url from netrc.""" try: from netrc import netrc, NetrcParseError netrc_path = None for f in NETRC_FILES: try: loc = os.path.expanduser('~/{}'.format(f)) except KeyError: # os.path.expanduser can fail when $HOME is undefined and # getpwuid fails. See https://bugs.python.org/issue20164 & # https://github.com/psf/requests/issues/1846 return if os.path.exists(loc): netrc_path = loc break # Abort early if there isn't one. if netrc_path is None: return ri = urlparse(url) # Strip port numbers from netloc. This weird `if...encode`` dance is # used for Python 3.2, which doesn't support unicode literals. splitstr = b':' if isinstance(url, str): splitstr = splitstr.decode('ascii') host = ri.netloc.split(splitstr)[0] try: _netrc = netrc(netrc_path).authenticators(host) if _netrc: # Return with login / password login_i = (0 if _netrc[0] else 1) return (_netrc[login_i], _netrc[2]) except (NetrcParseError, IOError): # If there was a parsing error or a permissions issue reading the file, # we'll just skip netrc auth unless explicitly asked to raise errors. if raise_errors: raise # AppEngine hackiness. except (ImportError, AttributeError): pass def guess_filename(obj): """Tries to guess the filename of the given object.""" name = getattr(obj, 'name', None) if (name and isinstance(name, basestring) and name[0] != '<' and name[-1] != '>'): return os.path.basename(name) def extract_zipped_paths(path): """Replace nonexistent paths that look like they refer to a member of a zip archive with the location of an extracted copy of the target, or else just return the provided path unchanged. """ if os.path.exists(path): # this is already a valid path, no need to do anything further return path # find the first valid part of the provided path and treat that as a zip archive # assume the rest of the path is the name of a member in the archive archive, member = os.path.split(path) while archive and not os.path.exists(archive): archive, prefix = os.path.split(archive) member = '/'.join([prefix, member]) if not zipfile.is_zipfile(archive): return path zip_file = zipfile.ZipFile(archive) if member not in zip_file.namelist(): return path # we have a valid zip archive and a valid member of that archive tmp = tempfile.gettempdir() extracted_path = os.path.join(tmp, *member.split('/')) if not os.path.exists(extracted_path): extracted_path = zip_file.extract(member, path=tmp) return extracted_path def from_key_val_list(value): """Take an object and test to see if it can be represented as a dictionary. 
If it can be represented as such, return an OrderedDict, e.g., :: >>> from_key_val_list([('key', 'val')]) OrderedDict([('key', 'val')]) >>> from_key_val_list('string') Traceback (most recent call last): ... ValueError: cannot encode objects that are not 2-tuples >>> from_key_val_list({'key': 'val'}) OrderedDict([('key', 'val')]) :rtype: OrderedDict """ if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError('cannot encode objects that are not 2-tuples') return OrderedDict(value) def to_key_val_list(value): """Take an object and test to see if it can be represented as a dictionary. If it can be, return a list of tuples, e.g., :: >>> to_key_val_list([('key', 'val')]) [('key', 'val')] >>> to_key_val_list({'key': 'val'}) [('key', 'val')] >>> to_key_val_list('string') Traceback (most recent call last): ... ValueError: cannot encode objects that are not 2-tuples :rtype: list """ if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError('cannot encode objects that are not 2-tuples') if isinstance(value, Mapping): value = value.items() return list(value) # From mitsuhiko/werkzeug (used with permission). def parse_list_header(value): """Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. It basically works like :func:`parse_set_header` just that items may appear multiple times and case sensitivity is preserved. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] To create a header from the :class:`list` again, use the :func:`dump_header` function. :param value: a string with a list header. :return: :class:`list` :rtype: list """ result = [] for item in _parse_list_header(value): if item[:1] == item[-1:] == '"': item = unquote_header_value(item[1:-1]) result.append(item) return result # From mitsuhiko/werkzeug (used with permission). def parse_dict_header(value): """Parse lists of key, value pairs as described by RFC 2068 Section 2 and convert them into a python dict: >>> d = parse_dict_header('foo="is a fish", bar="as well"') >>> type(d) is dict True >>> sorted(d.items()) [('bar', 'as well'), ('foo', 'is a fish')] If there is no value for a key it will be `None`: >>> parse_dict_header('key_without_value') {'key_without_value': None} To create a header from the :class:`dict` again, use the :func:`dump_header` function. :param value: a string with a dict header. :return: :class:`dict` :rtype: dict """ result = {} for item in _parse_list_header(value): if '=' not in item: result[item] = None continue name, value = item.split('=', 1) if value[:1] == value[-1:] == '"': value = unquote_header_value(value[1:-1]) result[name] = value return result # From mitsuhiko/werkzeug (used with permission). def unquote_header_value(value, is_filename=False): r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). This does not use the real unquoting but what browsers are actually using for quoting. :param value: the header value to unquote. :rtype: str """ if value and value[0] == value[-1] == '"': # this is not the real unquoting, but fixing this so that the # RFC is met will result in bugs with internet explorer and # probably some other browsers as well.
IE for example is # uploading files with "C:\foo\bar.txt" as filename value = value[1:-1] # if this is a filename and the starting characters look like # a UNC path, then just return the value without quotes. Using the # replace sequence below on a UNC path has the effect of turning # the leading double slash into a single slash and then # _fix_ie_filename() doesn't work correctly. See #458. if not is_filename or value[:2] != '\\\\': return value.replace('\\\\', '\\').replace('\\"', '"') return value def dict_from_cookiejar(cj): """Returns a key/value dictionary from a CookieJar. :param cj: CookieJar object to extract cookies from. :rtype: dict """ cookie_dict = {} for cookie in cj: cookie_dict[cookie.name] = cookie.value return cookie_dict def add_dict_to_cookiejar(cj, cookie_dict): """Returns a CookieJar from a key/value dictionary. :param cj: CookieJar to insert cookies into. :param cookie_dict: Dict of key/values to insert into CookieJar. :rtype: CookieJar """ return cookiejar_from_dict(cookie_dict, cj) def get_encodings_from_content(content): """Returns encodings from given content string. :param content: bytestring to extract encodings from. """ warnings.warn(( 'In requests 3.0, get_encodings_from_content will be removed. For ' 'more information, please see the discussion on issue #2266. (This' ' warning should only appear once.)'), DeprecationWarning) charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I) pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I) xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]') return (charset_re.findall(content) + pragma_re.findall(content) + xml_re.findall(content)) def _parse_content_type_header(header): """Returns content type and parameters from given header :param header: string :return: tuple containing content type and dictionary of parameters """ tokens = header.split(';') content_type, params = tokens[0].strip(), tokens[1:] params_dict = {} items_to_strip = "\"' " for param in params: param = param.strip() if param: key, value = param, True index_of_equals = param.find("=") if index_of_equals != -1: key = param[:index_of_equals].strip(items_to_strip) value = param[index_of_equals + 1:].strip(items_to_strip) params_dict[key.lower()] = value return content_type, params_dict def get_encoding_from_headers(headers): """Returns encodings from given HTTP Header Dict. :param headers: dictionary to extract encoding from. :rtype: str """ content_type = headers.get('content-type') if not content_type: return None content_type, params = _parse_content_type_header(content_type) if 'charset' in params: return params['charset'].strip("'\"") if 'text' in content_type: return 'ISO-8859-1' def stream_decode_response_unicode(iterator, r): """Stream decodes a iterator.""" if r.encoding is None: for item in iterator: yield item return decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace') for chunk in iterator: rv = decoder.decode(chunk) if rv: yield rv rv = decoder.decode(b'', final=True) if rv: yield rv def iter_slices(string, slice_length): """Iterate over slices of a string.""" pos = 0 if slice_length is None or slice_length <= 0: slice_length = len(string) while pos < len(string): yield string[pos:pos + slice_length] pos += slice_length def get_unicode_from_response(r): """Returns the requested content back in unicode. :param r: Response object to get unicode content from. Tried: 1. charset from content-type 2. 
fall back and replace all unicode characters :rtype: str """ warnings.warn(( 'In requests 3.0, get_unicode_from_response will be removed. For ' 'more information, please see the discussion on issue #2266. (This' ' warning should only appear once.)'), DeprecationWarning) tried_encodings = [] # Try charset from content-type encoding = get_encoding_from_headers(r.headers) if encoding: try: return str(r.content, encoding) except UnicodeError: tried_encodings.append(encoding) # Fall back: try: return str(r.content, encoding, errors='replace') except TypeError: return r.content # The unreserved URI characters (RFC 3986) UNRESERVED_SET = frozenset( "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~") def unquote_unreserved(uri): """Un-escape any percent-escape sequences in a URI that are unreserved characters. This leaves all reserved, illegal and non-ASCII bytes encoded. :rtype: str """ parts = uri.split('%') for i in range(1, len(parts)): h = parts[i][0:2] if len(h) == 2 and h.isalnum(): try: c = chr(int(h, 16)) except ValueError: raise InvalidURL("Invalid percent-escape sequence: '%s'" % h) if c in UNRESERVED_SET: parts[i] = c + parts[i][2:] else: parts[i] = '%' + parts[i] else: parts[i] = '%' + parts[i] return ''.join(parts) def requote_uri(uri): """Re-quote the given URI. This function passes the given URI through an unquote/quote cycle to ensure that it is fully and consistently quoted. :rtype: str """ safe_with_percent = "!#$%&'()*+,/:;=?@[]~" safe_without_percent = "!#$&'()*+,/:;=?@[]~" try: # Unquote only the unreserved characters # Then quote only illegal characters (do not quote reserved, # unreserved, or '%') return quote(unquote_unreserved(uri), safe=safe_with_percent) except InvalidURL: # We couldn't unquote the given URI, so let's try quoting it, but # there may be unquoted '%'s in the URI. We need to make sure they're # properly quoted so they do not cause issues elsewhere. return quote(uri, safe=safe_without_percent) def address_in_network(ip, net): """This function allows you to check if an IP belongs to a network subnet Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 :rtype: bool """ ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0] netaddr, bits = net.split('/') netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0] network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask return (ipaddr & netmask) == (network & netmask) def dotted_netmask(mask): """Converts mask from /xx format to xxx.xxx.xxx.xxx Example: if mask is 24 function returns 255.255.255.0 :rtype: str """ bits = 0xffffffff ^ (1 << 32 - mask) - 1 return socket.inet_ntoa(struct.pack('>I', bits)) def is_ipv4_address(string_ip): """ :rtype: bool """ try: socket.inet_aton(string_ip) except socket.error: return False return True def is_valid_cidr(string_network): """ Very simple check of the cidr format in no_proxy variable. :rtype: bool """ if string_network.count('/') == 1: try: mask = int(string_network.split('/')[1]) except ValueError: return False if mask < 1 or mask > 32: return False try: socket.inet_aton(string_network.split('/')[0]) except socket.error: return False else: return False return True @contextlib.contextmanager def set_environ(env_name, value): """Set the environment variable 'env_name' to 'value' Save previous value, yield, and then restore the previous value stored in the environment variable 'env_name'. 
If 'value' is None, do nothing""" value_changed = value is not None if value_changed: old_value = os.environ.get(env_name) os.environ[env_name] = value try: yield finally: if value_changed: if old_value is None: del os.environ[env_name] else: os.environ[env_name] = old_value def should_bypass_proxies(url, no_proxy): """ Returns whether we should bypass proxies or not. :rtype: bool """ # Prioritize lowercase environment variables over uppercase # to keep a consistent behaviour with other http projects (curl, wget). get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper()) # First check whether no_proxy is defined. If it is, check that the URL # we're getting isn't in the no_proxy list. no_proxy_arg = no_proxy if no_proxy is None: no_proxy = get_proxy('no_proxy') parsed = urlparse(url) if parsed.hostname is None: # URLs don't always have hostnames, e.g. file:/// urls. return True if no_proxy: # We need to check whether we match here. We need to see if we match # the end of the hostname, both with and without the port. no_proxy = ( host for host in no_proxy.replace(' ', '').split(',') if host ) if is_ipv4_address(parsed.hostname): for proxy_ip in no_proxy: if is_valid_cidr(proxy_ip): if address_in_network(parsed.hostname, proxy_ip): return True elif parsed.hostname == proxy_ip: # If no_proxy ip was defined in plain IP notation instead of cidr notation & # matches the IP of the index return True else: host_with_port = parsed.hostname if parsed.port: host_with_port += ':{}'.format(parsed.port) for host in no_proxy: if parsed.hostname.endswith(host) or host_with_port.endswith(host): # The URL does match something in no_proxy, so we don't want # to apply the proxies on this URL. return True with set_environ('no_proxy', no_proxy_arg): # parsed.hostname can be `None` in cases such as a file URI. try: bypass = proxy_bypass(parsed.hostname) except (TypeError, socket.gaierror): bypass = False if bypass: return True return False def get_environ_proxies(url, no_proxy=None): """ Return a dict of environment proxies. :rtype: dict """ if should_bypass_proxies(url, no_proxy=no_proxy): return {} else: return getproxies() def select_proxy(url, proxies): """Select a proxy for the url, if applicable. :param url: The url for the request :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs """ proxies = proxies or {} urlparts = urlparse(url) if urlparts.hostname is None: return proxies.get(urlparts.scheme, proxies.get('all')) proxy_keys = [ urlparts.scheme + '://' + urlparts.hostname, urlparts.scheme, 'all://' + urlparts.hostname, 'all', ] proxy = None for proxy_key in proxy_keys: if proxy_key in proxies: proxy = proxies[proxy_key] break return proxy def default_user_agent(name="python-requests"): """ Return a string representing the default user agent. :rtype: str """ return '%s/%s' % (name, __version__) def default_headers(): """ :rtype: requests.structures.CaseInsensitiveDict """ return CaseInsensitiveDict({ 'User-Agent': default_user_agent(), 'Accept-Encoding': ', '.join(('gzip', 'deflate')), 'Accept': '*/*', 'Connection': 'keep-alive', }) def parse_header_links(value): """Return a list of parsed link headers, i.e.
Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg" :rtype: list """ links = [] replace_chars = ' \'"' value = value.strip(replace_chars) if not value: return links for val in re.split(', *<', value): try: url, params = val.split(';', 1) except ValueError: url, params = val, '' link = {'url': url.strip('<> \'"')} for param in params.split(';'): try: key, value = param.split('=') except ValueError: break link[key.strip(replace_chars)] = value.strip(replace_chars) links.append(link) return links # Null bytes; no need to recreate these on each call to guess_json_utf _null = '\x00'.encode('ascii') # encoding to ASCII for Python 3 _null2 = _null * 2 _null3 = _null * 3 def guess_json_utf(data): """ :rtype: str """ # JSON always starts with two ASCII characters, so detection is as # easy as counting the nulls and from their location and count # determine the encoding. Also detect a BOM, if present. sample = data[:4] if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): return 'utf-32' # BOM included if sample[:3] == codecs.BOM_UTF8: return 'utf-8-sig' # BOM included, MS style (discouraged) if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): return 'utf-16' # BOM included nullcount = sample.count(_null) if nullcount == 0: return 'utf-8' if nullcount == 2: if sample[::2] == _null2: # 1st and 3rd are null return 'utf-16-be' if sample[1::2] == _null2: # 2nd and 4th are null return 'utf-16-le' # Did not detect 2 valid UTF-16 ascii-range characters if nullcount == 3: if sample[:3] == _null3: return 'utf-32-be' if sample[1:] == _null3: return 'utf-32-le' # Did not detect a valid UTF-32 ascii-range character return None def prepend_scheme_if_needed(url, new_scheme): """Given a URL that may or may not have a scheme, prepend the given scheme. Does not replace a present scheme with the one provided as an argument. :rtype: str """ scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme) # urlparse is a finicky beast, and sometimes decides that there isn't a # netloc present. Assume that it's being over-cautious, and switch netloc # and path if urlparse decided there was no netloc. if not netloc: netloc, path = path, netloc return urlunparse((scheme, netloc, path, params, query, fragment)) def get_auth_from_url(url): """Given a url with authentication components, extract them into a tuple of username,password. :rtype: (str,str) """ parsed = urlparse(url) try: auth = (unquote(parsed.username), unquote(parsed.password)) except (AttributeError, TypeError): auth = ('', '') return auth # Moved outside of function to avoid recompile every call _CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$') _CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$') def check_header_validity(header): """Verifies that header value is a string which doesn't contain leading whitespace or return characters. This prevents unintended header injection. :param header: tuple, in the format (name, value). """ name, value = header if isinstance(value, bytes): pat = _CLEAN_HEADER_REGEX_BYTE else: pat = _CLEAN_HEADER_REGEX_STR try: if not pat.match(value): raise InvalidHeader("Invalid return character or leading space in header: %s" % name) except TypeError: raise InvalidHeader("Value for header {%s: %s} must be of type str or " "bytes, not %s" % (name, value, type(value))) def urldefragauth(url): """ Given a url remove the fragment and the authentication part. 
:rtype: str """ scheme, netloc, path, params, query, fragment = urlparse(url) # see func:`prepend_scheme_if_needed` if not netloc: netloc, path = path, netloc netloc = netloc.rsplit('@', 1)[-1] return urlunparse((scheme, netloc, path, params, query, '')) def rewind_body(prepared_request): """Move file pointer back to its recorded starting position so it can be read again on redirect. """ body_seek = getattr(prepared_request.body, 'seek', None) if body_seek is not None and isinstance(prepared_request._body_position, integer_types): try: body_seek(prepared_request._body_position) except (IOError, OSError): raise UnrewindableBodyError("An error occurred when rewinding request " "body for redirect.") else: raise UnrewindableBodyError("Unable to rewind request body for redirect.")
0
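A standalone sketch of the proxy-bypass address checks (is_valid_cidr, address_in_network) from utils.py above, using the stdlib ipaddress module as a modern equivalent; the vendored code still supports Python 2 and therefore does the same arithmetic with socket and struct instead.

import ipaddress

def address_in_network(ip, net):
    # Equivalent of utils.address_in_network(): True when 'ip' falls
    # inside the CIDR block 'net'. ip_network() also validates the mask,
    # covering what is_valid_cidr() checks by hand above.
    return ipaddress.ip_address(ip) in ipaddress.ip_network(net, strict=False)

# The same examples given in the utils.py docstring:
assert address_in_network('192.168.1.1', '192.168.1.0/24') is True
assert address_in_network('192.168.1.1', '192.168.100.0/24') is False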
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/exceptions.py
# -*- coding: utf-8 -*- """ requests.exceptions ~~~~~~~~~~~~~~~~~~~ This module contains the set of Requests' exceptions. """ from pip._vendor.urllib3.exceptions import HTTPError as BaseHTTPError class RequestException(IOError): """There was an ambiguous exception that occurred while handling your request. """ def __init__(self, *args, **kwargs): """Initialize RequestException with `request` and `response` objects.""" response = kwargs.pop('response', None) self.response = response self.request = kwargs.pop('request', None) if (response is not None and not self.request and hasattr(response, 'request')): self.request = self.response.request super(RequestException, self).__init__(*args, **kwargs) class HTTPError(RequestException): """An HTTP error occurred.""" class ConnectionError(RequestException): """A Connection error occurred.""" class ProxyError(ConnectionError): """A proxy error occurred.""" class SSLError(ConnectionError): """An SSL error occurred.""" class Timeout(RequestException): """The request timed out. Catching this error will catch both :exc:`~requests.exceptions.ConnectTimeout` and :exc:`~requests.exceptions.ReadTimeout` errors. """ class ConnectTimeout(ConnectionError, Timeout): """The request timed out while trying to connect to the remote server. Requests that produced this error are safe to retry. """ class ReadTimeout(Timeout): """The server did not send any data in the allotted amount of time.""" class URLRequired(RequestException): """A valid URL is required to make a request.""" class TooManyRedirects(RequestException): """Too many redirects.""" class MissingSchema(RequestException, ValueError): """The URL schema (e.g. http or https) is missing.""" class InvalidSchema(RequestException, ValueError): """See defaults.py for valid schemas.""" class InvalidURL(RequestException, ValueError): """The URL provided was somehow invalid.""" class InvalidHeader(RequestException, ValueError): """The header value provided was somehow invalid.""" class InvalidProxyURL(InvalidURL): """The proxy URL provided is invalid.""" class ChunkedEncodingError(RequestException): """The server declared chunked encoding but sent an invalid chunk.""" class ContentDecodingError(RequestException, BaseHTTPError): """Failed to decode response content.""" class StreamConsumedError(RequestException, TypeError): """The content for this response was already consumed.""" class RetryError(RequestException): """Custom retries logic failed""" class UnrewindableBodyError(RequestException): """Requests encountered an error when trying to rewind a body.""" # Warnings class RequestsWarning(Warning): """Base warning for Requests.""" class FileModeWarning(RequestsWarning, DeprecationWarning): """A file was opened in text mode, but Requests determined its binary length.""" class RequestsDependencyWarning(RequestsWarning): """An imported dependency doesn't match the expected version range."""
0
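A sketch of how the hierarchy in exceptions.py above is typically consumed. Note that except-clause order matters here, because ConnectTimeout inherits from both ConnectionError and Timeout. Assumes the real requests package and an illustrative httpbin.org URL.

import requests
from requests.exceptions import ConnectionError, HTTPError, Timeout

try:
    resp = requests.get('https://httpbin.org/delay/10', timeout=0.5)
    resp.raise_for_status()      # raises HTTPError on 4xx/5xx statuses
except Timeout:
    # Catches both ConnectTimeout and ReadTimeout, per the docstring above.
    print('request timed out')
except ConnectionError:
    # ProxyError and SSLError are subclasses and land here too.
    print('connection failed')
except HTTPError as err:
    print('bad status:', err.response.status_code)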
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/structures.py
# -*- coding: utf-8 -*- """ requests.structures ~~~~~~~~~~~~~~~~~~~ Data structures that power Requests. """ from collections import OrderedDict from .compat import Mapping, MutableMapping class CaseInsensitiveDict(MutableMapping): """A case-insensitive ``dict``-like object. Implements all methods and operations of ``MutableMapping`` as well as dict's ``copy``. Also provides ``lower_items``. All keys are expected to be strings. The structure remembers the case of the last key to be set, and ``iter(instance)``, ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` will contain case-sensitive keys. However, querying and contains testing is case insensitive:: cid = CaseInsensitiveDict() cid['Accept'] = 'application/json' cid['aCCEPT'] == 'application/json' # True list(cid) == ['Accept'] # True For example, ``headers['content-encoding']`` will return the value of a ``'Content-Encoding'`` response header, regardless of how the header name was originally stored. If the constructor, ``.update``, or equality comparison operations are given keys that have equal ``.lower()``s, the behavior is undefined. """ def __init__(self, data=None, **kwargs): self._store = OrderedDict() if data is None: data = {} self.update(data, **kwargs) def __setitem__(self, key, value): # Use the lowercased key for lookups, but store the actual # key alongside the value. self._store[key.lower()] = (key, value) def __getitem__(self, key): return self._store[key.lower()][1] def __delitem__(self, key): del self._store[key.lower()] def __iter__(self): return (casedkey for casedkey, mappedvalue in self._store.values()) def __len__(self): return len(self._store) def lower_items(self): """Like iteritems(), but with all lowercase keys.""" return ( (lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items() ) def __eq__(self, other): if isinstance(other, Mapping): other = CaseInsensitiveDict(other) else: return NotImplemented # Compare insensitively return dict(self.lower_items()) == dict(other.lower_items()) # Copy is required def copy(self): return CaseInsensitiveDict(self._store.values()) def __repr__(self): return str(dict(self.items())) class LookupDict(dict): """Dictionary lookup object.""" def __init__(self, name=None): self.name = name super(LookupDict, self).__init__() def __repr__(self): return '<lookup \'%s\'>' % (self.name) def __getitem__(self, key): # We allow fall-through here, so values default to None return self.__dict__.get(key, None) def get(self, key, default=None): return self.__dict__.get(key, default)
0
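A quick demonstration of the CaseInsensitiveDict semantics documented in structures.py above; response.headers in requests is this same type. Assumes the real requests package.

from requests.structures import CaseInsensitiveDict

cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
assert cid['aCCEPT'] == 'application/json'  # lookups ignore case
assert list(cid) == ['Accept']              # iteration keeps the set case
cid['ACCEPT'] = 'text/html'                 # re-setting remembers new case
assert list(cid) == ['ACCEPT']
assert 'accept' in cid                      # containment is insensitive too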
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/help.py
"""Module containing bug report helper(s).""" from __future__ import print_function import json import platform import sys import ssl from pip._vendor import idna from pip._vendor import urllib3 from pip._vendor import chardet from . import __version__ as requests_version try: from pip._vendor.urllib3.contrib import pyopenssl except ImportError: pyopenssl = None OpenSSL = None cryptography = None else: import OpenSSL import cryptography def _implementation(): """Return a dict with the Python implementation and version. Provide both the name and the version of the Python implementation currently running. For example, on CPython 2.7.5 it will return {'name': 'CPython', 'version': '2.7.5'}. This function works best on CPython and PyPy: in particular, it probably doesn't work for Jython or IronPython. Future investigation should be done to work out the correct shape of the code for those platforms. """ implementation = platform.python_implementation() if implementation == 'CPython': implementation_version = platform.python_version() elif implementation == 'PyPy': implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major, sys.pypy_version_info.minor, sys.pypy_version_info.micro) if sys.pypy_version_info.releaselevel != 'final': implementation_version = ''.join([ implementation_version, sys.pypy_version_info.releaselevel ]) elif implementation == 'Jython': implementation_version = platform.python_version() # Complete Guess elif implementation == 'IronPython': implementation_version = platform.python_version() # Complete Guess else: implementation_version = 'Unknown' return {'name': implementation, 'version': implementation_version} def info(): """Generate information for a bug report.""" try: platform_info = { 'system': platform.system(), 'release': platform.release(), } except IOError: platform_info = { 'system': 'Unknown', 'release': 'Unknown', } implementation_info = _implementation() urllib3_info = {'version': urllib3.__version__} chardet_info = {'version': chardet.__version__} pyopenssl_info = { 'version': None, 'openssl_version': '', } if OpenSSL: pyopenssl_info = { 'version': OpenSSL.__version__, 'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER, } cryptography_info = { 'version': getattr(cryptography, '__version__', ''), } idna_info = { 'version': getattr(idna, '__version__', ''), } system_ssl = ssl.OPENSSL_VERSION_NUMBER system_ssl_info = { 'version': '%x' % system_ssl if system_ssl is not None else '' } return { 'platform': platform_info, 'implementation': implementation_info, 'system_ssl': system_ssl_info, 'using_pyopenssl': pyopenssl is not None, 'pyOpenSSL': pyopenssl_info, 'urllib3': urllib3_info, 'chardet': chardet_info, 'cryptography': cryptography_info, 'idna': idna_info, 'requests': { 'version': requests_version, }, } def main(): """Pretty-print the bug information as JSON.""" print(json.dumps(info(), sort_keys=True, indent=2)) if __name__ == '__main__': main()
0
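The bug-report helper in help.py above is runnable as-is. With the real (non-vendored) requests package installed, the same information is available two ways; the exact keys come from info() above.

# From a shell, main() pretty-prints the full JSON report:
#   python -m requests.help

# Or programmatically:
from requests.help import info

report = info()
print(report['implementation']['name'])  # e.g. 'CPython'
print(report['urllib3']['version'])
print(report['using_pyopenssl'])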
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/requests/adapters.py
# -*- coding: utf-8 -*- """ requests.adapters ~~~~~~~~~~~~~~~~~ This module contains the transport adapters that Requests uses to define and maintain connections. """ import os.path import socket from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url from pip._vendor.urllib3.response import HTTPResponse from pip._vendor.urllib3.util import parse_url from pip._vendor.urllib3.util import Timeout as TimeoutSauce from pip._vendor.urllib3.util.retry import Retry from pip._vendor.urllib3.exceptions import ClosedPoolError from pip._vendor.urllib3.exceptions import ConnectTimeoutError from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError from pip._vendor.urllib3.exceptions import MaxRetryError from pip._vendor.urllib3.exceptions import NewConnectionError from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError from pip._vendor.urllib3.exceptions import ProtocolError from pip._vendor.urllib3.exceptions import ReadTimeoutError from pip._vendor.urllib3.exceptions import SSLError as _SSLError from pip._vendor.urllib3.exceptions import ResponseError from pip._vendor.urllib3.exceptions import LocationValueError from .models import Response from .compat import urlparse, basestring from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths, get_encoding_from_headers, prepend_scheme_if_needed, get_auth_from_url, urldefragauth, select_proxy) from .structures import CaseInsensitiveDict from .cookies import extract_cookies_to_jar from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError, ProxyError, RetryError, InvalidSchema, InvalidProxyURL, InvalidURL) from .auth import _basic_auth_str try: from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager except ImportError: def SOCKSProxyManager(*args, **kwargs): raise InvalidSchema("Missing dependencies for SOCKS support.") DEFAULT_POOLBLOCK = False DEFAULT_POOLSIZE = 10 DEFAULT_RETRIES = 0 DEFAULT_POOL_TIMEOUT = None class BaseAdapter(object): """The Base Transport Adapter""" def __init__(self): super(BaseAdapter, self).__init__() def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. """ raise NotImplementedError def close(self): """Cleans up adapter specific items.""" raise NotImplementedError class HTTPAdapter(BaseAdapter): """The built-in HTTP Adapter for urllib3. Provides a general-case interface for Requests sessions to contact HTTP and HTTPS urls by implementing the Transport Adapter interface. This class will usually be created by the :class:`Session <Session>` class under the covers. :param pool_connections: The number of urllib3 connection pools to cache. :param pool_maxsize: The maximum number of connections to save in the pool. :param max_retries: The maximum number of retries each connection should attempt. 
Note, this applies only to failed DNS lookups, socket connections and connection timeouts, never to requests where data has made it to the server. By default, Requests does not retry failed connections. If you need granular control over the conditions under which we retry a request, import urllib3's ``Retry`` class and pass that instead. :param pool_block: Whether the connection pool should block for connections. Usage:: >>> import requests >>> s = requests.Session() >>> a = requests.adapters.HTTPAdapter(max_retries=3) >>> s.mount('http://', a) """ __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize', '_pool_block'] def __init__(self, pool_connections=DEFAULT_POOLSIZE, pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES, pool_block=DEFAULT_POOLBLOCK): if max_retries == DEFAULT_RETRIES: self.max_retries = Retry(0, read=False) else: self.max_retries = Retry.from_int(max_retries) self.config = {} self.proxy_manager = {} super(HTTPAdapter, self).__init__() self._pool_connections = pool_connections self._pool_maxsize = pool_maxsize self._pool_block = pool_block self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block) def __getstate__(self): return {attr: getattr(self, attr, None) for attr in self.__attrs__} def __setstate__(self, state): # Can't handle by adding 'proxy_manager' to self.__attrs__ because # self.poolmanager uses a lambda function, which isn't pickleable. self.proxy_manager = {} self.config = {} for attr, value in state.items(): setattr(self, attr, value) self.init_poolmanager(self._pool_connections, self._pool_maxsize, block=self._pool_block) def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs): """Initializes a urllib3 PoolManager. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param connections: The number of urllib3 connection pools to cache. :param maxsize: The maximum number of connections to save in the pool. :param block: Block when no free connections are available. :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. """ # save these values for pickling self._pool_connections = connections self._pool_maxsize = maxsize self._pool_block = block self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, block=block, strict=True, **pool_kwargs) def proxy_manager_for(self, proxy, **proxy_kwargs): """Return urllib3 ProxyManager for the given proxy. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param proxy: The proxy to return a urllib3 ProxyManager for. :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager. 
:returns: ProxyManager :rtype: urllib3.ProxyManager """ if proxy in self.proxy_manager: manager = self.proxy_manager[proxy] elif proxy.lower().startswith('socks'): username, password = get_auth_from_url(proxy) manager = self.proxy_manager[proxy] = SOCKSProxyManager( proxy, username=username, password=password, num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, **proxy_kwargs ) else: proxy_headers = self.proxy_headers(proxy) manager = self.proxy_manager[proxy] = proxy_from_url( proxy, proxy_headers=proxy_headers, num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, **proxy_kwargs) return manager def cert_verify(self, conn, url, verify, cert): """Verify a SSL certificate. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param conn: The urllib3 connection object associated with the cert. :param url: The requested URL. :param verify: Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: The SSL certificate to verify. """ if url.lower().startswith('https') and verify: cert_loc = None # Allow self-specified cert location. if verify is not True: cert_loc = verify if not cert_loc: cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH) if not cert_loc or not os.path.exists(cert_loc): raise IOError("Could not find a suitable TLS CA certificate bundle, " "invalid path: {}".format(cert_loc)) conn.cert_reqs = 'CERT_REQUIRED' if not os.path.isdir(cert_loc): conn.ca_certs = cert_loc else: conn.ca_cert_dir = cert_loc else: conn.cert_reqs = 'CERT_NONE' conn.ca_certs = None conn.ca_cert_dir = None if cert: if not isinstance(cert, basestring): conn.cert_file = cert[0] conn.key_file = cert[1] else: conn.cert_file = cert conn.key_file = None if conn.cert_file and not os.path.exists(conn.cert_file): raise IOError("Could not find the TLS certificate file, " "invalid path: {}".format(conn.cert_file)) if conn.key_file and not os.path.exists(conn.key_file): raise IOError("Could not find the TLS key file, " "invalid path: {}".format(conn.key_file)) def build_response(self, req, resp): """Builds a :class:`Response <requests.Response>` object from a urllib3 response. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>` :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response. :param resp: The urllib3 response object. :rtype: requests.Response """ response = Response() # Fallback to None if there's no status_code, for whatever reason. response.status_code = getattr(resp, 'status', None) # Make headers case-insensitive. response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {})) # Set encoding. response.encoding = get_encoding_from_headers(response.headers) response.raw = resp response.reason = response.raw.reason if isinstance(req.url, bytes): response.url = req.url.decode('utf-8') else: response.url = req.url # Add new cookies from the server. extract_cookies_to_jar(response.cookies, req, resp) # Give the Response some context. response.request = req response.connection = self return response def get_connection(self, url, proxies=None): """Returns a urllib3 connection for the given URL. 
This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param url: The URL to connect to. :param proxies: (optional) A Requests-style dictionary of proxies used on this request. :rtype: urllib3.ConnectionPool """ proxy = select_proxy(url, proxies) if proxy: proxy = prepend_scheme_if_needed(proxy, 'http') proxy_url = parse_url(proxy) if not proxy_url.host: raise InvalidProxyURL("Please check proxy URL. It is malformed" " and could be missing the host.") proxy_manager = self.proxy_manager_for(proxy) conn = proxy_manager.connection_from_url(url) else: # Only scheme should be lower case parsed = urlparse(url) url = parsed.geturl() conn = self.poolmanager.connection_from_url(url) return conn def close(self): """Disposes of any internal state. Currently, this closes the PoolManager and any active ProxyManager, which closes any pooled connections. """ self.poolmanager.clear() for proxy in self.proxy_manager.values(): proxy.clear() def request_url(self, request, proxies): """Obtain the url to use when making the final request. If the message is being sent through a HTTP proxy, the full URL has to be used. Otherwise, we should only use the path portion of the URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. :rtype: str """ proxy = select_proxy(request.url, proxies) scheme = urlparse(request.url).scheme is_proxied_http_request = (proxy and scheme != 'https') using_socks_proxy = False if proxy: proxy_scheme = urlparse(proxy).scheme.lower() using_socks_proxy = proxy_scheme.startswith('socks') url = request.path_url if is_proxied_http_request and not using_socks_proxy: url = urldefragauth(request.url) return url def add_headers(self, request, **kwargs): """Add any headers needed by the connection. As of v2.0 this does nothing by default, but is left for overriding by users that subclass the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to. :param kwargs: The keyword arguments from the call to send(). """ pass def proxy_headers(self, proxy): """Returns a dictionary of the headers to add to any request sent through a proxy. This works with urllib3 magic to ensure that they are correctly sent to the proxy, rather than in a tunnelled request if CONNECT is being used. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param proxy: The url of the proxy being used for this request. :rtype: dict """ headers = {} username, password = get_auth_from_url(proxy) if username: headers['Proxy-Authorization'] = _basic_auth_str(username, password) return headers def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param stream: (optional) Whether to stream the request content. 
:param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple or urllib3 Timeout object :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. :rtype: requests.Response """ try: conn = self.get_connection(request.url, proxies) except LocationValueError as e: raise InvalidURL(e, request=request) self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies) chunked = not (request.body is None or 'Content-Length' in request.headers) if isinstance(timeout, tuple): try: connect, read = timeout timeout = TimeoutSauce(connect=connect, read=read) except ValueError as e: # this may raise a string formatting error. err = ("Invalid timeout {}. Pass a (connect, read) " "timeout tuple, or a single float to set " "both timeouts to the same value".format(timeout)) raise ValueError(err) elif isinstance(timeout, TimeoutSauce): pass else: timeout = TimeoutSauce(connect=timeout, read=timeout) try: if not chunked: resp = conn.urlopen( method=request.method, url=url, body=request.body, headers=request.headers, redirect=False, assert_same_host=False, preload_content=False, decode_content=False, retries=self.max_retries, timeout=timeout ) # Send the request. else: if hasattr(conn, 'proxy_pool'): conn = conn.proxy_pool low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT) try: low_conn.putrequest(request.method, url, skip_accept_encoding=True) for header, value in request.headers.items(): low_conn.putheader(header, value) low_conn.endheaders() for i in request.body: low_conn.send(hex(len(i))[2:].encode('utf-8')) low_conn.send(b'\r\n') low_conn.send(i) low_conn.send(b'\r\n') low_conn.send(b'0\r\n\r\n') # Receive the response from the server try: # For Python 2.7, use buffering of HTTP responses r = low_conn.getresponse(buffering=True) except TypeError: # For compatibility with Python 3.3+ r = low_conn.getresponse() resp = HTTPResponse.from_httplib( r, pool=conn, connection=low_conn, preload_content=False, decode_content=False ) except: # If we hit any problems here, clean up the connection. # Then, reraise so that we can handle the actual exception. low_conn.close() raise except (ProtocolError, socket.error) as err: raise ConnectionError(err, request=request) except MaxRetryError as e: if isinstance(e.reason, ConnectTimeoutError): # TODO: Remove this in 3.0.0: see #2811 if not isinstance(e.reason, NewConnectionError): raise ConnectTimeout(e, request=request) if isinstance(e.reason, ResponseError): raise RetryError(e, request=request) if isinstance(e.reason, _ProxyError): raise ProxyError(e, request=request) if isinstance(e.reason, _SSLError): # This branch is for urllib3 v1.22 and later. 
raise SSLError(e, request=request) raise ConnectionError(e, request=request) except ClosedPoolError as e: raise ConnectionError(e, request=request) except _ProxyError as e: raise ProxyError(e) except (_SSLError, _HTTPError) as e: if isinstance(e, _SSLError): # This branch is for urllib3 versions earlier than v1.22 raise SSLError(e, request=request) elif isinstance(e, ReadTimeoutError): raise ReadTimeout(e, request=request) else: raise return self.build_response(request, resp)
0
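A usage sketch for HTTPAdapter mirroring the Usage block in its docstring above: mount an adapter with a retry budget and a larger connection pool onto a Session. As the docstring notes, max_retries covers only DNS lookup, socket-connect, and connect-timeout failures, never requests whose data already reached the server.

import requests
from requests.adapters import HTTPAdapter

session = requests.Session()
adapter = HTTPAdapter(pool_connections=20,  # number of pools to cache
                      pool_maxsize=20,      # connections kept per pool
                      max_retries=3)        # retry connect-stage failures
session.mount('https://', adapter)  # applies to every https:// URL
session.mount('http://', adapter)

resp = session.get('https://httpbin.org/get')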
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/html5lib/_inputstream.py
from __future__ import absolute_import, division, unicode_literals

from pip._vendor.six import text_type
from pip._vendor.six.moves import http_client, urllib

import codecs
import re
from io import BytesIO, StringIO

from pip._vendor import webencodings

from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import _ReparseException
from . import _utils

# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])


invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]"  # noqa

if _utils.supports_lone_surrogates:
    # Use one extra step of indirection and create surrogates with
    # eval. Not using this indirection would introduce an illegal
    # unicode literal on platforms not supporting such lone
    # surrogates.
    assert invalid_unicode_no_surrogate[-1] == "]" and invalid_unicode_no_surrogate.count("]") == 1
    invalid_unicode_re = re.compile(invalid_unicode_no_surrogate[:-1] +
                                    eval('"\\uD800-\\uDFFF"') +  # pylint:disable=eval-used
                                    "]")
else:
    invalid_unicode_re = re.compile(invalid_unicode_no_surrogate)

non_bmp_invalid_codepoints = {0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                              0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
                              0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
                              0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
                              0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                              0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
                              0x10FFFE, 0x10FFFF}

ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005C\u005B-\u0060\u007B-\u007E]")

# Cache for charsUntil()
charsUntilRegEx = {}


class BufferedStream(object):
    """Buffering for streams that do not have buffering of their own

    The buffer is implemented as a list of chunks on the assumption that
    joining many strings will be slow since it is O(n**2)
    """

    def __init__(self, stream):
        self.stream = stream
        self.buffer = []
        self.position = [-1, 0]  # chunk number, offset

    def tell(self):
        pos = 0
        for chunk in self.buffer[:self.position[0]]:
            pos += len(chunk)
        pos += self.position[1]
        return pos

    def seek(self, pos):
        assert pos <= self._bufferedBytes()
        offset = pos
        i = 0
        while len(self.buffer[i]) < offset:
            offset -= len(self.buffer[i])
            i += 1
        self.position = [i, offset]

    def read(self, bytes):
        if not self.buffer:
            return self._readStream(bytes)
        elif (self.position[0] == len(self.buffer) and
              self.position[1] == len(self.buffer[-1])):
            return self._readStream(bytes)
        else:
            return self._readFromBuffer(bytes)

    def _bufferedBytes(self):
        return sum([len(item) for item in self.buffer])

    def _readStream(self, bytes):
        data = self.stream.read(bytes)
        self.buffer.append(data)
        self.position[0] += 1
        self.position[1] = len(data)
        return data

    def _readFromBuffer(self, bytes):
        remainingBytes = bytes
        rv = []
        bufferIndex = self.position[0]
        bufferOffset = self.position[1]
        while bufferIndex < len(self.buffer) and remainingBytes != 0:
            assert remainingBytes > 0
            bufferedData = self.buffer[bufferIndex]

            if remainingBytes <= len(bufferedData) - bufferOffset:
                bytesToRead = remainingBytes
                self.position = [bufferIndex, bufferOffset + bytesToRead]
            else:
                bytesToRead = len(bufferedData) - bufferOffset
                self.position = [bufferIndex, len(bufferedData)]
                bufferIndex += 1
            rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
            remainingBytes -= bytesToRead

            bufferOffset = 0

        if remainingBytes:
            rv.append(self._readStream(remainingBytes))

        return b"".join(rv)


def HTMLInputStream(source, **kwargs):
    # Work around Python bug #20007: read(0) closes the connection.
    # http://bugs.python.org/issue20007
    if (isinstance(source, http_client.HTTPResponse) or
        # Also check for addinfourl wrapping HTTPResponse
        (isinstance(source, urllib.response.addbase) and
         isinstance(source.fp, http_client.HTTPResponse))):
        isUnicode = False
    elif hasattr(source, "read"):
        isUnicode = isinstance(source.read(0), text_type)
    else:
        isUnicode = isinstance(source, text_type)

    if isUnicode:
        encodings = [x for x in kwargs if x.endswith("_encoding")]
        if encodings:
            raise TypeError("Cannot set an encoding with a unicode input, set %r" % encodings)

        return HTMLUnicodeInputStream(source, **kwargs)
    else:
        return HTMLBinaryInputStream(source, **kwargs)


class HTMLUnicodeInputStream(object):
    """Provides a unicode stream of characters to the HTMLTokenizer.

    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.

    """

    _defaultChunkSize = 10240

    def __init__(self, source):
        """Initialises the HTMLInputStream.

        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.

        source can be either a file-object, local filename or a string.

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)

        """

        if not _utils.supports_lone_surrogates:
            # Such platforms will have already checked for such
            # surrogate errors, so no need to do this checking.
            self.reportCharacterErrors = None
        elif len("\U0010FFFF") == 1:
            self.reportCharacterErrors = self.characterErrorsUCS4
        else:
            self.reportCharacterErrors = self.characterErrorsUCS2

        # List of where new lines occur
        self.newLines = [0]

        self.charEncoding = (lookupEncoding("utf-8"), "certain")
        self.dataStream = self.openStream(source)

        self.reset()

    def reset(self):
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        self.errors = []

        # number of (complete) lines in previous chunks
        self.prevNumLines = 0
        # number of columns in the last line of the previous chunk
        self.prevNumCols = 0

        # Deal with CR LF and surrogates split over chunk boundaries
        self._bufferedCharacter = None

    def openStream(self, source):
        """Produces a file object from source.

        source can be either a file object, local filename or a string.

        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = StringIO(source)

        return stream

    def _position(self, offset):
        chunk = self.chunk
        nLines = chunk.count('\n', 0, offset)
        positionLine = self.prevNumLines + nLines
        lastLinePos = chunk.rfind('\n', 0, offset)
        if lastLinePos == -1:
            positionColumn = self.prevNumCols + offset
        else:
            positionColumn = offset - (lastLinePos + 1)
        return (positionLine, positionColumn)

    def position(self):
        """Returns (line, col) of the current position in the stream."""
        line, col = self._position(self.chunkOffset)
        return (line + 1, col)

    def char(self):
        """ Read one character from the stream or queue if available. Return
            EOF when EOF is reached.
        """
        # Read a new chunk from the input stream if necessary
        if self.chunkOffset >= self.chunkSize:
            if not self.readChunk():
                return EOF

        chunkOffset = self.chunkOffset
        char = self.chunk[chunkOffset]
        self.chunkOffset = chunkOffset + 1

        return char

    def readChunk(self, chunkSize=None):
        if chunkSize is None:
            chunkSize = self._defaultChunkSize

        self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)

        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0

        data = self.dataStream.read(chunkSize)

        # Deal with CR LF and surrogates broken across chunks
        if self._bufferedCharacter:
            data = self._bufferedCharacter + data
            self._bufferedCharacter = None
        elif not data:
            # We have no more data, bye-bye stream
            return False

        if len(data) > 1:
            lastv = ord(data[-1])
            if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
                self._bufferedCharacter = data[-1]
                data = data[:-1]

        if self.reportCharacterErrors:
            self.reportCharacterErrors(data)

        # Replace invalid characters
        data = data.replace("\r\n", "\n")
        data = data.replace("\r", "\n")

        self.chunk = data
        self.chunkSize = len(data)

        return True

    def characterErrorsUCS4(self, data):
        for _ in range(len(invalid_unicode_re.findall(data))):
            self.errors.append("invalid-codepoint")

    def characterErrorsUCS2(self, data):
        # Someone picked the wrong compile option
        # You lose
        skip = False
        for match in invalid_unicode_re.finditer(data):
            if skip:
                continue
            codepoint = ord(match.group())
            pos = match.start()
            # Pretty sure there should be endianness issues here
            if _utils.isSurrogatePair(data[pos:pos + 2]):
                # We have a surrogate pair!
                char_val = _utils.surrogatePairToCodepoint(data[pos:pos + 2])
                if char_val in non_bmp_invalid_codepoints:
                    self.errors.append("invalid-codepoint")
                skip = True
            elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
                  pos == len(data) - 1):
                self.errors.append("invalid-codepoint")
            else:
                skip = False
                self.errors.append("invalid-codepoint")

    def charsUntil(self, characters, opposite=False):
        """ Returns a string of characters from the stream up to but not
        including any character in 'characters' or EOF. 'characters' must be
        a container that supports the 'in' method and iteration over its
        characters.
        """

        # Use a cache of regexps to find the required characters
        try:
            chars = charsUntilRegEx[(characters, opposite)]
        except KeyError:
            if __debug__:
                for c in characters:
                    assert(ord(c) < 128)
            regex = "".join(["\\x%02x" % ord(c) for c in characters])
            if not opposite:
                regex = "^%s" % regex
            chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)

        rv = []

        while True:
            # Find the longest matching prefix
            m = chars.match(self.chunk, self.chunkOffset)
            if m is None:
                # If nothing matched, and it wasn't because we ran out of chunk,
                # then stop
                if self.chunkOffset != self.chunkSize:
                    break
            else:
                end = m.end()
                # If not the whole chunk matched, return everything
                # up to the part that didn't match
                if end != self.chunkSize:
                    rv.append(self.chunk[self.chunkOffset:end])
                    self.chunkOffset = end
                    break
            # If the whole remainder of the chunk matched,
            # use it all and read the next chunk
            rv.append(self.chunk[self.chunkOffset:])
            if not self.readChunk():
                # Reached EOF
                break

        r = "".join(rv)
        return r

    def unget(self, char):
        # Only one character is allowed to be ungotten at once - it must
        # be consumed again before any further call to unget
        if char is not EOF:
            if self.chunkOffset == 0:
                # unget is called quite rarely, so it's a good idea to do
                # more work here if it saves a bit of work in the frequently
                # called char and charsUntil.
                # So, just prepend the ungotten character onto the current
                # chunk:
                self.chunk = char + self.chunk
                self.chunkSize += 1
            else:
                self.chunkOffset -= 1
                assert self.chunk[self.chunkOffset] == char


class HTMLBinaryInputStream(HTMLUnicodeInputStream):
    """Provides a unicode stream of characters to the HTMLTokenizer.

    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.

    """

    def __init__(self, source, override_encoding=None, transport_encoding=None,
                 same_origin_parent_encoding=None, likely_encoding=None,
                 default_encoding="windows-1252", useChardet=True):
        """Initialises the HTMLInputStream.

        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.

        source can be either a file-object, local filename or a string.

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)

        """
        # Raw Stream - for unicode objects this will encode to utf-8 and set
        # self.charEncoding as appropriate
        self.rawStream = self.openStream(source)

        HTMLUnicodeInputStream.__init__(self, self.rawStream)

        # Encoding Information
        # Number of bytes to use when looking for a meta element with
        # encoding information
        self.numBytesMeta = 1024
        # Number of bytes to use when using detecting encoding using chardet
        self.numBytesChardet = 100
        # Things from args
        self.override_encoding = override_encoding
        self.transport_encoding = transport_encoding
        self.same_origin_parent_encoding = same_origin_parent_encoding
        self.likely_encoding = likely_encoding
        self.default_encoding = default_encoding

        # Determine encoding
        self.charEncoding = self.determineEncoding(useChardet)
        assert self.charEncoding[0] is not None

        # Call superclass
        self.reset()

    def reset(self):
        self.dataStream = self.charEncoding[0].codec_info.streamreader(self.rawStream, 'replace')
        HTMLUnicodeInputStream.reset(self)

    def openStream(self, source):
        """Produces a file object from source.

        source can be either a file object, local filename or a string.

        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = BytesIO(source)

        try:
            stream.seek(stream.tell())
        except Exception:
            stream = BufferedStream(stream)

        return stream

    def determineEncoding(self, chardet=True):
        # BOMs take precedence over everything
        # This will also read past the BOM if present
        charEncoding = self.detectBOM(), "certain"
        if charEncoding[0] is not None:
            return charEncoding

        # If we've been overridden, we've been overridden
        charEncoding = lookupEncoding(self.override_encoding), "certain"
        if charEncoding[0] is not None:
            return charEncoding

        # Now check the transport layer
        charEncoding = lookupEncoding(self.transport_encoding), "certain"
        if charEncoding[0] is not None:
            return charEncoding

        # Look for meta elements with encoding information
        charEncoding = self.detectEncodingMeta(), "tentative"
        if charEncoding[0] is not None:
            return charEncoding

        # Parent document encoding
        charEncoding = lookupEncoding(self.same_origin_parent_encoding), "tentative"
        if charEncoding[0] is not None and not charEncoding[0].name.startswith("utf-16"):
            return charEncoding

        # "likely" encoding
        charEncoding = lookupEncoding(self.likely_encoding), "tentative"
        if charEncoding[0] is not None:
            return charEncoding

        # Guess with chardet, if available
        if chardet:
            try:
                from pip._vendor.chardet.universaldetector import UniversalDetector
            except ImportError:
                pass
            else:
                buffers = []
                detector = UniversalDetector()
                while not detector.done:
                    buffer = self.rawStream.read(self.numBytesChardet)
                    assert isinstance(buffer, bytes)
                    if not buffer:
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = lookupEncoding(detector.result['encoding'])
                self.rawStream.seek(0)
                if encoding is not None:
                    return encoding, "tentative"

        # Try the default encoding
        charEncoding = lookupEncoding(self.default_encoding), "tentative"
        if charEncoding[0] is not None:
            return charEncoding

        # Fallback to html5lib's default if even that hasn't worked
        return lookupEncoding("windows-1252"), "tentative"

    def changeEncoding(self, newEncoding):
        assert self.charEncoding[1] != "certain"
        newEncoding = lookupEncoding(newEncoding)
        if newEncoding is None:
            return
        if newEncoding.name in ("utf-16be", "utf-16le"):
            newEncoding = lookupEncoding("utf-8")
            assert newEncoding is not None
        elif newEncoding == self.charEncoding[0]:
            self.charEncoding = (self.charEncoding[0], "certain")
        else:
            self.rawStream.seek(0)
            self.charEncoding = (newEncoding, "certain")
            self.reset()
            raise _ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))

    def detectBOM(self):
        """Attempts to detect a BOM at the start of the stream. If
        an encoding can be determined from the BOM return the name of the
        encoding otherwise return None"""
        bomDict = {
            codecs.BOM_UTF8: 'utf-8',
            codecs.BOM_UTF16_LE: 'utf-16le', codecs.BOM_UTF16_BE: 'utf-16be',
            codecs.BOM_UTF32_LE: 'utf-32le', codecs.BOM_UTF32_BE: 'utf-32be'
        }

        # Go to beginning of file and read in 4 bytes
        string = self.rawStream.read(4)
        assert isinstance(string, bytes)

        # Try detecting the BOM using bytes from the string
        encoding = bomDict.get(string[:3])  # UTF-8
        seek = 3
        if not encoding:
            # Need to detect UTF-32 before UTF-16
            encoding = bomDict.get(string)  # UTF-32
            seek = 4
            if not encoding:
                encoding = bomDict.get(string[:2])  # UTF-16
                seek = 2

        # Set the read position past the BOM if one was found, otherwise
        # set it to the start of the stream
        if encoding:
            self.rawStream.seek(seek)
            return lookupEncoding(encoding)
        else:
            self.rawStream.seek(0)
            return None

    def detectEncodingMeta(self):
        """Report the encoding declared by the meta element
        """
        buffer = self.rawStream.read(self.numBytesMeta)
        assert isinstance(buffer, bytes)
        parser = EncodingParser(buffer)
        self.rawStream.seek(0)
        encoding = parser.getEncoding()

        if encoding is not None and encoding.name in ("utf-16be", "utf-16le"):
            encoding = lookupEncoding("utf-8")

        return encoding


class EncodingBytes(bytes):
    """String-like object with an associated position and various extra methods
    If the position is ever greater than the string length then an exception is
    raised"""
    def __new__(self, value):
        assert isinstance(value, bytes)
        return bytes.__new__(self, value.lower())

    def __init__(self, value):  # pylint:disable=unused-argument
        self._position = -1

    def __iter__(self):
        return self

    def __next__(self):
        p = self._position = self._position + 1
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        return self[p:p + 1]

    def next(self):
        # Py2 compat
        return self.__next__()

    def previous(self):
        p = self._position
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        self._position = p = p - 1
        return self[p:p + 1]

    def setPosition(self, position):
        if self._position >= len(self):
            raise StopIteration
        self._position = position

    def getPosition(self):
        if self._position >= len(self):
            raise StopIteration
        if self._position >= 0:
            return self._position
        else:
            return None

    position = property(getPosition, setPosition)

    def getCurrentByte(self):
        return self[self.position:self.position + 1]

    currentByte = property(getCurrentByte)

    def skip(self, chars=spaceCharactersBytes):
        """Skip past a list of characters"""
        p = self.position  # use property for the error-checking
        while p < len(self):
            c = self[p:p + 1]
            if c not in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None

    def skipUntil(self, chars):
        p = self.position
        while p < len(self):
            c = self[p:p + 1]
            if c in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None

    def matchBytes(self, bytes):
        """Look for a sequence of bytes at the start of a string. If the bytes
        are found return True and advance the position to the byte after the
        match. Otherwise return False and leave the position alone"""
        rv = self.startswith(bytes, self.position)
        if rv:
            self.position += len(bytes)
        return rv

    def jumpTo(self, bytes):
        """Look for the next sequence of bytes matching a given sequence. If
        a match is found advance the position to the last byte of the match"""
        try:
            self._position = self.index(bytes, self.position) + len(bytes) - 1
        except ValueError:
            raise StopIteration
        return True


class EncodingParser(object):
    """Mini parser for detecting character encoding from meta elements"""

    def __init__(self, data):
        """string - the data to work on for encoding detection"""
        self.data = EncodingBytes(data)
        self.encoding = None

    def getEncoding(self):
        if b"<meta" not in self.data:
            return None

        methodDispatch = (
            (b"<!--", self.handleComment),
            (b"<meta", self.handleMeta),
            (b"</", self.handlePossibleEndTag),
            (b"<!", self.handleOther),
            (b"<?", self.handleOther),
            (b"<", self.handlePossibleStartTag))
        for _ in self.data:
            keepParsing = True
            try:
                self.data.jumpTo(b"<")
            except StopIteration:
                break
            for key, method in methodDispatch:
                if self.data.matchBytes(key):
                    try:
                        keepParsing = method()
                        break
                    except StopIteration:
                        keepParsing = False
                        break
            if not keepParsing:
                break

        return self.encoding

    def handleComment(self):
        """Skip over comments"""
        return self.data.jumpTo(b"-->")

    def handleMeta(self):
        if self.data.currentByte not in spaceCharactersBytes:
            # if we have <meta not followed by a space, just keep going
            return True
        # We have a valid meta element we want to search for attributes
        hasPragma = False
        pendingEncoding = None
        while True:
            # Try to find the next attribute after the current position
            attr = self.getAttribute()
            if attr is None:
                return True
            else:
                if attr[0] == b"http-equiv":
                    hasPragma = attr[1] == b"content-type"
                    if hasPragma and pendingEncoding is not None:
                        self.encoding = pendingEncoding
                        return False
                elif attr[0] == b"charset":
                    tentativeEncoding = attr[1]
                    codec = lookupEncoding(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False
                elif attr[0] == b"content":
                    contentParser = ContentAttrParser(EncodingBytes(attr[1]))
                    tentativeEncoding = contentParser.parse()
                    if tentativeEncoding is not None:
                        codec = lookupEncoding(tentativeEncoding)
                        if codec is not None:
                            if hasPragma:
                                self.encoding = codec
                                return False
                            else:
                                pendingEncoding = codec

    def handlePossibleStartTag(self):
        return self.handlePossibleTag(False)

    def handlePossibleEndTag(self):
        next(self.data)
        return self.handlePossibleTag(True)

    def handlePossibleTag(self, endTag):
        data = self.data
        if data.currentByte not in asciiLettersBytes:
            # If the next byte is not an ascii letter either ignore this
            # fragment (possible start tag case) or treat it according to
            # handleOther
            if endTag:
                data.previous()
                self.handleOther()
            return True

        c = data.skipUntil(spacesAngleBrackets)
        if c == b"<":
            # return to the first step in the overall "two step" algorithm
            # reprocessing the < byte
            data.previous()
        else:
            # Read all attributes
            attr = self.getAttribute()
            while attr is not None:
                attr = self.getAttribute()
        return True

    def handleOther(self):
        return self.data.jumpTo(b">")

    def getAttribute(self):
        """Return a name,value pair for the next attribute in the stream,
        if one is found, or None"""
        data = self.data
        # Step 1 (skip chars)
        c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
        assert c is None or len(c) == 1
        # Step 2
        if c in (b">", None):
            return None
        # Step 3
        attrName = []
        attrValue = []
        # Step 4 attribute name
        while True:
            if c == b"=" and attrName:
                break
            elif c in spaceCharactersBytes:
                # Step 6!
                c = data.skip()
                break
            elif c in (b"/", b">"):
                return b"".join(attrName), b""
            elif c in asciiUppercaseBytes:
                attrName.append(c.lower())
            elif c is None:
                return None
            else:
                attrName.append(c)
            # Step 5
            c = next(data)
        # Step 7
        if c != b"=":
            data.previous()
            return b"".join(attrName), b""
        # Step 8
        next(data)
        # Step 9
        c = data.skip()
        # Step 10
        if c in (b"'", b'"'):
            # 10.1
            quoteChar = c
            while True:
                # 10.2
                c = next(data)
                # 10.3
                if c == quoteChar:
                    next(data)
                    return b"".join(attrName), b"".join(attrValue)
                # 10.4
                elif c in asciiUppercaseBytes:
                    attrValue.append(c.lower())
                # 10.5
                else:
                    attrValue.append(c)
        elif c == b">":
            return b"".join(attrName), b""
        elif c in asciiUppercaseBytes:
            attrValue.append(c.lower())
        elif c is None:
            return None
        else:
            attrValue.append(c)
        # Step 11
        while True:
            c = next(data)
            if c in spacesAngleBrackets:
                return b"".join(attrName), b"".join(attrValue)
            elif c in asciiUppercaseBytes:
                attrValue.append(c.lower())
            elif c is None:
                return None
            else:
                attrValue.append(c)


class ContentAttrParser(object):
    def __init__(self, data):
        assert isinstance(data, bytes)
        self.data = data

    def parse(self):
        try:
            # Check if the attr name is charset
            # otherwise return
            self.data.jumpTo(b"charset")
            self.data.position += 1
            self.data.skip()
            if not self.data.currentByte == b"=":
                # If there is no = sign keep looking for attrs
                return None
            self.data.position += 1
            self.data.skip()
            # Look for an encoding between matching quote marks
            if self.data.currentByte in (b'"', b"'"):
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                if self.data.jumpTo(quoteMark):
                    return self.data[oldPosition:self.data.position]
                else:
                    return None
            else:
                # Unquoted value
                oldPosition = self.data.position
                try:
                    self.data.skipUntil(spaceCharactersBytes)
                    return self.data[oldPosition:self.data.position]
                except StopIteration:
                    # Return the whole remaining value
                    return self.data[oldPosition:]
        except StopIteration:
            return None


def lookupEncoding(encoding):
    """Return the python codec name corresponding to an encoding or None if the
    string doesn't correspond to a valid encoding."""
    if isinstance(encoding, bytes):
        try:
            encoding = encoding.decode("ascii")
        except UnicodeDecodeError:
            return None

    if encoding is not None:
        try:
            return webencodings.lookup(encoding)
        except AttributeError:
            return None
    else:
        return None
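# ---------------------------------------------------------------------------
# Editor's sketch (not part of the vendored module): minimal usage of the
# byte-stream front door defined above, assuming Python 3 and that this file
# is importable as pip._vendor.html5lib._inputstream.  With no BOM present,
# the <meta charset> pre-parse decides the (tentative) encoding, and
# characters are then consumed through the chunked char()/charsUntil() API.
if __name__ == "__main__":
    doc = b'<meta charset="windows-1252"><p>caf\xe9</p>'
    stream = HTMLInputStream(doc)      # bytes in, so HTMLBinaryInputStream
    encoding, certainty = stream.charEncoding
    print(encoding.name, certainty)    # -> windows-1252 tentative
    # Characters come back one at a time via char(), or in runs via
    # charsUntil(); unget() pushes a single character back.
    chars = [stream.char()]
    chars.append(stream.charsUntil(">"))
    chars.append(stream.char())
    print("".join(chars))              # -> <meta charset="windows-1252">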
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/html5lib/_ihatexml.py
from __future__ import absolute_import, division, unicode_literals

import re
import warnings

from .constants import DataLossWarning

baseChar = """[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] | [#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] | [#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] | [#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 | [#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] | [#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] | [#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] | [#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] | [#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 | [#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] | [#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] | [#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D | [#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] | [#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] | [#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] | [#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] | [#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] | [#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] | [#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 | [#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] | [#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] | [#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] | [#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] | [#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] | [#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] | [#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] | [#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] | [#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] | [#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] | [#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A | #x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 | #x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] | #x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] | [#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] | [#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C | #x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 | [#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] | [#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] | [#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 | [#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] | [#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B | #x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE | [#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] | [#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 | [#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] | [#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""  # noqa

ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""

combiningCharacter = """[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] | [#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 | [#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] | [#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] | #x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] | [#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] | [#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 | #x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] | [#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC | [#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] | #x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] | [#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] | [#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] | [#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] | [#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] | [#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] | #x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 | [#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] | #x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] | [#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] | [#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] | #x3099 | #x309A"""  # noqa

digit = """[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] | [#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] | [#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] | [#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""  # noqa

extender = """#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 | #[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""  # noqa

letter = " | ".join([baseChar, ideographic])

# Without the
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
                   extender])
nameFirst = " | ".join([letter, "_"])

reChar = re.compile(r"#x([\d|A-F]{4,4})")
reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]")


def charStringToList(chars):
    charRanges = [item.strip() for item in chars.split(" | ")]
    rv = []
    for item in charRanges:
        foundMatch = False
        for regexp in (reChar, reCharRange):
            match = regexp.match(item)
            if match is not None:
                rv.append([hexToInt(item) for item in match.groups()])
                if len(rv[-1]) == 1:
                    rv[-1] = rv[-1] * 2
                foundMatch = True
                break
        if not foundMatch:
            assert len(item) == 1
            rv.append([ord(item)] * 2)
    rv = normaliseCharList(rv)
    return rv


def normaliseCharList(charList):
    charList = sorted(charList)
    for item in charList:
        assert item[1] >= item[0]
    rv = []
    i = 0
    while i < len(charList):
        j = 1
        rv.append(charList[i])
        while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1:
            rv[-1][1] = charList[i + j][1]
            j += 1
        i += j
    return rv


# We don't really support characters above the BMP :(
max_unicode = int("FFFF", 16)


def missingRanges(charList):
    rv = []
    if charList[0] != 0:
        rv.append([0, charList[0][0] - 1])
    for i, item in enumerate(charList[:-1]):
        rv.append([item[1] + 1, charList[i + 1][0] - 1])
    if charList[-1][1] != max_unicode:
        rv.append([charList[-1][1] + 1, max_unicode])
    return rv


def listToRegexpStr(charList):
    rv = []
    for item in charList:
        if item[0] == item[1]:
            rv.append(escapeRegexp(chr(item[0])))
        else:
            rv.append(escapeRegexp(chr(item[0])) + "-" +
                      escapeRegexp(chr(item[1])))
    return "[%s]" % "".join(rv)


def hexToInt(hex_str):
    return int(hex_str, 16)


def escapeRegexp(string):
    specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}",
                         "[", "]", "|", "(", ")", "-")
    for char in specialCharacters:
        string = string.replace(char, "\\" + char)
    return string


# output from the above
nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')  # noqa

nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')  # noqa

# Simpler things
nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\\-'()+,./:=?;!*#@$_%]")


class InfosetFilter(object):
    replacementRegexp = re.compile(r"U[\dA-F]{5,5}")

    def __init__(self,
                 dropXmlnsLocalName=False,
                 dropXmlnsAttrNs=False,
                 preventDoubleDashComments=False,
                 preventDashAtCommentEnd=False,
                 replaceFormFeedCharacters=True,
                 preventSingleQuotePubid=False):

        self.dropXmlnsLocalName = dropXmlnsLocalName
        self.dropXmlnsAttrNs = dropXmlnsAttrNs

        self.preventDoubleDashComments = preventDoubleDashComments
        self.preventDashAtCommentEnd = preventDashAtCommentEnd

        self.replaceFormFeedCharacters = replaceFormFeedCharacters

        self.preventSingleQuotePubid = preventSingleQuotePubid

        self.replaceCache = {}

    def coerceAttribute(self, name, namespace=None):
        if self.dropXmlnsLocalName and name.startswith("xmlns:"):
            warnings.warn("Attributes cannot begin with xmlns", DataLossWarning)
            return None
        elif (self.dropXmlnsAttrNs and
              namespace == "http://www.w3.org/2000/xmlns/"):
            warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning)
            return None
        else:
            return self.toXmlName(name)

    def coerceElement(self, name):
        return self.toXmlName(name)

    def coerceComment(self, data):
        if self.preventDoubleDashComments:
            while "--" in data:
                warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning)
                data = data.replace("--", "- -")
            if data.endswith("-"):
                warnings.warn("Comments cannot end in a dash", DataLossWarning)
                data += " "
        return data

    def coerceCharacters(self, data):
        if self.replaceFormFeedCharacters:
            for _ in range(data.count("\x0C")):
                warnings.warn("Text cannot contain U+000C", DataLossWarning)
            data = data.replace("\x0C", " ")
        # Other non-xml characters
        return data

    def coercePubid(self, data):
        dataOutput = data
        for char in nonPubidCharRegexp.findall(data):
            warnings.warn("Coercing non-XML pubid", DataLossWarning)
            replacement = self.getReplacementCharacter(char)
            dataOutput = dataOutput.replace(char, replacement)
        if self.preventSingleQuotePubid and dataOutput.find("'") >= 0:
            warnings.warn("Pubid cannot contain single quote", DataLossWarning)
            dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'"))
        return dataOutput

    def toXmlName(self, name):
        nameFirst = name[0]
        nameRest = name[1:]
        m = nonXmlNameFirstBMPRegexp.match(nameFirst)
        if m:
            warnings.warn("Coercing non-XML name: %s" % name, DataLossWarning)
            nameFirstOutput = self.getReplacementCharacter(nameFirst)
        else:
            nameFirstOutput = nameFirst

        nameRestOutput = nameRest
        replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
        for char in replaceChars:
            warnings.warn("Coercing non-XML name: %s" % name, DataLossWarning)
            replacement = self.getReplacementCharacter(char)
            nameRestOutput = nameRestOutput.replace(char, replacement)
        return nameFirstOutput + nameRestOutput

    def getReplacementCharacter(self, char):
        if char in self.replaceCache:
            replacement = self.replaceCache[char]
        else:
            replacement = self.escapeChar(char)
        return replacement

    def fromXmlName(self, name):
        for item in set(self.replacementRegexp.findall(name)):
            name = name.replace(item, self.unescapeChar(item))
        return name

    def escapeChar(self, char):
        replacement = "U%05X" % ord(char)
        self.replaceCache[char] = replacement
        return replacement

    def unescapeChar(self, charcode):
        return chr(int(charcode[1:], 16))
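# ---------------------------------------------------------------------------
# Editor's sketch (not part of the vendored module): InfosetFilter coerces
# names that are legal in HTML but not in XML by replacing each offending
# character with "U" plus five hex digits, and fromXmlName() reverses that
# escaping.  A DataLossWarning is emitted on each coercion.
if __name__ == "__main__":
    coercer = InfosetFilter()
    coerced = coercer.coerceElement("spam eggs")   # space is not an XML NameChar
    print(coerced)                                 # -> spamU00020eggs
    print(coercer.fromXmlName(coerced))            # -> spam eggs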
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/html5lib/constants.py
from __future__ import absolute_import, division, unicode_literals

import string

EOF = None

E = {
    "null-character": "Null character in input stream, replaced with U+FFFD.",
    "invalid-codepoint": "Invalid codepoint in stream.",
    "incorrectly-placed-solidus": "Solidus (/) incorrectly placed in tag.",
    "incorrect-cr-newline-entity": "Incorrect CR newline entity, replaced with LF.",
    "illegal-windows-1252-entity": "Entity used with illegal number (windows-1252 reference).",
    "cant-convert-numeric-entity": "Numeric entity couldn't be converted to character (codepoint U+%(charAsInt)08x).",
    "illegal-codepoint-for-numeric-entity": "Numeric entity represents an illegal codepoint: U+%(charAsInt)08x.",
    "numeric-entity-without-semicolon": "Numeric entity didn't end with ';'.",
    "expected-numeric-entity-but-got-eof": "Numeric entity expected. Got end of file instead.",
    "expected-numeric-entity": "Numeric entity expected but none found.",
    "named-entity-without-semicolon": "Named entity didn't end with ';'.",
    "expected-named-entity": "Named entity expected. Got none.",
    "attributes-in-end-tag": "End tag contains unexpected attributes.",
    'self-closing-flag-on-end-tag': "End tag contains unexpected self-closing flag.",
    "expected-tag-name-but-got-right-bracket": "Expected tag name. Got '>' instead.",
    "expected-tag-name-but-got-question-mark": "Expected tag name. Got '?' instead. (HTML doesn't support processing instructions.)",
    "expected-tag-name": "Expected tag name. Got something else instead",
    "expected-closing-tag-but-got-right-bracket": "Expected closing tag. Got '>' instead. Ignoring '</>'.",
    "expected-closing-tag-but-got-eof": "Expected closing tag. Unexpected end of file.",
    "expected-closing-tag-but-got-char": "Expected closing tag. Unexpected character '%(data)s' found.",
    "eof-in-tag-name": "Unexpected end of file in the tag name.",
    "expected-attribute-name-but-got-eof": "Unexpected end of file. Expected attribute name instead.",
    "eof-in-attribute-name": "Unexpected end of file in attribute name.",
    "invalid-character-in-attribute-name": "Invalid character in attribute name",
    "duplicate-attribute": "Dropped duplicate attribute on tag.",
    "expected-end-of-tag-name-but-got-eof": "Unexpected end of file. Expected = or end of tag.",
    "expected-attribute-value-but-got-eof": "Unexpected end of file. Expected attribute value.",
    "expected-attribute-value-but-got-right-bracket": "Expected attribute value. Got '>' instead.",
    'equals-in-unquoted-attribute-value': "Unexpected = in unquoted attribute",
    'unexpected-character-in-unquoted-attribute-value': "Unexpected character in unquoted attribute",
    "invalid-character-after-attribute-name": "Unexpected character after attribute name.",
    "unexpected-character-after-attribute-value": "Unexpected character after attribute value.",
    "eof-in-attribute-value-double-quote": "Unexpected end of file in attribute value (\").",
    "eof-in-attribute-value-single-quote": "Unexpected end of file in attribute value (').",
    "eof-in-attribute-value-no-quotes": "Unexpected end of file in attribute value.",
    "unexpected-EOF-after-solidus-in-tag": "Unexpected end of file in tag. Expected >",
    "unexpected-character-after-solidus-in-tag": "Unexpected character after / in tag. Expected >",
    "expected-dashes-or-doctype": "Expected '--' or 'DOCTYPE'. Not found.",
    "unexpected-bang-after-double-dash-in-comment": "Unexpected ! after -- in comment",
    "unexpected-space-after-double-dash-in-comment": "Unexpected space after -- in comment",
    "incorrect-comment": "Incorrect comment.",
    "eof-in-comment": "Unexpected end of file in comment.",
    "eof-in-comment-end-dash": "Unexpected end of file in comment (-)",
    "unexpected-dash-after-double-dash-in-comment": "Unexpected '-' after '--' found in comment.",
    "eof-in-comment-double-dash": "Unexpected end of file in comment (--).",
    "eof-in-comment-end-space-state": "Unexpected end of file in comment.",
    "eof-in-comment-end-bang-state": "Unexpected end of file in comment.",
    "unexpected-char-in-comment": "Unexpected character in comment found.",
    "need-space-after-doctype": "No space after literal string 'DOCTYPE'.",
    "expected-doctype-name-but-got-right-bracket": "Unexpected > character. Expected DOCTYPE name.",
    "expected-doctype-name-but-got-eof": "Unexpected end of file. Expected DOCTYPE name.",
    "eof-in-doctype-name": "Unexpected end of file in DOCTYPE name.",
    "eof-in-doctype": "Unexpected end of file in DOCTYPE.",
    "expected-space-or-right-bracket-in-doctype": "Expected space or '>'. Got '%(data)s'",
    "unexpected-end-of-doctype": "Unexpected end of DOCTYPE.",
    "unexpected-char-in-doctype": "Unexpected character in DOCTYPE.",
    "eof-in-innerhtml": "XXX innerHTML EOF",
    "unexpected-doctype": "Unexpected DOCTYPE. Ignored.",
    "non-html-root": "html needs to be the first start tag.",
    "expected-doctype-but-got-eof": "Unexpected end of file. Expected DOCTYPE.",
    "unknown-doctype": "Erroneous DOCTYPE.",
    "expected-doctype-but-got-chars": "Unexpected non-space characters. Expected DOCTYPE.",
    "expected-doctype-but-got-start-tag": "Unexpected start tag (%(name)s). Expected DOCTYPE.",
    "expected-doctype-but-got-end-tag": "Unexpected end tag (%(name)s). Expected DOCTYPE.",
    "end-tag-after-implied-root": "Unexpected end tag (%(name)s) after the (implied) root element.",
    "expected-named-closing-tag-but-got-eof": "Unexpected end of file. Expected end tag (%(name)s).",
    "two-heads-are-not-better-than-one": "Unexpected start tag head in existing head. Ignored.",
    "unexpected-end-tag": "Unexpected end tag (%(name)s). Ignored.",
    "unexpected-start-tag-out-of-my-head": "Unexpected start tag (%(name)s) that can be in head. Moved.",
    "unexpected-start-tag": "Unexpected start tag (%(name)s).",
    "missing-end-tag": "Missing end tag (%(name)s).",
    "missing-end-tags": "Missing end tags (%(name)s).",
    "unexpected-start-tag-implies-end-tag": "Unexpected start tag (%(startName)s) implies end tag (%(endName)s).",
    "unexpected-start-tag-treated-as": "Unexpected start tag (%(originalName)s). Treated as %(newName)s.",
    "deprecated-tag": "Unexpected start tag %(name)s. Don't use it!",
    "unexpected-start-tag-ignored": "Unexpected start tag %(name)s. Ignored.",
    "expected-one-end-tag-but-got-another": "Unexpected end tag (%(gotName)s). Missing end tag (%(expectedName)s).",
    "end-tag-too-early": "End tag (%(name)s) seen too early. Expected other end tag.",
    "end-tag-too-early-named": "Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s).",
    "end-tag-too-early-ignored": "End tag (%(name)s) seen too early. Ignored.",
    "adoption-agency-1.1": "End tag (%(name)s) violates step 1, paragraph 1 of the adoption agency algorithm.",
    "adoption-agency-1.2": "End tag (%(name)s) violates step 1, paragraph 2 of the adoption agency algorithm.",
    "adoption-agency-1.3": "End tag (%(name)s) violates step 1, paragraph 3 of the adoption agency algorithm.",
    "adoption-agency-4.4": "End tag (%(name)s) violates step 4, paragraph 4 of the adoption agency algorithm.",
    "unexpected-end-tag-treated-as": "Unexpected end tag (%(originalName)s). Treated as %(newName)s.",
    "no-end-tag": "This element (%(name)s) has no end tag.",
    "unexpected-implied-end-tag-in-table": "Unexpected implied end tag (%(name)s) in the table phase.",
    "unexpected-implied-end-tag-in-table-body": "Unexpected implied end tag (%(name)s) in the table body phase.",
    "unexpected-char-implies-table-voodoo": "Unexpected non-space characters in table context caused voodoo mode.",
    "unexpected-hidden-input-in-table": "Unexpected input with type hidden in table context.",
    "unexpected-form-in-table": "Unexpected form in table context.",
    "unexpected-start-tag-implies-table-voodoo": "Unexpected start tag (%(name)s) in table context caused voodoo mode.",
    "unexpected-end-tag-implies-table-voodoo": "Unexpected end tag (%(name)s) in table context caused voodoo mode.",
    "unexpected-cell-in-table-body": "Unexpected table cell start tag (%(name)s) in the table body phase.",
    "unexpected-cell-end-tag": "Got table cell end tag (%(name)s) while required end tags are missing.",
    "unexpected-end-tag-in-table-body": "Unexpected end tag (%(name)s) in the table body phase. Ignored.",
    "unexpected-implied-end-tag-in-table-row": "Unexpected implied end tag (%(name)s) in the table row phase.",
    "unexpected-end-tag-in-table-row": "Unexpected end tag (%(name)s) in the table row phase. Ignored.",
    "unexpected-select-in-select": "Unexpected select start tag in the select phase treated as select end tag.",
    "unexpected-input-in-select": "Unexpected input start tag in the select phase.",
    "unexpected-start-tag-in-select": "Unexpected start tag token (%(name)s) in the select phase. Ignored.",
    "unexpected-end-tag-in-select": "Unexpected end tag (%(name)s) in the select phase. Ignored.",
    "unexpected-table-element-start-tag-in-select-in-table": "Unexpected table element start tag (%(name)s) in the select in table phase.",
    "unexpected-table-element-end-tag-in-select-in-table": "Unexpected table element end tag (%(name)s) in the select in table phase.",
    "unexpected-char-after-body": "Unexpected non-space characters in the after body phase.",
    "unexpected-start-tag-after-body": "Unexpected start tag token (%(name)s) in the after body phase.",
    "unexpected-end-tag-after-body": "Unexpected end tag token (%(name)s) in the after body phase.",
    "unexpected-char-in-frameset": "Unexpected characters in the frameset phase. Characters ignored.",
    "unexpected-start-tag-in-frameset": "Unexpected start tag token (%(name)s) in the frameset phase. Ignored.",
    "unexpected-frameset-in-frameset-innerhtml": "Unexpected end tag token (frameset) in the frameset phase (innerHTML).",
    "unexpected-end-tag-in-frameset": "Unexpected end tag token (%(name)s) in the frameset phase. Ignored.",
    "unexpected-char-after-frameset": "Unexpected non-space characters in the after frameset phase. Ignored.",
    "unexpected-start-tag-after-frameset": "Unexpected start tag (%(name)s) in the after frameset phase. Ignored.",
    "unexpected-end-tag-after-frameset": "Unexpected end tag (%(name)s) in the after frameset phase. Ignored.",
    "unexpected-end-tag-after-body-innerhtml": "Unexpected end tag after body(innerHtml)",
    "expected-eof-but-got-char": "Unexpected non-space characters. Expected end of file.",
    "expected-eof-but-got-start-tag": "Unexpected start tag (%(name)s). Expected end of file.",
    "expected-eof-but-got-end-tag": "Unexpected end tag (%(name)s). Expected end of file.",
    "eof-in-table": "Unexpected end of file. Expected table content.",
    "eof-in-select": "Unexpected end of file. Expected select content.",
    "eof-in-frameset": "Unexpected end of file. Expected frameset content.",
    "eof-in-script-in-script": "Unexpected end of file. Expected script content.",
    "eof-in-foreign-lands": "Unexpected end of file. Expected foreign content",
    "non-void-element-with-trailing-solidus": "Trailing solidus not allowed on element %(name)s",
    "unexpected-html-element-in-foreign-content": "Element %(name)s not allowed in a non-html context",
    "unexpected-end-tag-before-html": "Unexpected end tag (%(name)s) before html.",
    "unexpected-inhead-noscript-tag": "Element %(name)s not allowed in an inhead-noscript context",
    "eof-in-head-noscript": "Unexpected end of file. Expected inhead-noscript content",
    "char-in-head-noscript": "Unexpected non-space character. Expected inhead-noscript content",
    "XXX-undefined-error": "Undefined error (this sucks and should be fixed)",
}

namespaces = {
    "html": "http://www.w3.org/1999/xhtml",
    "mathml": "http://www.w3.org/1998/Math/MathML",
    "svg": "http://www.w3.org/2000/svg",
    "xlink": "http://www.w3.org/1999/xlink",
    "xml": "http://www.w3.org/XML/1998/namespace",
    "xmlns": "http://www.w3.org/2000/xmlns/"
}

scopingElements = frozenset([
    (namespaces["html"], "applet"), (namespaces["html"], "caption"),
    (namespaces["html"], "html"), (namespaces["html"], "marquee"),
    (namespaces["html"], "object"), (namespaces["html"], "table"),
    (namespaces["html"], "td"), (namespaces["html"], "th"),
    (namespaces["mathml"], "mi"), (namespaces["mathml"], "mo"),
    (namespaces["mathml"], "mn"), (namespaces["mathml"], "ms"),
    (namespaces["mathml"], "mtext"), (namespaces["mathml"], "annotation-xml"),
    (namespaces["svg"], "foreignObject"), (namespaces["svg"], "desc"),
    (namespaces["svg"], "title"),
])

formattingElements = frozenset([
    (namespaces["html"], "a"), (namespaces["html"], "b"),
    (namespaces["html"], "big"), (namespaces["html"], "code"),
    (namespaces["html"], "em"), (namespaces["html"], "font"),
    (namespaces["html"], "i"), (namespaces["html"], "nobr"),
    (namespaces["html"], "s"), (namespaces["html"], "small"),
    (namespaces["html"], "strike"), (namespaces["html"], "strong"),
    (namespaces["html"], "tt"), (namespaces["html"], "u")
])

specialElements = frozenset([
    (namespaces["html"], "address"), (namespaces["html"], "applet"),
    (namespaces["html"], "area"), (namespaces["html"], "article"),
    (namespaces["html"], "aside"), (namespaces["html"], "base"),
    (namespaces["html"], "basefont"), (namespaces["html"], "bgsound"),
    (namespaces["html"], "blockquote"), (namespaces["html"], "body"),
    (namespaces["html"], "br"), (namespaces["html"], "button"),
    (namespaces["html"], "caption"), (namespaces["html"], "center"),
    (namespaces["html"], "col"), (namespaces["html"], "colgroup"),
    (namespaces["html"], "command"), (namespaces["html"], "dd"),
    (namespaces["html"], "details"), (namespaces["html"], "dir"),
    (namespaces["html"], "div"), (namespaces["html"], "dl"),
    (namespaces["html"], "dt"), (namespaces["html"], "embed"),
    (namespaces["html"], "fieldset"), (namespaces["html"], "figure"),
    (namespaces["html"], "footer"), (namespaces["html"], "form"),
    (namespaces["html"], "frame"), (namespaces["html"], "frameset"),
    (namespaces["html"], "h1"), (namespaces["html"], "h2"),
    (namespaces["html"], "h3"), (namespaces["html"], "h4"),
    (namespaces["html"], "h5"), (namespaces["html"], "h6"),
    (namespaces["html"], "head"), (namespaces["html"], "header"),
    (namespaces["html"], "hr"), (namespaces["html"], "html"),
    (namespaces["html"], "iframe"),
    # Note that image is commented out in the spec as "this isn't an
    # element that can end up on the stack, so it doesn't matter,"
    (namespaces["html"], "image"), (namespaces["html"], "img"),
    (namespaces["html"], "input"), (namespaces["html"], "isindex"),
    (namespaces["html"], "li"), (namespaces["html"], "link"),
    (namespaces["html"], "listing"), (namespaces["html"], "marquee"),
    (namespaces["html"], "menu"), (namespaces["html"], "meta"),
    (namespaces["html"], "nav"), (namespaces["html"], "noembed"),
    (namespaces["html"], "noframes"), (namespaces["html"], "noscript"),
    (namespaces["html"], "object"), (namespaces["html"], "ol"),
    (namespaces["html"], "p"), (namespaces["html"], "param"),
    (namespaces["html"], "plaintext"), (namespaces["html"], "pre"),
    (namespaces["html"], "script"), (namespaces["html"], "section"),
    (namespaces["html"], "select"), (namespaces["html"], "style"),
    (namespaces["html"], "table"), (namespaces["html"], "tbody"),
    (namespaces["html"], "td"), (namespaces["html"], "textarea"),
    (namespaces["html"], "tfoot"), (namespaces["html"], "th"),
    (namespaces["html"], "thead"), (namespaces["html"], "title"),
    (namespaces["html"], "tr"), (namespaces["html"], "ul"),
    (namespaces["html"], "wbr"), (namespaces["html"], "xmp"),
    (namespaces["svg"], "foreignObject")
])

htmlIntegrationPointElements = frozenset([
    (namespaces["mathml"], "annotation-xml"),
    (namespaces["svg"], "foreignObject"),
    (namespaces["svg"], "desc"),
    (namespaces["svg"], "title")
])

mathmlTextIntegrationPointElements = frozenset([
    (namespaces["mathml"], "mi"), (namespaces["mathml"], "mo"),
    (namespaces["mathml"], "mn"), (namespaces["mathml"], "ms"),
    (namespaces["mathml"], "mtext")
])

adjustSVGAttributes = {
    "attributename": "attributeName", "attributetype": "attributeType",
    "basefrequency": "baseFrequency", "baseprofile": "baseProfile",
    "calcmode": "calcMode", "clippathunits": "clipPathUnits",
    "contentscripttype": "contentScriptType", "contentstyletype": "contentStyleType",
    "diffuseconstant": "diffuseConstant", "edgemode": "edgeMode",
    "externalresourcesrequired": "externalResourcesRequired",
    "filterres": "filterRes", "filterunits": "filterUnits",
    "glyphref": "glyphRef", "gradienttransform": "gradientTransform",
    "gradientunits": "gradientUnits", "kernelmatrix": "kernelMatrix",
    "kernelunitlength": "kernelUnitLength", "keypoints": "keyPoints",
    "keysplines": "keySplines", "keytimes": "keyTimes",
    "lengthadjust": "lengthAdjust", "limitingconeangle": "limitingConeAngle",
    "markerheight": "markerHeight", "markerunits": "markerUnits",
    "markerwidth": "markerWidth", "maskcontentunits": "maskContentUnits",
    "maskunits": "maskUnits", "numoctaves": "numOctaves",
    "pathlength": "pathLength", "patterncontentunits": "patternContentUnits",
    "patterntransform": "patternTransform", "patternunits": "patternUnits",
    "pointsatx": "pointsAtX", "pointsaty": "pointsAtY",
    "pointsatz": "pointsAtZ", "preservealpha": "preserveAlpha",
    "preserveaspectratio": "preserveAspectRatio", "primitiveunits": "primitiveUnits",
    "refx": "refX", "refy": "refY",
    "repeatcount": "repeatCount", "repeatdur": "repeatDur",
    "requiredextensions": "requiredExtensions", "requiredfeatures": "requiredFeatures",
    "specularconstant": "specularConstant", "specularexponent": "specularExponent",
    "spreadmethod": "spreadMethod", "startoffset": "startOffset",
    "stddeviation": "stdDeviation", "stitchtiles": "stitchTiles",
    "surfacescale": "surfaceScale", "systemlanguage": "systemLanguage",
    "tablevalues": "tableValues", "targetx": "targetX", "targety": "targetY",
    "textlength": "textLength", "viewbox": "viewBox", "viewtarget": "viewTarget",
    "xchannelselector": "xChannelSelector", "ychannelselector": "yChannelSelector",
    "zoomandpan": "zoomAndPan"
}

adjustMathMLAttributes = {"definitionurl": "definitionURL"}

adjustForeignAttributes = {
    "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]),
    "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]),
    "xlink:href": ("xlink", "href", namespaces["xlink"]),
    "xlink:role": ("xlink", "role", namespaces["xlink"]),
    "xlink:show": ("xlink", "show", namespaces["xlink"]),
    "xlink:title": ("xlink", "title", namespaces["xlink"]),
    "xlink:type": ("xlink", "type", namespaces["xlink"]),
    "xml:base": ("xml", "base", namespaces["xml"]),
    "xml:lang": ("xml", "lang", namespaces["xml"]),
    "xml:space": ("xml", "space", namespaces["xml"]),
    "xmlns": (None, "xmlns", namespaces["xmlns"]),
    "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"])
}

unadjustForeignAttributes = {(ns, local): qname for qname, (prefix, local, ns) in
                             adjustForeignAttributes.items()}

spaceCharacters = frozenset([
    "\t",
    "\n",
    "\u000C",
    " ",
    "\r"
])

tableInsertModeElements = frozenset([
    "table",
    "tbody",
    "tfoot",
    "thead",
    "tr"
])

asciiLowercase = frozenset(string.ascii_lowercase)
asciiUppercase = frozenset(string.ascii_uppercase)
asciiLetters = frozenset(string.ascii_letters)
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)

asciiUpper2Lower = {ord(c): ord(c.lower()) for c in string.ascii_uppercase}

# Heading elements need to be ordered
headingElements = (
    "h1",
    "h2",
    "h3",
    "h4",
    "h5",
    "h6"
)

voidElements = frozenset([
    "base", "command", "event-source", "link", "meta", "hr", "br", "img",
    "embed", "param", "area", "col", "input", "source", "track"
])

cdataElements = frozenset(['title', 'textarea'])

rcdataElements = frozenset([
    'style', 'script', 'xmp', 'iframe', 'noembed', 'noframes', 'noscript'
])

booleanAttributes = {
    "": frozenset(["irrelevant", "itemscope"]),
    "style": frozenset(["scoped"]),
    "img": frozenset(["ismap"]),
    "audio": frozenset(["autoplay", "controls"]),
    "video": frozenset(["autoplay", "controls"]),
    "script": frozenset(["defer", "async"]),
    "details": frozenset(["open"]),
    "datagrid": frozenset(["multiple", "disabled"]),
    "command": frozenset(["hidden", "disabled", "checked", "default"]),
    "hr": frozenset(["noshade"]),
    "menu": frozenset(["autosubmit"]),
    "fieldset": frozenset(["disabled", "readonly"]),
    "option": frozenset(["disabled", "readonly", "selected"]),
    "optgroup": frozenset(["disabled", "readonly"]),
    "button": frozenset(["disabled", "autofocus"]),
    "input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]),
    "select": frozenset(["disabled", "readonly", "autofocus", "multiple"]),
    "output": frozenset(["disabled", "readonly"]),
    "iframe": frozenset(["seamless"]),
}
entitiesWindows1252 = (
    8364,   # 0x80  0x20AC  EURO SIGN
    65533,  # 0x81          UNDEFINED
    8218,   # 0x82  0x201A  SINGLE LOW-9 QUOTATION MARK
    402,    # 0x83  0x0192  LATIN SMALL LETTER F WITH HOOK
    8222,   # 0x84  0x201E  DOUBLE LOW-9 QUOTATION MARK
    8230,   # 0x85  0x2026  HORIZONTAL ELLIPSIS
    8224,   # 0x86  0x2020  DAGGER
    8225,   # 0x87  0x2021  DOUBLE DAGGER
    710,    # 0x88  0x02C6  MODIFIER LETTER CIRCUMFLEX ACCENT
    8240,   # 0x89  0x2030  PER MILLE SIGN
    352,    # 0x8A  0x0160  LATIN CAPITAL LETTER S WITH CARON
    8249,   # 0x8B  0x2039  SINGLE LEFT-POINTING ANGLE QUOTATION MARK
    338,    # 0x8C  0x0152  LATIN CAPITAL LIGATURE OE
    65533,  # 0x8D          UNDEFINED
    381,    # 0x8E  0x017D  LATIN CAPITAL LETTER Z WITH CARON
    65533,  # 0x8F          UNDEFINED
    65533,  # 0x90          UNDEFINED
    8216,   # 0x91  0x2018  LEFT SINGLE QUOTATION MARK
    8217,   # 0x92  0x2019  RIGHT SINGLE QUOTATION MARK
    8220,   # 0x93  0x201C  LEFT DOUBLE QUOTATION MARK
    8221,   # 0x94  0x201D  RIGHT DOUBLE QUOTATION MARK
    8226,   # 0x95  0x2022  BULLET
    8211,   # 0x96  0x2013  EN DASH
    8212,   # 0x97  0x2014  EM DASH
    732,    # 0x98  0x02DC  SMALL TILDE
    8482,   # 0x99  0x2122  TRADE MARK SIGN
    353,    # 0x9A  0x0161  LATIN SMALL LETTER S WITH CARON
    8250,   # 0x9B  0x203A  SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
    339,    # 0x9C  0x0153  LATIN SMALL LIGATURE OE
    65533,  # 0x9D          UNDEFINED
    382,    # 0x9E  0x017E  LATIN SMALL LETTER Z WITH CARON
    376     # 0x9F  0x0178  LATIN CAPITAL LETTER Y WITH DIAERESIS
)
xmlEntities = frozenset(['lt;', 'gt;', 'amp;', 'apos;', 'quot;'])
entities = {
    "AElig": "\xc6", "AElig;": "\xc6", "AMP": "&", "AMP;": "&", "Aacute": "\xc1", "Aacute;": "\xc1",
    "Abreve;": "\u0102", "Acirc": "\xc2", "Acirc;": "\xc2", "Acy;": "\u0410", "Afr;": "\U0001d504",
    "Agrave": "\xc0", "Agrave;": "\xc0", "Alpha;": "\u0391", "Amacr;": "\u0100", "And;": "\u2a53",
    "Aogon;": "\u0104", "Aopf;": "\U0001d538", "ApplyFunction;": "\u2061", "Aring": "\xc5", "Aring;": "\xc5",
    "Ascr;": "\U0001d49c", "Assign;": "\u2254", "Atilde": "\xc3", "Atilde;": "\xc3", "Auml": "\xc4",
    "Auml;": "\xc4", "Backslash;": "\u2216", "Barv;": "\u2ae7", "Barwed;": "\u2306", "Bcy;": "\u0411",
    "Because;": "\u2235", "Bernoullis;": "\u212c", "Beta;": "\u0392", "Bfr;": "\U0001d505",
    "Bopf;": "\U0001d539", "Breve;": "\u02d8", "Bscr;": "\u212c", "Bumpeq;": "\u224e", "CHcy;": "\u0427",
    "COPY": "\xa9", "COPY;": "\xa9", "Cacute;": "\u0106", "Cap;": "\u22d2", "CapitalDifferentialD;": "\u2145",
    "Cayleys;": "\u212d", "Ccaron;": "\u010c", "Ccedil": "\xc7", "Ccedil;": "\xc7", "Ccirc;": "\u0108",
    "Cconint;": "\u2230", "Cdot;": "\u010a", "Cedilla;": "\xb8", "CenterDot;": "\xb7", "Cfr;": "\u212d",
    "Chi;": "\u03a7", "CircleDot;": "\u2299", "CircleMinus;": "\u2296", "CirclePlus;": "\u2295",
    "CircleTimes;": "\u2297", "ClockwiseContourIntegral;": "\u2232", "CloseCurlyDoubleQuote;": "\u201d",
    "CloseCurlyQuote;": "\u2019", "Colon;": "\u2237", "Colone;": "\u2a74", "Congruent;": "\u2261",
    "Conint;": "\u222f", "ContourIntegral;": "\u222e", "Copf;": "\u2102", "Coproduct;": "\u2210",
    "CounterClockwiseContourIntegral;": "\u2233", "Cross;": "\u2a2f", "Cscr;": "\U0001d49e", "Cup;": "\u22d3",
    "CupCap;": "\u224d", "DD;": "\u2145", "DDotrahd;": "\u2911", "DJcy;": "\u0402", "DScy;": "\u0405",
    "DZcy;": "\u040f", "Dagger;": "\u2021", "Darr;": "\u21a1", "Dashv;": "\u2ae4", "Dcaron;": "\u010e",
    "Dcy;": "\u0414", "Del;": "\u2207", "Delta;": "\u0394", "Dfr;": "\U0001d507", "DiacriticalAcute;": "\xb4",
    "DiacriticalDot;": "\u02d9", "DiacriticalDoubleAcute;": "\u02dd", "DiacriticalGrave;": "`",
    "DiacriticalTilde;": "\u02dc", "Diamond;": "\u22c4", "DifferentialD;": "\u2146", "Dopf;": "\U0001d53b",
    "Dot;": "\xa8", "DotDot;": "\u20dc", "DotEqual;": "\u2250",
"DoubleContourIntegral;": "\u222f", "DoubleDot;": "\xa8", "DoubleDownArrow;": "\u21d3", "DoubleLeftArrow;": "\u21d0", "DoubleLeftRightArrow;": "\u21d4", "DoubleLeftTee;": "\u2ae4", "DoubleLongLeftArrow;": "\u27f8", "DoubleLongLeftRightArrow;": "\u27fa", "DoubleLongRightArrow;": "\u27f9", "DoubleRightArrow;": "\u21d2", "DoubleRightTee;": "\u22a8", "DoubleUpArrow;": "\u21d1", "DoubleUpDownArrow;": "\u21d5", "DoubleVerticalBar;": "\u2225", "DownArrow;": "\u2193", "DownArrowBar;": "\u2913", "DownArrowUpArrow;": "\u21f5", "DownBreve;": "\u0311", "DownLeftRightVector;": "\u2950", "DownLeftTeeVector;": "\u295e", "DownLeftVector;": "\u21bd", "DownLeftVectorBar;": "\u2956", "DownRightTeeVector;": "\u295f", "DownRightVector;": "\u21c1", "DownRightVectorBar;": "\u2957", "DownTee;": "\u22a4", "DownTeeArrow;": "\u21a7", "Downarrow;": "\u21d3", "Dscr;": "\U0001d49f", "Dstrok;": "\u0110", "ENG;": "\u014a", "ETH": "\xd0", "ETH;": "\xd0", "Eacute": "\xc9", "Eacute;": "\xc9", "Ecaron;": "\u011a", "Ecirc": "\xca", "Ecirc;": "\xca", "Ecy;": "\u042d", "Edot;": "\u0116", "Efr;": "\U0001d508", "Egrave": "\xc8", "Egrave;": "\xc8", "Element;": "\u2208", "Emacr;": "\u0112", "EmptySmallSquare;": "\u25fb", "EmptyVerySmallSquare;": "\u25ab", "Eogon;": "\u0118", "Eopf;": "\U0001d53c", "Epsilon;": "\u0395", "Equal;": "\u2a75", "EqualTilde;": "\u2242", "Equilibrium;": "\u21cc", "Escr;": "\u2130", "Esim;": "\u2a73", "Eta;": "\u0397", "Euml": "\xcb", "Euml;": "\xcb", "Exists;": "\u2203", "ExponentialE;": "\u2147", "Fcy;": "\u0424", "Ffr;": "\U0001d509", "FilledSmallSquare;": "\u25fc", "FilledVerySmallSquare;": "\u25aa", "Fopf;": "\U0001d53d", "ForAll;": "\u2200", "Fouriertrf;": "\u2131", "Fscr;": "\u2131", "GJcy;": "\u0403", "GT": ">", "GT;": ">", "Gamma;": "\u0393", "Gammad;": "\u03dc", "Gbreve;": "\u011e", "Gcedil;": "\u0122", "Gcirc;": "\u011c", "Gcy;": "\u0413", "Gdot;": "\u0120", "Gfr;": "\U0001d50a", "Gg;": "\u22d9", "Gopf;": "\U0001d53e", "GreaterEqual;": "\u2265", "GreaterEqualLess;": "\u22db", "GreaterFullEqual;": "\u2267", "GreaterGreater;": "\u2aa2", "GreaterLess;": "\u2277", "GreaterSlantEqual;": "\u2a7e", "GreaterTilde;": "\u2273", "Gscr;": "\U0001d4a2", "Gt;": "\u226b", "HARDcy;": "\u042a", "Hacek;": "\u02c7", "Hat;": "^", "Hcirc;": "\u0124", "Hfr;": "\u210c", "HilbertSpace;": "\u210b", "Hopf;": "\u210d", "HorizontalLine;": "\u2500", "Hscr;": "\u210b", "Hstrok;": "\u0126", "HumpDownHump;": "\u224e", "HumpEqual;": "\u224f", "IEcy;": "\u0415", "IJlig;": "\u0132", "IOcy;": "\u0401", "Iacute": "\xcd", "Iacute;": "\xcd", "Icirc": "\xce", "Icirc;": "\xce", "Icy;": "\u0418", "Idot;": "\u0130", "Ifr;": "\u2111", "Igrave": "\xcc", "Igrave;": "\xcc", "Im;": "\u2111", "Imacr;": "\u012a", "ImaginaryI;": "\u2148", "Implies;": "\u21d2", "Int;": "\u222c", "Integral;": "\u222b", "Intersection;": "\u22c2", "InvisibleComma;": "\u2063", "InvisibleTimes;": "\u2062", "Iogon;": "\u012e", "Iopf;": "\U0001d540", "Iota;": "\u0399", "Iscr;": "\u2110", "Itilde;": "\u0128", "Iukcy;": "\u0406", "Iuml": "\xcf", "Iuml;": "\xcf", "Jcirc;": "\u0134", "Jcy;": "\u0419", "Jfr;": "\U0001d50d", "Jopf;": "\U0001d541", "Jscr;": "\U0001d4a5", "Jsercy;": "\u0408", "Jukcy;": "\u0404", "KHcy;": "\u0425", "KJcy;": "\u040c", "Kappa;": "\u039a", "Kcedil;": "\u0136", "Kcy;": "\u041a", "Kfr;": "\U0001d50e", "Kopf;": "\U0001d542", "Kscr;": "\U0001d4a6", "LJcy;": "\u0409", "LT": "<", "LT;": "<", "Lacute;": "\u0139", "Lambda;": "\u039b", "Lang;": "\u27ea", "Laplacetrf;": "\u2112", "Larr;": "\u219e", "Lcaron;": "\u013d", "Lcedil;": "\u013b", "Lcy;": "\u041b", 
"LeftAngleBracket;": "\u27e8", "LeftArrow;": "\u2190", "LeftArrowBar;": "\u21e4", "LeftArrowRightArrow;": "\u21c6", "LeftCeiling;": "\u2308", "LeftDoubleBracket;": "\u27e6", "LeftDownTeeVector;": "\u2961", "LeftDownVector;": "\u21c3", "LeftDownVectorBar;": "\u2959", "LeftFloor;": "\u230a", "LeftRightArrow;": "\u2194", "LeftRightVector;": "\u294e", "LeftTee;": "\u22a3", "LeftTeeArrow;": "\u21a4", "LeftTeeVector;": "\u295a", "LeftTriangle;": "\u22b2", "LeftTriangleBar;": "\u29cf", "LeftTriangleEqual;": "\u22b4", "LeftUpDownVector;": "\u2951", "LeftUpTeeVector;": "\u2960", "LeftUpVector;": "\u21bf", "LeftUpVectorBar;": "\u2958", "LeftVector;": "\u21bc", "LeftVectorBar;": "\u2952", "Leftarrow;": "\u21d0", "Leftrightarrow;": "\u21d4", "LessEqualGreater;": "\u22da", "LessFullEqual;": "\u2266", "LessGreater;": "\u2276", "LessLess;": "\u2aa1", "LessSlantEqual;": "\u2a7d", "LessTilde;": "\u2272", "Lfr;": "\U0001d50f", "Ll;": "\u22d8", "Lleftarrow;": "\u21da", "Lmidot;": "\u013f", "LongLeftArrow;": "\u27f5", "LongLeftRightArrow;": "\u27f7", "LongRightArrow;": "\u27f6", "Longleftarrow;": "\u27f8", "Longleftrightarrow;": "\u27fa", "Longrightarrow;": "\u27f9", "Lopf;": "\U0001d543", "LowerLeftArrow;": "\u2199", "LowerRightArrow;": "\u2198", "Lscr;": "\u2112", "Lsh;": "\u21b0", "Lstrok;": "\u0141", "Lt;": "\u226a", "Map;": "\u2905", "Mcy;": "\u041c", "MediumSpace;": "\u205f", "Mellintrf;": "\u2133", "Mfr;": "\U0001d510", "MinusPlus;": "\u2213", "Mopf;": "\U0001d544", "Mscr;": "\u2133", "Mu;": "\u039c", "NJcy;": "\u040a", "Nacute;": "\u0143", "Ncaron;": "\u0147", "Ncedil;": "\u0145", "Ncy;": "\u041d", "NegativeMediumSpace;": "\u200b", "NegativeThickSpace;": "\u200b", "NegativeThinSpace;": "\u200b", "NegativeVeryThinSpace;": "\u200b", "NestedGreaterGreater;": "\u226b", "NestedLessLess;": "\u226a", "NewLine;": "\n", "Nfr;": "\U0001d511", "NoBreak;": "\u2060", "NonBreakingSpace;": "\xa0", "Nopf;": "\u2115", "Not;": "\u2aec", "NotCongruent;": "\u2262", "NotCupCap;": "\u226d", "NotDoubleVerticalBar;": "\u2226", "NotElement;": "\u2209", "NotEqual;": "\u2260", "NotEqualTilde;": "\u2242\u0338", "NotExists;": "\u2204", "NotGreater;": "\u226f", "NotGreaterEqual;": "\u2271", "NotGreaterFullEqual;": "\u2267\u0338", "NotGreaterGreater;": "\u226b\u0338", "NotGreaterLess;": "\u2279", "NotGreaterSlantEqual;": "\u2a7e\u0338", "NotGreaterTilde;": "\u2275", "NotHumpDownHump;": "\u224e\u0338", "NotHumpEqual;": "\u224f\u0338", "NotLeftTriangle;": "\u22ea", "NotLeftTriangleBar;": "\u29cf\u0338", "NotLeftTriangleEqual;": "\u22ec", "NotLess;": "\u226e", "NotLessEqual;": "\u2270", "NotLessGreater;": "\u2278", "NotLessLess;": "\u226a\u0338", "NotLessSlantEqual;": "\u2a7d\u0338", "NotLessTilde;": "\u2274", "NotNestedGreaterGreater;": "\u2aa2\u0338", "NotNestedLessLess;": "\u2aa1\u0338", "NotPrecedes;": "\u2280", "NotPrecedesEqual;": "\u2aaf\u0338", "NotPrecedesSlantEqual;": "\u22e0", "NotReverseElement;": "\u220c", "NotRightTriangle;": "\u22eb", "NotRightTriangleBar;": "\u29d0\u0338", "NotRightTriangleEqual;": "\u22ed", "NotSquareSubset;": "\u228f\u0338", "NotSquareSubsetEqual;": "\u22e2", "NotSquareSuperset;": "\u2290\u0338", "NotSquareSupersetEqual;": "\u22e3", "NotSubset;": "\u2282\u20d2", "NotSubsetEqual;": "\u2288", "NotSucceeds;": "\u2281", "NotSucceedsEqual;": "\u2ab0\u0338", "NotSucceedsSlantEqual;": "\u22e1", "NotSucceedsTilde;": "\u227f\u0338", "NotSuperset;": "\u2283\u20d2", "NotSupersetEqual;": "\u2289", "NotTilde;": "\u2241", "NotTildeEqual;": "\u2244", "NotTildeFullEqual;": "\u2247", "NotTildeTilde;": "\u2249", 
"NotVerticalBar;": "\u2224", "Nscr;": "\U0001d4a9", "Ntilde": "\xd1", "Ntilde;": "\xd1", "Nu;": "\u039d", "OElig;": "\u0152", "Oacute": "\xd3", "Oacute;": "\xd3", "Ocirc": "\xd4", "Ocirc;": "\xd4", "Ocy;": "\u041e", "Odblac;": "\u0150", "Ofr;": "\U0001d512", "Ograve": "\xd2", "Ograve;": "\xd2", "Omacr;": "\u014c", "Omega;": "\u03a9", "Omicron;": "\u039f", "Oopf;": "\U0001d546", "OpenCurlyDoubleQuote;": "\u201c", "OpenCurlyQuote;": "\u2018", "Or;": "\u2a54", "Oscr;": "\U0001d4aa", "Oslash": "\xd8", "Oslash;": "\xd8", "Otilde": "\xd5", "Otilde;": "\xd5", "Otimes;": "\u2a37", "Ouml": "\xd6", "Ouml;": "\xd6", "OverBar;": "\u203e", "OverBrace;": "\u23de", "OverBracket;": "\u23b4", "OverParenthesis;": "\u23dc", "PartialD;": "\u2202", "Pcy;": "\u041f", "Pfr;": "\U0001d513", "Phi;": "\u03a6", "Pi;": "\u03a0", "PlusMinus;": "\xb1", "Poincareplane;": "\u210c", "Popf;": "\u2119", "Pr;": "\u2abb", "Precedes;": "\u227a", "PrecedesEqual;": "\u2aaf", "PrecedesSlantEqual;": "\u227c", "PrecedesTilde;": "\u227e", "Prime;": "\u2033", "Product;": "\u220f", "Proportion;": "\u2237", "Proportional;": "\u221d", "Pscr;": "\U0001d4ab", "Psi;": "\u03a8", "QUOT": "\"", "QUOT;": "\"", "Qfr;": "\U0001d514", "Qopf;": "\u211a", "Qscr;": "\U0001d4ac", "RBarr;": "\u2910", "REG": "\xae", "REG;": "\xae", "Racute;": "\u0154", "Rang;": "\u27eb", "Rarr;": "\u21a0", "Rarrtl;": "\u2916", "Rcaron;": "\u0158", "Rcedil;": "\u0156", "Rcy;": "\u0420", "Re;": "\u211c", "ReverseElement;": "\u220b", "ReverseEquilibrium;": "\u21cb", "ReverseUpEquilibrium;": "\u296f", "Rfr;": "\u211c", "Rho;": "\u03a1", "RightAngleBracket;": "\u27e9", "RightArrow;": "\u2192", "RightArrowBar;": "\u21e5", "RightArrowLeftArrow;": "\u21c4", "RightCeiling;": "\u2309", "RightDoubleBracket;": "\u27e7", "RightDownTeeVector;": "\u295d", "RightDownVector;": "\u21c2", "RightDownVectorBar;": "\u2955", "RightFloor;": "\u230b", "RightTee;": "\u22a2", "RightTeeArrow;": "\u21a6", "RightTeeVector;": "\u295b", "RightTriangle;": "\u22b3", "RightTriangleBar;": "\u29d0", "RightTriangleEqual;": "\u22b5", "RightUpDownVector;": "\u294f", "RightUpTeeVector;": "\u295c", "RightUpVector;": "\u21be", "RightUpVectorBar;": "\u2954", "RightVector;": "\u21c0", "RightVectorBar;": "\u2953", "Rightarrow;": "\u21d2", "Ropf;": "\u211d", "RoundImplies;": "\u2970", "Rrightarrow;": "\u21db", "Rscr;": "\u211b", "Rsh;": "\u21b1", "RuleDelayed;": "\u29f4", "SHCHcy;": "\u0429", "SHcy;": "\u0428", "SOFTcy;": "\u042c", "Sacute;": "\u015a", "Sc;": "\u2abc", "Scaron;": "\u0160", "Scedil;": "\u015e", "Scirc;": "\u015c", "Scy;": "\u0421", "Sfr;": "\U0001d516", "ShortDownArrow;": "\u2193", "ShortLeftArrow;": "\u2190", "ShortRightArrow;": "\u2192", "ShortUpArrow;": "\u2191", "Sigma;": "\u03a3", "SmallCircle;": "\u2218", "Sopf;": "\U0001d54a", "Sqrt;": "\u221a", "Square;": "\u25a1", "SquareIntersection;": "\u2293", "SquareSubset;": "\u228f", "SquareSubsetEqual;": "\u2291", "SquareSuperset;": "\u2290", "SquareSupersetEqual;": "\u2292", "SquareUnion;": "\u2294", "Sscr;": "\U0001d4ae", "Star;": "\u22c6", "Sub;": "\u22d0", "Subset;": "\u22d0", "SubsetEqual;": "\u2286", "Succeeds;": "\u227b", "SucceedsEqual;": "\u2ab0", "SucceedsSlantEqual;": "\u227d", "SucceedsTilde;": "\u227f", "SuchThat;": "\u220b", "Sum;": "\u2211", "Sup;": "\u22d1", "Superset;": "\u2283", "SupersetEqual;": "\u2287", "Supset;": "\u22d1", "THORN": "\xde", "THORN;": "\xde", "TRADE;": "\u2122", "TSHcy;": "\u040b", "TScy;": "\u0426", "Tab;": "\t", "Tau;": "\u03a4", "Tcaron;": "\u0164", "Tcedil;": "\u0162", "Tcy;": "\u0422", "Tfr;": "\U0001d517", 
"Therefore;": "\u2234", "Theta;": "\u0398", "ThickSpace;": "\u205f\u200a", "ThinSpace;": "\u2009", "Tilde;": "\u223c", "TildeEqual;": "\u2243", "TildeFullEqual;": "\u2245", "TildeTilde;": "\u2248", "Topf;": "\U0001d54b", "TripleDot;": "\u20db", "Tscr;": "\U0001d4af", "Tstrok;": "\u0166", "Uacute": "\xda", "Uacute;": "\xda", "Uarr;": "\u219f", "Uarrocir;": "\u2949", "Ubrcy;": "\u040e", "Ubreve;": "\u016c", "Ucirc": "\xdb", "Ucirc;": "\xdb", "Ucy;": "\u0423", "Udblac;": "\u0170", "Ufr;": "\U0001d518", "Ugrave": "\xd9", "Ugrave;": "\xd9", "Umacr;": "\u016a", "UnderBar;": "_", "UnderBrace;": "\u23df", "UnderBracket;": "\u23b5", "UnderParenthesis;": "\u23dd", "Union;": "\u22c3", "UnionPlus;": "\u228e", "Uogon;": "\u0172", "Uopf;": "\U0001d54c", "UpArrow;": "\u2191", "UpArrowBar;": "\u2912", "UpArrowDownArrow;": "\u21c5", "UpDownArrow;": "\u2195", "UpEquilibrium;": "\u296e", "UpTee;": "\u22a5", "UpTeeArrow;": "\u21a5", "Uparrow;": "\u21d1", "Updownarrow;": "\u21d5", "UpperLeftArrow;": "\u2196", "UpperRightArrow;": "\u2197", "Upsi;": "\u03d2", "Upsilon;": "\u03a5", "Uring;": "\u016e", "Uscr;": "\U0001d4b0", "Utilde;": "\u0168", "Uuml": "\xdc", "Uuml;": "\xdc", "VDash;": "\u22ab", "Vbar;": "\u2aeb", "Vcy;": "\u0412", "Vdash;": "\u22a9", "Vdashl;": "\u2ae6", "Vee;": "\u22c1", "Verbar;": "\u2016", "Vert;": "\u2016", "VerticalBar;": "\u2223", "VerticalLine;": "|", "VerticalSeparator;": "\u2758", "VerticalTilde;": "\u2240", "VeryThinSpace;": "\u200a", "Vfr;": "\U0001d519", "Vopf;": "\U0001d54d", "Vscr;": "\U0001d4b1", "Vvdash;": "\u22aa", "Wcirc;": "\u0174", "Wedge;": "\u22c0", "Wfr;": "\U0001d51a", "Wopf;": "\U0001d54e", "Wscr;": "\U0001d4b2", "Xfr;": "\U0001d51b", "Xi;": "\u039e", "Xopf;": "\U0001d54f", "Xscr;": "\U0001d4b3", "YAcy;": "\u042f", "YIcy;": "\u0407", "YUcy;": "\u042e", "Yacute": "\xdd", "Yacute;": "\xdd", "Ycirc;": "\u0176", "Ycy;": "\u042b", "Yfr;": "\U0001d51c", "Yopf;": "\U0001d550", "Yscr;": "\U0001d4b4", "Yuml;": "\u0178", "ZHcy;": "\u0416", "Zacute;": "\u0179", "Zcaron;": "\u017d", "Zcy;": "\u0417", "Zdot;": "\u017b", "ZeroWidthSpace;": "\u200b", "Zeta;": "\u0396", "Zfr;": "\u2128", "Zopf;": "\u2124", "Zscr;": "\U0001d4b5", "aacute": "\xe1", "aacute;": "\xe1", "abreve;": "\u0103", "ac;": "\u223e", "acE;": "\u223e\u0333", "acd;": "\u223f", "acirc": "\xe2", "acirc;": "\xe2", "acute": "\xb4", "acute;": "\xb4", "acy;": "\u0430", "aelig": "\xe6", "aelig;": "\xe6", "af;": "\u2061", "afr;": "\U0001d51e", "agrave": "\xe0", "agrave;": "\xe0", "alefsym;": "\u2135", "aleph;": "\u2135", "alpha;": "\u03b1", "amacr;": "\u0101", "amalg;": "\u2a3f", "amp": "&", "amp;": "&", "and;": "\u2227", "andand;": "\u2a55", "andd;": "\u2a5c", "andslope;": "\u2a58", "andv;": "\u2a5a", "ang;": "\u2220", "ange;": "\u29a4", "angle;": "\u2220", "angmsd;": "\u2221", "angmsdaa;": "\u29a8", "angmsdab;": "\u29a9", "angmsdac;": "\u29aa", "angmsdad;": "\u29ab", "angmsdae;": "\u29ac", "angmsdaf;": "\u29ad", "angmsdag;": "\u29ae", "angmsdah;": "\u29af", "angrt;": "\u221f", "angrtvb;": "\u22be", "angrtvbd;": "\u299d", "angsph;": "\u2222", "angst;": "\xc5", "angzarr;": "\u237c", "aogon;": "\u0105", "aopf;": "\U0001d552", "ap;": "\u2248", "apE;": "\u2a70", "apacir;": "\u2a6f", "ape;": "\u224a", "apid;": "\u224b", "apos;": "'", "approx;": "\u2248", "approxeq;": "\u224a", "aring": "\xe5", "aring;": "\xe5", "ascr;": "\U0001d4b6", "ast;": "*", "asymp;": "\u2248", "asympeq;": "\u224d", "atilde": "\xe3", "atilde;": "\xe3", "auml": "\xe4", "auml;": "\xe4", "awconint;": "\u2233", "awint;": "\u2a11", "bNot;": "\u2aed", "backcong;": 
"\u224c", "backepsilon;": "\u03f6", "backprime;": "\u2035", "backsim;": "\u223d", "backsimeq;": "\u22cd", "barvee;": "\u22bd", "barwed;": "\u2305", "barwedge;": "\u2305", "bbrk;": "\u23b5", "bbrktbrk;": "\u23b6", "bcong;": "\u224c", "bcy;": "\u0431", "bdquo;": "\u201e", "becaus;": "\u2235", "because;": "\u2235", "bemptyv;": "\u29b0", "bepsi;": "\u03f6", "bernou;": "\u212c", "beta;": "\u03b2", "beth;": "\u2136", "between;": "\u226c", "bfr;": "\U0001d51f", "bigcap;": "\u22c2", "bigcirc;": "\u25ef", "bigcup;": "\u22c3", "bigodot;": "\u2a00", "bigoplus;": "\u2a01", "bigotimes;": "\u2a02", "bigsqcup;": "\u2a06", "bigstar;": "\u2605", "bigtriangledown;": "\u25bd", "bigtriangleup;": "\u25b3", "biguplus;": "\u2a04", "bigvee;": "\u22c1", "bigwedge;": "\u22c0", "bkarow;": "\u290d", "blacklozenge;": "\u29eb", "blacksquare;": "\u25aa", "blacktriangle;": "\u25b4", "blacktriangledown;": "\u25be", "blacktriangleleft;": "\u25c2", "blacktriangleright;": "\u25b8", "blank;": "\u2423", "blk12;": "\u2592", "blk14;": "\u2591", "blk34;": "\u2593", "block;": "\u2588", "bne;": "=\u20e5", "bnequiv;": "\u2261\u20e5", "bnot;": "\u2310", "bopf;": "\U0001d553", "bot;": "\u22a5", "bottom;": "\u22a5", "bowtie;": "\u22c8", "boxDL;": "\u2557", "boxDR;": "\u2554", "boxDl;": "\u2556", "boxDr;": "\u2553", "boxH;": "\u2550", "boxHD;": "\u2566", "boxHU;": "\u2569", "boxHd;": "\u2564", "boxHu;": "\u2567", "boxUL;": "\u255d", "boxUR;": "\u255a", "boxUl;": "\u255c", "boxUr;": "\u2559", "boxV;": "\u2551", "boxVH;": "\u256c", "boxVL;": "\u2563", "boxVR;": "\u2560", "boxVh;": "\u256b", "boxVl;": "\u2562", "boxVr;": "\u255f", "boxbox;": "\u29c9", "boxdL;": "\u2555", "boxdR;": "\u2552", "boxdl;": "\u2510", "boxdr;": "\u250c", "boxh;": "\u2500", "boxhD;": "\u2565", "boxhU;": "\u2568", "boxhd;": "\u252c", "boxhu;": "\u2534", "boxminus;": "\u229f", "boxplus;": "\u229e", "boxtimes;": "\u22a0", "boxuL;": "\u255b", "boxuR;": "\u2558", "boxul;": "\u2518", "boxur;": "\u2514", "boxv;": "\u2502", "boxvH;": "\u256a", "boxvL;": "\u2561", "boxvR;": "\u255e", "boxvh;": "\u253c", "boxvl;": "\u2524", "boxvr;": "\u251c", "bprime;": "\u2035", "breve;": "\u02d8", "brvbar": "\xa6", "brvbar;": "\xa6", "bscr;": "\U0001d4b7", "bsemi;": "\u204f", "bsim;": "\u223d", "bsime;": "\u22cd", "bsol;": "\\", "bsolb;": "\u29c5", "bsolhsub;": "\u27c8", "bull;": "\u2022", "bullet;": "\u2022", "bump;": "\u224e", "bumpE;": "\u2aae", "bumpe;": "\u224f", "bumpeq;": "\u224f", "cacute;": "\u0107", "cap;": "\u2229", "capand;": "\u2a44", "capbrcup;": "\u2a49", "capcap;": "\u2a4b", "capcup;": "\u2a47", "capdot;": "\u2a40", "caps;": "\u2229\ufe00", "caret;": "\u2041", "caron;": "\u02c7", "ccaps;": "\u2a4d", "ccaron;": "\u010d", "ccedil": "\xe7", "ccedil;": "\xe7", "ccirc;": "\u0109", "ccups;": "\u2a4c", "ccupssm;": "\u2a50", "cdot;": "\u010b", "cedil": "\xb8", "cedil;": "\xb8", "cemptyv;": "\u29b2", "cent": "\xa2", "cent;": "\xa2", "centerdot;": "\xb7", "cfr;": "\U0001d520", "chcy;": "\u0447", "check;": "\u2713", "checkmark;": "\u2713", "chi;": "\u03c7", "cir;": "\u25cb", "cirE;": "\u29c3", "circ;": "\u02c6", "circeq;": "\u2257", "circlearrowleft;": "\u21ba", "circlearrowright;": "\u21bb", "circledR;": "\xae", "circledS;": "\u24c8", "circledast;": "\u229b", "circledcirc;": "\u229a", "circleddash;": "\u229d", "cire;": "\u2257", "cirfnint;": "\u2a10", "cirmid;": "\u2aef", "cirscir;": "\u29c2", "clubs;": "\u2663", "clubsuit;": "\u2663", "colon;": ":", "colone;": "\u2254", "coloneq;": "\u2254", "comma;": ",", "commat;": "@", "comp;": "\u2201", "compfn;": "\u2218", "complement;": 
"\u2201", "complexes;": "\u2102", "cong;": "\u2245", "congdot;": "\u2a6d", "conint;": "\u222e", "copf;": "\U0001d554", "coprod;": "\u2210", "copy": "\xa9", "copy;": "\xa9", "copysr;": "\u2117", "crarr;": "\u21b5", "cross;": "\u2717", "cscr;": "\U0001d4b8", "csub;": "\u2acf", "csube;": "\u2ad1", "csup;": "\u2ad0", "csupe;": "\u2ad2", "ctdot;": "\u22ef", "cudarrl;": "\u2938", "cudarrr;": "\u2935", "cuepr;": "\u22de", "cuesc;": "\u22df", "cularr;": "\u21b6", "cularrp;": "\u293d", "cup;": "\u222a", "cupbrcap;": "\u2a48", "cupcap;": "\u2a46", "cupcup;": "\u2a4a", "cupdot;": "\u228d", "cupor;": "\u2a45", "cups;": "\u222a\ufe00", "curarr;": "\u21b7", "curarrm;": "\u293c", "curlyeqprec;": "\u22de", "curlyeqsucc;": "\u22df", "curlyvee;": "\u22ce", "curlywedge;": "\u22cf", "curren": "\xa4", "curren;": "\xa4", "curvearrowleft;": "\u21b6", "curvearrowright;": "\u21b7", "cuvee;": "\u22ce", "cuwed;": "\u22cf", "cwconint;": "\u2232", "cwint;": "\u2231", "cylcty;": "\u232d", "dArr;": "\u21d3", "dHar;": "\u2965", "dagger;": "\u2020", "daleth;": "\u2138", "darr;": "\u2193", "dash;": "\u2010", "dashv;": "\u22a3", "dbkarow;": "\u290f", "dblac;": "\u02dd", "dcaron;": "\u010f", "dcy;": "\u0434", "dd;": "\u2146", "ddagger;": "\u2021", "ddarr;": "\u21ca", "ddotseq;": "\u2a77", "deg": "\xb0", "deg;": "\xb0", "delta;": "\u03b4", "demptyv;": "\u29b1", "dfisht;": "\u297f", "dfr;": "\U0001d521", "dharl;": "\u21c3", "dharr;": "\u21c2", "diam;": "\u22c4", "diamond;": "\u22c4", "diamondsuit;": "\u2666", "diams;": "\u2666", "die;": "\xa8", "digamma;": "\u03dd", "disin;": "\u22f2", "div;": "\xf7", "divide": "\xf7", "divide;": "\xf7", "divideontimes;": "\u22c7", "divonx;": "\u22c7", "djcy;": "\u0452", "dlcorn;": "\u231e", "dlcrop;": "\u230d", "dollar;": "$", "dopf;": "\U0001d555", "dot;": "\u02d9", "doteq;": "\u2250", "doteqdot;": "\u2251", "dotminus;": "\u2238", "dotplus;": "\u2214", "dotsquare;": "\u22a1", "doublebarwedge;": "\u2306", "downarrow;": "\u2193", "downdownarrows;": "\u21ca", "downharpoonleft;": "\u21c3", "downharpoonright;": "\u21c2", "drbkarow;": "\u2910", "drcorn;": "\u231f", "drcrop;": "\u230c", "dscr;": "\U0001d4b9", "dscy;": "\u0455", "dsol;": "\u29f6", "dstrok;": "\u0111", "dtdot;": "\u22f1", "dtri;": "\u25bf", "dtrif;": "\u25be", "duarr;": "\u21f5", "duhar;": "\u296f", "dwangle;": "\u29a6", "dzcy;": "\u045f", "dzigrarr;": "\u27ff", "eDDot;": "\u2a77", "eDot;": "\u2251", "eacute": "\xe9", "eacute;": "\xe9", "easter;": "\u2a6e", "ecaron;": "\u011b", "ecir;": "\u2256", "ecirc": "\xea", "ecirc;": "\xea", "ecolon;": "\u2255", "ecy;": "\u044d", "edot;": "\u0117", "ee;": "\u2147", "efDot;": "\u2252", "efr;": "\U0001d522", "eg;": "\u2a9a", "egrave": "\xe8", "egrave;": "\xe8", "egs;": "\u2a96", "egsdot;": "\u2a98", "el;": "\u2a99", "elinters;": "\u23e7", "ell;": "\u2113", "els;": "\u2a95", "elsdot;": "\u2a97", "emacr;": "\u0113", "empty;": "\u2205", "emptyset;": "\u2205", "emptyv;": "\u2205", "emsp13;": "\u2004", "emsp14;": "\u2005", "emsp;": "\u2003", "eng;": "\u014b", "ensp;": "\u2002", "eogon;": "\u0119", "eopf;": "\U0001d556", "epar;": "\u22d5", "eparsl;": "\u29e3", "eplus;": "\u2a71", "epsi;": "\u03b5", "epsilon;": "\u03b5", "epsiv;": "\u03f5", "eqcirc;": "\u2256", "eqcolon;": "\u2255", "eqsim;": "\u2242", "eqslantgtr;": "\u2a96", "eqslantless;": "\u2a95", "equals;": "=", "equest;": "\u225f", "equiv;": "\u2261", "equivDD;": "\u2a78", "eqvparsl;": "\u29e5", "erDot;": "\u2253", "erarr;": "\u2971", "escr;": "\u212f", "esdot;": "\u2250", "esim;": "\u2242", "eta;": "\u03b7", "eth": "\xf0", "eth;": "\xf0", 
"euml": "\xeb", "euml;": "\xeb", "euro;": "\u20ac", "excl;": "!", "exist;": "\u2203", "expectation;": "\u2130", "exponentiale;": "\u2147", "fallingdotseq;": "\u2252", "fcy;": "\u0444", "female;": "\u2640", "ffilig;": "\ufb03", "fflig;": "\ufb00", "ffllig;": "\ufb04", "ffr;": "\U0001d523", "filig;": "\ufb01", "fjlig;": "fj", "flat;": "\u266d", "fllig;": "\ufb02", "fltns;": "\u25b1", "fnof;": "\u0192", "fopf;": "\U0001d557", "forall;": "\u2200", "fork;": "\u22d4", "forkv;": "\u2ad9", "fpartint;": "\u2a0d", "frac12": "\xbd", "frac12;": "\xbd", "frac13;": "\u2153", "frac14": "\xbc", "frac14;": "\xbc", "frac15;": "\u2155", "frac16;": "\u2159", "frac18;": "\u215b", "frac23;": "\u2154", "frac25;": "\u2156", "frac34": "\xbe", "frac34;": "\xbe", "frac35;": "\u2157", "frac38;": "\u215c", "frac45;": "\u2158", "frac56;": "\u215a", "frac58;": "\u215d", "frac78;": "\u215e", "frasl;": "\u2044", "frown;": "\u2322", "fscr;": "\U0001d4bb", "gE;": "\u2267", "gEl;": "\u2a8c", "gacute;": "\u01f5", "gamma;": "\u03b3", "gammad;": "\u03dd", "gap;": "\u2a86", "gbreve;": "\u011f", "gcirc;": "\u011d", "gcy;": "\u0433", "gdot;": "\u0121", "ge;": "\u2265", "gel;": "\u22db", "geq;": "\u2265", "geqq;": "\u2267", "geqslant;": "\u2a7e", "ges;": "\u2a7e", "gescc;": "\u2aa9", "gesdot;": "\u2a80", "gesdoto;": "\u2a82", "gesdotol;": "\u2a84", "gesl;": "\u22db\ufe00", "gesles;": "\u2a94", "gfr;": "\U0001d524", "gg;": "\u226b", "ggg;": "\u22d9", "gimel;": "\u2137", "gjcy;": "\u0453", "gl;": "\u2277", "glE;": "\u2a92", "gla;": "\u2aa5", "glj;": "\u2aa4", "gnE;": "\u2269", "gnap;": "\u2a8a", "gnapprox;": "\u2a8a", "gne;": "\u2a88", "gneq;": "\u2a88", "gneqq;": "\u2269", "gnsim;": "\u22e7", "gopf;": "\U0001d558", "grave;": "`", "gscr;": "\u210a", "gsim;": "\u2273", "gsime;": "\u2a8e", "gsiml;": "\u2a90", "gt": ">", "gt;": ">", "gtcc;": "\u2aa7", "gtcir;": "\u2a7a", "gtdot;": "\u22d7", "gtlPar;": "\u2995", "gtquest;": "\u2a7c", "gtrapprox;": "\u2a86", "gtrarr;": "\u2978", "gtrdot;": "\u22d7", "gtreqless;": "\u22db", "gtreqqless;": "\u2a8c", "gtrless;": "\u2277", "gtrsim;": "\u2273", "gvertneqq;": "\u2269\ufe00", "gvnE;": "\u2269\ufe00", "hArr;": "\u21d4", "hairsp;": "\u200a", "half;": "\xbd", "hamilt;": "\u210b", "hardcy;": "\u044a", "harr;": "\u2194", "harrcir;": "\u2948", "harrw;": "\u21ad", "hbar;": "\u210f", "hcirc;": "\u0125", "hearts;": "\u2665", "heartsuit;": "\u2665", "hellip;": "\u2026", "hercon;": "\u22b9", "hfr;": "\U0001d525", "hksearow;": "\u2925", "hkswarow;": "\u2926", "hoarr;": "\u21ff", "homtht;": "\u223b", "hookleftarrow;": "\u21a9", "hookrightarrow;": "\u21aa", "hopf;": "\U0001d559", "horbar;": "\u2015", "hscr;": "\U0001d4bd", "hslash;": "\u210f", "hstrok;": "\u0127", "hybull;": "\u2043", "hyphen;": "\u2010", "iacute": "\xed", "iacute;": "\xed", "ic;": "\u2063", "icirc": "\xee", "icirc;": "\xee", "icy;": "\u0438", "iecy;": "\u0435", "iexcl": "\xa1", "iexcl;": "\xa1", "iff;": "\u21d4", "ifr;": "\U0001d526", "igrave": "\xec", "igrave;": "\xec", "ii;": "\u2148", "iiiint;": "\u2a0c", "iiint;": "\u222d", "iinfin;": "\u29dc", "iiota;": "\u2129", "ijlig;": "\u0133", "imacr;": "\u012b", "image;": "\u2111", "imagline;": "\u2110", "imagpart;": "\u2111", "imath;": "\u0131", "imof;": "\u22b7", "imped;": "\u01b5", "in;": "\u2208", "incare;": "\u2105", "infin;": "\u221e", "infintie;": "\u29dd", "inodot;": "\u0131", "int;": "\u222b", "intcal;": "\u22ba", "integers;": "\u2124", "intercal;": "\u22ba", "intlarhk;": "\u2a17", "intprod;": "\u2a3c", "iocy;": "\u0451", "iogon;": "\u012f", "iopf;": "\U0001d55a", "iota;": "\u03b9", 
"iprod;": "\u2a3c", "iquest": "\xbf", "iquest;": "\xbf", "iscr;": "\U0001d4be", "isin;": "\u2208", "isinE;": "\u22f9", "isindot;": "\u22f5", "isins;": "\u22f4", "isinsv;": "\u22f3", "isinv;": "\u2208", "it;": "\u2062", "itilde;": "\u0129", "iukcy;": "\u0456", "iuml": "\xef", "iuml;": "\xef", "jcirc;": "\u0135", "jcy;": "\u0439", "jfr;": "\U0001d527", "jmath;": "\u0237", "jopf;": "\U0001d55b", "jscr;": "\U0001d4bf", "jsercy;": "\u0458", "jukcy;": "\u0454", "kappa;": "\u03ba", "kappav;": "\u03f0", "kcedil;": "\u0137", "kcy;": "\u043a", "kfr;": "\U0001d528", "kgreen;": "\u0138", "khcy;": "\u0445", "kjcy;": "\u045c", "kopf;": "\U0001d55c", "kscr;": "\U0001d4c0", "lAarr;": "\u21da", "lArr;": "\u21d0", "lAtail;": "\u291b", "lBarr;": "\u290e", "lE;": "\u2266", "lEg;": "\u2a8b", "lHar;": "\u2962", "lacute;": "\u013a", "laemptyv;": "\u29b4", "lagran;": "\u2112", "lambda;": "\u03bb", "lang;": "\u27e8", "langd;": "\u2991", "langle;": "\u27e8", "lap;": "\u2a85", "laquo": "\xab", "laquo;": "\xab", "larr;": "\u2190", "larrb;": "\u21e4", "larrbfs;": "\u291f", "larrfs;": "\u291d", "larrhk;": "\u21a9", "larrlp;": "\u21ab", "larrpl;": "\u2939", "larrsim;": "\u2973", "larrtl;": "\u21a2", "lat;": "\u2aab", "latail;": "\u2919", "late;": "\u2aad", "lates;": "\u2aad\ufe00", "lbarr;": "\u290c", "lbbrk;": "\u2772", "lbrace;": "{", "lbrack;": "[", "lbrke;": "\u298b", "lbrksld;": "\u298f", "lbrkslu;": "\u298d", "lcaron;": "\u013e", "lcedil;": "\u013c", "lceil;": "\u2308", "lcub;": "{", "lcy;": "\u043b", "ldca;": "\u2936", "ldquo;": "\u201c", "ldquor;": "\u201e", "ldrdhar;": "\u2967", "ldrushar;": "\u294b", "ldsh;": "\u21b2", "le;": "\u2264", "leftarrow;": "\u2190", "leftarrowtail;": "\u21a2", "leftharpoondown;": "\u21bd", "leftharpoonup;": "\u21bc", "leftleftarrows;": "\u21c7", "leftrightarrow;": "\u2194", "leftrightarrows;": "\u21c6", "leftrightharpoons;": "\u21cb", "leftrightsquigarrow;": "\u21ad", "leftthreetimes;": "\u22cb", "leg;": "\u22da", "leq;": "\u2264", "leqq;": "\u2266", "leqslant;": "\u2a7d", "les;": "\u2a7d", "lescc;": "\u2aa8", "lesdot;": "\u2a7f", "lesdoto;": "\u2a81", "lesdotor;": "\u2a83", "lesg;": "\u22da\ufe00", "lesges;": "\u2a93", "lessapprox;": "\u2a85", "lessdot;": "\u22d6", "lesseqgtr;": "\u22da", "lesseqqgtr;": "\u2a8b", "lessgtr;": "\u2276", "lesssim;": "\u2272", "lfisht;": "\u297c", "lfloor;": "\u230a", "lfr;": "\U0001d529", "lg;": "\u2276", "lgE;": "\u2a91", "lhard;": "\u21bd", "lharu;": "\u21bc", "lharul;": "\u296a", "lhblk;": "\u2584", "ljcy;": "\u0459", "ll;": "\u226a", "llarr;": "\u21c7", "llcorner;": "\u231e", "llhard;": "\u296b", "lltri;": "\u25fa", "lmidot;": "\u0140", "lmoust;": "\u23b0", "lmoustache;": "\u23b0", "lnE;": "\u2268", "lnap;": "\u2a89", "lnapprox;": "\u2a89", "lne;": "\u2a87", "lneq;": "\u2a87", "lneqq;": "\u2268", "lnsim;": "\u22e6", "loang;": "\u27ec", "loarr;": "\u21fd", "lobrk;": "\u27e6", "longleftarrow;": "\u27f5", "longleftrightarrow;": "\u27f7", "longmapsto;": "\u27fc", "longrightarrow;": "\u27f6", "looparrowleft;": "\u21ab", "looparrowright;": "\u21ac", "lopar;": "\u2985", "lopf;": "\U0001d55d", "loplus;": "\u2a2d", "lotimes;": "\u2a34", "lowast;": "\u2217", "lowbar;": "_", "loz;": "\u25ca", "lozenge;": "\u25ca", "lozf;": "\u29eb", "lpar;": "(", "lparlt;": "\u2993", "lrarr;": "\u21c6", "lrcorner;": "\u231f", "lrhar;": "\u21cb", "lrhard;": "\u296d", "lrm;": "\u200e", "lrtri;": "\u22bf", "lsaquo;": "\u2039", "lscr;": "\U0001d4c1", "lsh;": "\u21b0", "lsim;": "\u2272", "lsime;": "\u2a8d", "lsimg;": "\u2a8f", "lsqb;": "[", "lsquo;": "\u2018", "lsquor;": "\u201a", 
"lstrok;": "\u0142", "lt": "<", "lt;": "<", "ltcc;": "\u2aa6", "ltcir;": "\u2a79", "ltdot;": "\u22d6", "lthree;": "\u22cb", "ltimes;": "\u22c9", "ltlarr;": "\u2976", "ltquest;": "\u2a7b", "ltrPar;": "\u2996", "ltri;": "\u25c3", "ltrie;": "\u22b4", "ltrif;": "\u25c2", "lurdshar;": "\u294a", "luruhar;": "\u2966", "lvertneqq;": "\u2268\ufe00", "lvnE;": "\u2268\ufe00", "mDDot;": "\u223a", "macr": "\xaf", "macr;": "\xaf", "male;": "\u2642", "malt;": "\u2720", "maltese;": "\u2720", "map;": "\u21a6", "mapsto;": "\u21a6", "mapstodown;": "\u21a7", "mapstoleft;": "\u21a4", "mapstoup;": "\u21a5", "marker;": "\u25ae", "mcomma;": "\u2a29", "mcy;": "\u043c", "mdash;": "\u2014", "measuredangle;": "\u2221", "mfr;": "\U0001d52a", "mho;": "\u2127", "micro": "\xb5", "micro;": "\xb5", "mid;": "\u2223", "midast;": "*", "midcir;": "\u2af0", "middot": "\xb7", "middot;": "\xb7", "minus;": "\u2212", "minusb;": "\u229f", "minusd;": "\u2238", "minusdu;": "\u2a2a", "mlcp;": "\u2adb", "mldr;": "\u2026", "mnplus;": "\u2213", "models;": "\u22a7", "mopf;": "\U0001d55e", "mp;": "\u2213", "mscr;": "\U0001d4c2", "mstpos;": "\u223e", "mu;": "\u03bc", "multimap;": "\u22b8", "mumap;": "\u22b8", "nGg;": "\u22d9\u0338", "nGt;": "\u226b\u20d2", "nGtv;": "\u226b\u0338", "nLeftarrow;": "\u21cd", "nLeftrightarrow;": "\u21ce", "nLl;": "\u22d8\u0338", "nLt;": "\u226a\u20d2", "nLtv;": "\u226a\u0338", "nRightarrow;": "\u21cf", "nVDash;": "\u22af", "nVdash;": "\u22ae", "nabla;": "\u2207", "nacute;": "\u0144", "nang;": "\u2220\u20d2", "nap;": "\u2249", "napE;": "\u2a70\u0338", "napid;": "\u224b\u0338", "napos;": "\u0149", "napprox;": "\u2249", "natur;": "\u266e", "natural;": "\u266e", "naturals;": "\u2115", "nbsp": "\xa0", "nbsp;": "\xa0", "nbump;": "\u224e\u0338", "nbumpe;": "\u224f\u0338", "ncap;": "\u2a43", "ncaron;": "\u0148", "ncedil;": "\u0146", "ncong;": "\u2247", "ncongdot;": "\u2a6d\u0338", "ncup;": "\u2a42", "ncy;": "\u043d", "ndash;": "\u2013", "ne;": "\u2260", "neArr;": "\u21d7", "nearhk;": "\u2924", "nearr;": "\u2197", "nearrow;": "\u2197", "nedot;": "\u2250\u0338", "nequiv;": "\u2262", "nesear;": "\u2928", "nesim;": "\u2242\u0338", "nexist;": "\u2204", "nexists;": "\u2204", "nfr;": "\U0001d52b", "ngE;": "\u2267\u0338", "nge;": "\u2271", "ngeq;": "\u2271", "ngeqq;": "\u2267\u0338", "ngeqslant;": "\u2a7e\u0338", "nges;": "\u2a7e\u0338", "ngsim;": "\u2275", "ngt;": "\u226f", "ngtr;": "\u226f", "nhArr;": "\u21ce", "nharr;": "\u21ae", "nhpar;": "\u2af2", "ni;": "\u220b", "nis;": "\u22fc", "nisd;": "\u22fa", "niv;": "\u220b", "njcy;": "\u045a", "nlArr;": "\u21cd", "nlE;": "\u2266\u0338", "nlarr;": "\u219a", "nldr;": "\u2025", "nle;": "\u2270", "nleftarrow;": "\u219a", "nleftrightarrow;": "\u21ae", "nleq;": "\u2270", "nleqq;": "\u2266\u0338", "nleqslant;": "\u2a7d\u0338", "nles;": "\u2a7d\u0338", "nless;": "\u226e", "nlsim;": "\u2274", "nlt;": "\u226e", "nltri;": "\u22ea", "nltrie;": "\u22ec", "nmid;": "\u2224", "nopf;": "\U0001d55f", "not": "\xac", "not;": "\xac", "notin;": "\u2209", "notinE;": "\u22f9\u0338", "notindot;": "\u22f5\u0338", "notinva;": "\u2209", "notinvb;": "\u22f7", "notinvc;": "\u22f6", "notni;": "\u220c", "notniva;": "\u220c", "notnivb;": "\u22fe", "notnivc;": "\u22fd", "npar;": "\u2226", "nparallel;": "\u2226", "nparsl;": "\u2afd\u20e5", "npart;": "\u2202\u0338", "npolint;": "\u2a14", "npr;": "\u2280", "nprcue;": "\u22e0", "npre;": "\u2aaf\u0338", "nprec;": "\u2280", "npreceq;": "\u2aaf\u0338", "nrArr;": "\u21cf", "nrarr;": "\u219b", "nrarrc;": "\u2933\u0338", "nrarrw;": "\u219d\u0338", "nrightarrow;": "\u219b", 
"nrtri;": "\u22eb", "nrtrie;": "\u22ed", "nsc;": "\u2281", "nsccue;": "\u22e1", "nsce;": "\u2ab0\u0338", "nscr;": "\U0001d4c3", "nshortmid;": "\u2224", "nshortparallel;": "\u2226", "nsim;": "\u2241", "nsime;": "\u2244", "nsimeq;": "\u2244", "nsmid;": "\u2224", "nspar;": "\u2226", "nsqsube;": "\u22e2", "nsqsupe;": "\u22e3", "nsub;": "\u2284", "nsubE;": "\u2ac5\u0338", "nsube;": "\u2288", "nsubset;": "\u2282\u20d2", "nsubseteq;": "\u2288", "nsubseteqq;": "\u2ac5\u0338", "nsucc;": "\u2281", "nsucceq;": "\u2ab0\u0338", "nsup;": "\u2285", "nsupE;": "\u2ac6\u0338", "nsupe;": "\u2289", "nsupset;": "\u2283\u20d2", "nsupseteq;": "\u2289", "nsupseteqq;": "\u2ac6\u0338", "ntgl;": "\u2279", "ntilde": "\xf1", "ntilde;": "\xf1", "ntlg;": "\u2278", "ntriangleleft;": "\u22ea", "ntrianglelefteq;": "\u22ec", "ntriangleright;": "\u22eb", "ntrianglerighteq;": "\u22ed", "nu;": "\u03bd", "num;": "#", "numero;": "\u2116", "numsp;": "\u2007", "nvDash;": "\u22ad", "nvHarr;": "\u2904", "nvap;": "\u224d\u20d2", "nvdash;": "\u22ac", "nvge;": "\u2265\u20d2", "nvgt;": ">\u20d2", "nvinfin;": "\u29de", "nvlArr;": "\u2902", "nvle;": "\u2264\u20d2", "nvlt;": "<\u20d2", "nvltrie;": "\u22b4\u20d2", "nvrArr;": "\u2903", "nvrtrie;": "\u22b5\u20d2", "nvsim;": "\u223c\u20d2", "nwArr;": "\u21d6", "nwarhk;": "\u2923", "nwarr;": "\u2196", "nwarrow;": "\u2196", "nwnear;": "\u2927", "oS;": "\u24c8", "oacute": "\xf3", "oacute;": "\xf3", "oast;": "\u229b", "ocir;": "\u229a", "ocirc": "\xf4", "ocirc;": "\xf4", "ocy;": "\u043e", "odash;": "\u229d", "odblac;": "\u0151", "odiv;": "\u2a38", "odot;": "\u2299", "odsold;": "\u29bc", "oelig;": "\u0153", "ofcir;": "\u29bf", "ofr;": "\U0001d52c", "ogon;": "\u02db", "ograve": "\xf2", "ograve;": "\xf2", "ogt;": "\u29c1", "ohbar;": "\u29b5", "ohm;": "\u03a9", "oint;": "\u222e", "olarr;": "\u21ba", "olcir;": "\u29be", "olcross;": "\u29bb", "oline;": "\u203e", "olt;": "\u29c0", "omacr;": "\u014d", "omega;": "\u03c9", "omicron;": "\u03bf", "omid;": "\u29b6", "ominus;": "\u2296", "oopf;": "\U0001d560", "opar;": "\u29b7", "operp;": "\u29b9", "oplus;": "\u2295", "or;": "\u2228", "orarr;": "\u21bb", "ord;": "\u2a5d", "order;": "\u2134", "orderof;": "\u2134", "ordf": "\xaa", "ordf;": "\xaa", "ordm": "\xba", "ordm;": "\xba", "origof;": "\u22b6", "oror;": "\u2a56", "orslope;": "\u2a57", "orv;": "\u2a5b", "oscr;": "\u2134", "oslash": "\xf8", "oslash;": "\xf8", "osol;": "\u2298", "otilde": "\xf5", "otilde;": "\xf5", "otimes;": "\u2297", "otimesas;": "\u2a36", "ouml": "\xf6", "ouml;": "\xf6", "ovbar;": "\u233d", "par;": "\u2225", "para": "\xb6", "para;": "\xb6", "parallel;": "\u2225", "parsim;": "\u2af3", "parsl;": "\u2afd", "part;": "\u2202", "pcy;": "\u043f", "percnt;": "%", "period;": ".", "permil;": "\u2030", "perp;": "\u22a5", "pertenk;": "\u2031", "pfr;": "\U0001d52d", "phi;": "\u03c6", "phiv;": "\u03d5", "phmmat;": "\u2133", "phone;": "\u260e", "pi;": "\u03c0", "pitchfork;": "\u22d4", "piv;": "\u03d6", "planck;": "\u210f", "planckh;": "\u210e", "plankv;": "\u210f", "plus;": "+", "plusacir;": "\u2a23", "plusb;": "\u229e", "pluscir;": "\u2a22", "plusdo;": "\u2214", "plusdu;": "\u2a25", "pluse;": "\u2a72", "plusmn": "\xb1", "plusmn;": "\xb1", "plussim;": "\u2a26", "plustwo;": "\u2a27", "pm;": "\xb1", "pointint;": "\u2a15", "popf;": "\U0001d561", "pound": "\xa3", "pound;": "\xa3", "pr;": "\u227a", "prE;": "\u2ab3", "prap;": "\u2ab7", "prcue;": "\u227c", "pre;": "\u2aaf", "prec;": "\u227a", "precapprox;": "\u2ab7", "preccurlyeq;": "\u227c", "preceq;": "\u2aaf", "precnapprox;": "\u2ab9", "precneqq;": "\u2ab5", 
"precnsim;": "\u22e8", "precsim;": "\u227e", "prime;": "\u2032", "primes;": "\u2119", "prnE;": "\u2ab5", "prnap;": "\u2ab9", "prnsim;": "\u22e8", "prod;": "\u220f", "profalar;": "\u232e", "profline;": "\u2312", "profsurf;": "\u2313", "prop;": "\u221d", "propto;": "\u221d", "prsim;": "\u227e", "prurel;": "\u22b0", "pscr;": "\U0001d4c5", "psi;": "\u03c8", "puncsp;": "\u2008", "qfr;": "\U0001d52e", "qint;": "\u2a0c", "qopf;": "\U0001d562", "qprime;": "\u2057", "qscr;": "\U0001d4c6", "quaternions;": "\u210d", "quatint;": "\u2a16", "quest;": "?", "questeq;": "\u225f", "quot": "\"", "quot;": "\"", "rAarr;": "\u21db", "rArr;": "\u21d2", "rAtail;": "\u291c", "rBarr;": "\u290f", "rHar;": "\u2964", "race;": "\u223d\u0331", "racute;": "\u0155", "radic;": "\u221a", "raemptyv;": "\u29b3", "rang;": "\u27e9", "rangd;": "\u2992", "range;": "\u29a5", "rangle;": "\u27e9", "raquo": "\xbb", "raquo;": "\xbb", "rarr;": "\u2192", "rarrap;": "\u2975", "rarrb;": "\u21e5", "rarrbfs;": "\u2920", "rarrc;": "\u2933", "rarrfs;": "\u291e", "rarrhk;": "\u21aa", "rarrlp;": "\u21ac", "rarrpl;": "\u2945", "rarrsim;": "\u2974", "rarrtl;": "\u21a3", "rarrw;": "\u219d", "ratail;": "\u291a", "ratio;": "\u2236", "rationals;": "\u211a", "rbarr;": "\u290d", "rbbrk;": "\u2773", "rbrace;": "}", "rbrack;": "]", "rbrke;": "\u298c", "rbrksld;": "\u298e", "rbrkslu;": "\u2990", "rcaron;": "\u0159", "rcedil;": "\u0157", "rceil;": "\u2309", "rcub;": "}", "rcy;": "\u0440", "rdca;": "\u2937", "rdldhar;": "\u2969", "rdquo;": "\u201d", "rdquor;": "\u201d", "rdsh;": "\u21b3", "real;": "\u211c", "realine;": "\u211b", "realpart;": "\u211c", "reals;": "\u211d", "rect;": "\u25ad", "reg": "\xae", "reg;": "\xae", "rfisht;": "\u297d", "rfloor;": "\u230b", "rfr;": "\U0001d52f", "rhard;": "\u21c1", "rharu;": "\u21c0", "rharul;": "\u296c", "rho;": "\u03c1", "rhov;": "\u03f1", "rightarrow;": "\u2192", "rightarrowtail;": "\u21a3", "rightharpoondown;": "\u21c1", "rightharpoonup;": "\u21c0", "rightleftarrows;": "\u21c4", "rightleftharpoons;": "\u21cc", "rightrightarrows;": "\u21c9", "rightsquigarrow;": "\u219d", "rightthreetimes;": "\u22cc", "ring;": "\u02da", "risingdotseq;": "\u2253", "rlarr;": "\u21c4", "rlhar;": "\u21cc", "rlm;": "\u200f", "rmoust;": "\u23b1", "rmoustache;": "\u23b1", "rnmid;": "\u2aee", "roang;": "\u27ed", "roarr;": "\u21fe", "robrk;": "\u27e7", "ropar;": "\u2986", "ropf;": "\U0001d563", "roplus;": "\u2a2e", "rotimes;": "\u2a35", "rpar;": ")", "rpargt;": "\u2994", "rppolint;": "\u2a12", "rrarr;": "\u21c9", "rsaquo;": "\u203a", "rscr;": "\U0001d4c7", "rsh;": "\u21b1", "rsqb;": "]", "rsquo;": "\u2019", "rsquor;": "\u2019", "rthree;": "\u22cc", "rtimes;": "\u22ca", "rtri;": "\u25b9", "rtrie;": "\u22b5", "rtrif;": "\u25b8", "rtriltri;": "\u29ce", "ruluhar;": "\u2968", "rx;": "\u211e", "sacute;": "\u015b", "sbquo;": "\u201a", "sc;": "\u227b", "scE;": "\u2ab4", "scap;": "\u2ab8", "scaron;": "\u0161", "sccue;": "\u227d", "sce;": "\u2ab0", "scedil;": "\u015f", "scirc;": "\u015d", "scnE;": "\u2ab6", "scnap;": "\u2aba", "scnsim;": "\u22e9", "scpolint;": "\u2a13", "scsim;": "\u227f", "scy;": "\u0441", "sdot;": "\u22c5", "sdotb;": "\u22a1", "sdote;": "\u2a66", "seArr;": "\u21d8", "searhk;": "\u2925", "searr;": "\u2198", "searrow;": "\u2198", "sect": "\xa7", "sect;": "\xa7", "semi;": ";", "seswar;": "\u2929", "setminus;": "\u2216", "setmn;": "\u2216", "sext;": "\u2736", "sfr;": "\U0001d530", "sfrown;": "\u2322", "sharp;": "\u266f", "shchcy;": "\u0449", "shcy;": "\u0448", "shortmid;": "\u2223", "shortparallel;": "\u2225", "shy": "\xad", "shy;": 
"\xad", "sigma;": "\u03c3", "sigmaf;": "\u03c2", "sigmav;": "\u03c2", "sim;": "\u223c", "simdot;": "\u2a6a", "sime;": "\u2243", "simeq;": "\u2243", "simg;": "\u2a9e", "simgE;": "\u2aa0", "siml;": "\u2a9d", "simlE;": "\u2a9f", "simne;": "\u2246", "simplus;": "\u2a24", "simrarr;": "\u2972", "slarr;": "\u2190", "smallsetminus;": "\u2216", "smashp;": "\u2a33", "smeparsl;": "\u29e4", "smid;": "\u2223", "smile;": "\u2323", "smt;": "\u2aaa", "smte;": "\u2aac", "smtes;": "\u2aac\ufe00", "softcy;": "\u044c", "sol;": "/", "solb;": "\u29c4", "solbar;": "\u233f", "sopf;": "\U0001d564", "spades;": "\u2660", "spadesuit;": "\u2660", "spar;": "\u2225", "sqcap;": "\u2293", "sqcaps;": "\u2293\ufe00", "sqcup;": "\u2294", "sqcups;": "\u2294\ufe00", "sqsub;": "\u228f", "sqsube;": "\u2291", "sqsubset;": "\u228f", "sqsubseteq;": "\u2291", "sqsup;": "\u2290", "sqsupe;": "\u2292", "sqsupset;": "\u2290", "sqsupseteq;": "\u2292", "squ;": "\u25a1", "square;": "\u25a1", "squarf;": "\u25aa", "squf;": "\u25aa", "srarr;": "\u2192", "sscr;": "\U0001d4c8", "ssetmn;": "\u2216", "ssmile;": "\u2323", "sstarf;": "\u22c6", "star;": "\u2606", "starf;": "\u2605", "straightepsilon;": "\u03f5", "straightphi;": "\u03d5", "strns;": "\xaf", "sub;": "\u2282", "subE;": "\u2ac5", "subdot;": "\u2abd", "sube;": "\u2286", "subedot;": "\u2ac3", "submult;": "\u2ac1", "subnE;": "\u2acb", "subne;": "\u228a", "subplus;": "\u2abf", "subrarr;": "\u2979", "subset;": "\u2282", "subseteq;": "\u2286", "subseteqq;": "\u2ac5", "subsetneq;": "\u228a", "subsetneqq;": "\u2acb", "subsim;": "\u2ac7", "subsub;": "\u2ad5", "subsup;": "\u2ad3", "succ;": "\u227b", "succapprox;": "\u2ab8", "succcurlyeq;": "\u227d", "succeq;": "\u2ab0", "succnapprox;": "\u2aba", "succneqq;": "\u2ab6", "succnsim;": "\u22e9", "succsim;": "\u227f", "sum;": "\u2211", "sung;": "\u266a", "sup1": "\xb9", "sup1;": "\xb9", "sup2": "\xb2", "sup2;": "\xb2", "sup3": "\xb3", "sup3;": "\xb3", "sup;": "\u2283", "supE;": "\u2ac6", "supdot;": "\u2abe", "supdsub;": "\u2ad8", "supe;": "\u2287", "supedot;": "\u2ac4", "suphsol;": "\u27c9", "suphsub;": "\u2ad7", "suplarr;": "\u297b", "supmult;": "\u2ac2", "supnE;": "\u2acc", "supne;": "\u228b", "supplus;": "\u2ac0", "supset;": "\u2283", "supseteq;": "\u2287", "supseteqq;": "\u2ac6", "supsetneq;": "\u228b", "supsetneqq;": "\u2acc", "supsim;": "\u2ac8", "supsub;": "\u2ad4", "supsup;": "\u2ad6", "swArr;": "\u21d9", "swarhk;": "\u2926", "swarr;": "\u2199", "swarrow;": "\u2199", "swnwar;": "\u292a", "szlig": "\xdf", "szlig;": "\xdf", "target;": "\u2316", "tau;": "\u03c4", "tbrk;": "\u23b4", "tcaron;": "\u0165", "tcedil;": "\u0163", "tcy;": "\u0442", "tdot;": "\u20db", "telrec;": "\u2315", "tfr;": "\U0001d531", "there4;": "\u2234", "therefore;": "\u2234", "theta;": "\u03b8", "thetasym;": "\u03d1", "thetav;": "\u03d1", "thickapprox;": "\u2248", "thicksim;": "\u223c", "thinsp;": "\u2009", "thkap;": "\u2248", "thksim;": "\u223c", "thorn": "\xfe", "thorn;": "\xfe", "tilde;": "\u02dc", "times": "\xd7", "times;": "\xd7", "timesb;": "\u22a0", "timesbar;": "\u2a31", "timesd;": "\u2a30", "tint;": "\u222d", "toea;": "\u2928", "top;": "\u22a4", "topbot;": "\u2336", "topcir;": "\u2af1", "topf;": "\U0001d565", "topfork;": "\u2ada", "tosa;": "\u2929", "tprime;": "\u2034", "trade;": "\u2122", "triangle;": "\u25b5", "triangledown;": "\u25bf", "triangleleft;": "\u25c3", "trianglelefteq;": "\u22b4", "triangleq;": "\u225c", "triangleright;": "\u25b9", "trianglerighteq;": "\u22b5", "tridot;": "\u25ec", "trie;": "\u225c", "triminus;": "\u2a3a", "triplus;": "\u2a39", "trisb;": 
"\u29cd", "tritime;": "\u2a3b", "trpezium;": "\u23e2", "tscr;": "\U0001d4c9", "tscy;": "\u0446", "tshcy;": "\u045b", "tstrok;": "\u0167", "twixt;": "\u226c", "twoheadleftarrow;": "\u219e", "twoheadrightarrow;": "\u21a0", "uArr;": "\u21d1", "uHar;": "\u2963", "uacute": "\xfa", "uacute;": "\xfa", "uarr;": "\u2191", "ubrcy;": "\u045e", "ubreve;": "\u016d", "ucirc": "\xfb", "ucirc;": "\xfb", "ucy;": "\u0443", "udarr;": "\u21c5", "udblac;": "\u0171", "udhar;": "\u296e", "ufisht;": "\u297e", "ufr;": "\U0001d532", "ugrave": "\xf9", "ugrave;": "\xf9", "uharl;": "\u21bf", "uharr;": "\u21be", "uhblk;": "\u2580", "ulcorn;": "\u231c", "ulcorner;": "\u231c", "ulcrop;": "\u230f", "ultri;": "\u25f8", "umacr;": "\u016b", "uml": "\xa8", "uml;": "\xa8", "uogon;": "\u0173", "uopf;": "\U0001d566", "uparrow;": "\u2191", "updownarrow;": "\u2195", "upharpoonleft;": "\u21bf", "upharpoonright;": "\u21be", "uplus;": "\u228e", "upsi;": "\u03c5", "upsih;": "\u03d2", "upsilon;": "\u03c5", "upuparrows;": "\u21c8", "urcorn;": "\u231d", "urcorner;": "\u231d", "urcrop;": "\u230e", "uring;": "\u016f", "urtri;": "\u25f9", "uscr;": "\U0001d4ca", "utdot;": "\u22f0", "utilde;": "\u0169", "utri;": "\u25b5", "utrif;": "\u25b4", "uuarr;": "\u21c8", "uuml": "\xfc", "uuml;": "\xfc", "uwangle;": "\u29a7", "vArr;": "\u21d5", "vBar;": "\u2ae8", "vBarv;": "\u2ae9", "vDash;": "\u22a8", "vangrt;": "\u299c", "varepsilon;": "\u03f5", "varkappa;": "\u03f0", "varnothing;": "\u2205", "varphi;": "\u03d5", "varpi;": "\u03d6", "varpropto;": "\u221d", "varr;": "\u2195", "varrho;": "\u03f1", "varsigma;": "\u03c2", "varsubsetneq;": "\u228a\ufe00", "varsubsetneqq;": "\u2acb\ufe00", "varsupsetneq;": "\u228b\ufe00", "varsupsetneqq;": "\u2acc\ufe00", "vartheta;": "\u03d1", "vartriangleleft;": "\u22b2", "vartriangleright;": "\u22b3", "vcy;": "\u0432", "vdash;": "\u22a2", "vee;": "\u2228", "veebar;": "\u22bb", "veeeq;": "\u225a", "vellip;": "\u22ee", "verbar;": "|", "vert;": "|", "vfr;": "\U0001d533", "vltri;": "\u22b2", "vnsub;": "\u2282\u20d2", "vnsup;": "\u2283\u20d2", "vopf;": "\U0001d567", "vprop;": "\u221d", "vrtri;": "\u22b3", "vscr;": "\U0001d4cb", "vsubnE;": "\u2acb\ufe00", "vsubne;": "\u228a\ufe00", "vsupnE;": "\u2acc\ufe00", "vsupne;": "\u228b\ufe00", "vzigzag;": "\u299a", "wcirc;": "\u0175", "wedbar;": "\u2a5f", "wedge;": "\u2227", "wedgeq;": "\u2259", "weierp;": "\u2118", "wfr;": "\U0001d534", "wopf;": "\U0001d568", "wp;": "\u2118", "wr;": "\u2240", "wreath;": "\u2240", "wscr;": "\U0001d4cc", "xcap;": "\u22c2", "xcirc;": "\u25ef", "xcup;": "\u22c3", "xdtri;": "\u25bd", "xfr;": "\U0001d535", "xhArr;": "\u27fa", "xharr;": "\u27f7", "xi;": "\u03be", "xlArr;": "\u27f8", "xlarr;": "\u27f5", "xmap;": "\u27fc", "xnis;": "\u22fb", "xodot;": "\u2a00", "xopf;": "\U0001d569", "xoplus;": "\u2a01", "xotime;": "\u2a02", "xrArr;": "\u27f9", "xrarr;": "\u27f6", "xscr;": "\U0001d4cd", "xsqcup;": "\u2a06", "xuplus;": "\u2a04", "xutri;": "\u25b3", "xvee;": "\u22c1", "xwedge;": "\u22c0", "yacute": "\xfd", "yacute;": "\xfd", "yacy;": "\u044f", "ycirc;": "\u0177", "ycy;": "\u044b", "yen": "\xa5", "yen;": "\xa5", "yfr;": "\U0001d536", "yicy;": "\u0457", "yopf;": "\U0001d56a", "yscr;": "\U0001d4ce", "yucy;": "\u044e", "yuml": "\xff", "yuml;": "\xff", "zacute;": "\u017a", "zcaron;": "\u017e", "zcy;": "\u0437", "zdot;": "\u017c", "zeetrf;": "\u2128", "zeta;": "\u03b6", "zfr;": "\U0001d537", "zhcy;": "\u0436", "zigrarr;": "\u21dd", "zopf;": "\U0001d56b", "zscr;": "\U0001d4cf", "zwj;": "\u200d", "zwnj;": "\u200c", } replacementCharacters = { 0x0: "\uFFFD", 0x0d: 
"\u000D", 0x80: "\u20AC", 0x81: "\u0081", 0x82: "\u201A", 0x83: "\u0192", 0x84: "\u201E", 0x85: "\u2026", 0x86: "\u2020", 0x87: "\u2021", 0x88: "\u02C6", 0x89: "\u2030", 0x8A: "\u0160", 0x8B: "\u2039", 0x8C: "\u0152", 0x8D: "\u008D", 0x8E: "\u017D", 0x8F: "\u008F", 0x90: "\u0090", 0x91: "\u2018", 0x92: "\u2019", 0x93: "\u201C", 0x94: "\u201D", 0x95: "\u2022", 0x96: "\u2013", 0x97: "\u2014", 0x98: "\u02DC", 0x99: "\u2122", 0x9A: "\u0161", 0x9B: "\u203A", 0x9C: "\u0153", 0x9D: "\u009D", 0x9E: "\u017E", 0x9F: "\u0178", } tokenTypes = { "Doctype": 0, "Characters": 1, "SpaceCharacters": 2, "StartTag": 3, "EndTag": 4, "EmptyTag": 5, "Comment": 6, "ParseError": 7 } tagTokenTypes = frozenset([tokenTypes["StartTag"], tokenTypes["EndTag"], tokenTypes["EmptyTag"]]) prefixes = {v: k for k, v in namespaces.items()} prefixes["http://www.w3.org/1998/Math/MathML"] = "math" class DataLossWarning(UserWarning): """Raised when the current tree is unable to represent the input data""" pass class _ReparseException(Exception): pass
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/html5lib/html5parser.py
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import with_metaclass, viewkeys

import types

from . import _inputstream
from . import _tokenizer

from . import treebuilders
from .treebuilders.base import Marker

from . import _utils
from .constants import (
    spaceCharacters, asciiUpper2Lower,
    specialElements, headingElements, cdataElements, rcdataElements,
    tokenTypes, tagTokenTypes,
    namespaces,
    htmlIntegrationPointElements, mathmlTextIntegrationPointElements,
    adjustForeignAttributes as adjustForeignAttributesMap,
    adjustMathMLAttributes, adjustSVGAttributes,
    E,
    _ReparseException
)


def parse(doc, treebuilder="etree", namespaceHTMLElements=True, **kwargs):
    """Parse an HTML document as a string or file-like object into a tree

    :arg doc: the document to parse as a string or file-like object

    :arg treebuilder: the treebuilder to use when parsing

    :arg namespaceHTMLElements: whether or not to namespace HTML elements

    :returns: parsed tree

    Example:

    >>> from html5lib.html5parser import parse
    >>> parse('<html><body><p>This is a doc</p></body></html>')
    <Element u'{http://www.w3.org/1999/xhtml}html' at 0x7feac4909db0>

    """
    tb = treebuilders.getTreeBuilder(treebuilder)
    p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
    return p.parse(doc, **kwargs)


def parseFragment(doc, container="div", treebuilder="etree", namespaceHTMLElements=True, **kwargs):
    """Parse an HTML fragment as a string or file-like object into a tree

    :arg doc: the fragment to parse as a string or file-like object

    :arg container: the container context to parse the fragment in

    :arg treebuilder: the treebuilder to use when parsing

    :arg namespaceHTMLElements: whether or not to namespace HTML elements

    :returns: parsed tree

    Example:

    >>> from html5lib.html5parser import parseFragment
    >>> parseFragment('<b>this is a fragment</b>')
    <Element u'DOCUMENT_FRAGMENT' at 0x7feac484b090>

    """
    tb = treebuilders.getTreeBuilder(treebuilder)
    p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
    return p.parseFragment(doc, container=container, **kwargs)


def method_decorator_metaclass(function):
    class Decorated(type):
        def __new__(meta, classname, bases, classDict):
            for attributeName, attribute in classDict.items():
                if isinstance(attribute, types.FunctionType):
                    attribute = function(attribute)

                classDict[attributeName] = attribute
            return type.__new__(meta, classname, bases, classDict)
    return Decorated
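
# Illustrative note (not part of the vendored module): method_decorator_metaclass
# wraps every plain function in a class body with `function`. Used together with
# six's with_metaclass, e.g.
#
#     class TracedPhase(with_metaclass(method_decorator_metaclass(log))):
#         def processStartTag(self, token): ...
#
# every method such as processStartTag comes out wrapped by `log`, which is how
# debug mode instruments the Phase classes built in getPhases below.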


class HTMLParser(object):
    """HTML parser

    Generates a tree structure from a stream of (possibly malformed) HTML.

    """

    def __init__(self, tree=None, strict=False, namespaceHTMLElements=True, debug=False):
        """
        :arg tree: a treebuilder class controlling the type of tree that will be
            returned. Built in treebuilders can be accessed through
            html5lib.treebuilders.getTreeBuilder(treeType)

        :arg strict: raise an exception when a parse error is encountered

        :arg namespaceHTMLElements: whether or not to namespace HTML elements

        :arg debug: whether or not to enable debug mode which logs things

        Example:

        >>> from html5lib.html5parser import HTMLParser
        >>> parser = HTMLParser()  # generates parser with etree builder
        >>> parser = HTMLParser('lxml', strict=True)  # generates parser with lxml builder which is strict

        """

        # Raise an exception on the first error encountered
        self.strict = strict

        if tree is None:
            tree = treebuilders.getTreeBuilder("etree")
        self.tree = tree(namespaceHTMLElements)
        self.errors = []

        self.phases = {name: cls(self, self.tree) for name, cls in
                       getPhases(debug).items()}

    def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs):

        self.innerHTMLMode = innerHTML
        self.container = container
        self.scripting = scripting
        self.tokenizer = _tokenizer.HTMLTokenizer(stream, parser=self, **kwargs)
        self.reset()

        try:
            self.mainLoop()
        except _ReparseException:
            self.reset()
            self.mainLoop()

    def reset(self):
        self.tree.reset()
        self.firstStartTag = False
        self.errors = []
        self.log = []  # only used with debug mode
        # "quirks" / "limited quirks" / "no quirks"
        self.compatMode = "no quirks"

        if self.innerHTMLMode:
            self.innerHTML = self.container.lower()

            if self.innerHTML in cdataElements:
                self.tokenizer.state = self.tokenizer.rcdataState
            elif self.innerHTML in rcdataElements:
                self.tokenizer.state = self.tokenizer.rawtextState
            elif self.innerHTML == 'plaintext':
                self.tokenizer.state = self.tokenizer.plaintextState
            else:
                # state already is data state
                # self.tokenizer.state = self.tokenizer.dataState
                pass
            self.phase = self.phases["beforeHtml"]
            self.phase.insertHtmlElement()
            self.resetInsertionMode()
        else:
            self.innerHTML = False  # pylint:disable=redefined-variable-type
            self.phase = self.phases["initial"]

        self.lastPhase = None

        self.beforeRCDataPhase = None

        self.framesetOK = True

    @property
    def documentEncoding(self):
        """Name of the character encoding that was used to decode the input stream, or
        :obj:`None` if that is not determined yet

        """
        if not hasattr(self, 'tokenizer'):
            return None
        return self.tokenizer.stream.charEncoding[0].name

    def isHTMLIntegrationPoint(self, element):
        if (element.name == "annotation-xml" and
                element.namespace == namespaces["mathml"]):
            return ("encoding" in element.attributes and
                    element.attributes["encoding"].translate(
                        asciiUpper2Lower) in
                    ("text/html", "application/xhtml+xml"))
        else:
            return (element.namespace, element.name) in htmlIntegrationPointElements

    def isMathMLTextIntegrationPoint(self, element):
        return (element.namespace, element.name) in mathmlTextIntegrationPointElements
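
    # Illustrative note (not part of the vendored module): integration points
    # are where tree construction switches back from foreign (SVG/MathML)
    # content to ordinary HTML parsing. For example, <svg><title> is an HTML
    # integration point, and <annotation-xml encoding="text/HTML"> counts as
    # one because the encoding check above is case-insensitive, so in
    #
    #     parseFragment('<math><annotation-xml encoding="text/HTML"><p>hi')
    #
    # the <p> is handled by the normal phases rather than inForeignContent.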

    def isHTMLIntegrationPoint(self, element):
        if (element.name == "annotation-xml" and
                element.namespace == namespaces["mathml"]):
            return ("encoding" in element.attributes and
                    element.attributes["encoding"].translate(
                        asciiUpper2Lower) in
                    ("text/html", "application/xhtml+xml"))
        else:
            return (element.namespace, element.name) in htmlIntegrationPointElements

    def isMathMLTextIntegrationPoint(self, element):
        return (element.namespace, element.name) in mathmlTextIntegrationPointElements

    def mainLoop(self):
        CharactersToken = tokenTypes["Characters"]
        SpaceCharactersToken = tokenTypes["SpaceCharacters"]
        StartTagToken = tokenTypes["StartTag"]
        EndTagToken = tokenTypes["EndTag"]
        CommentToken = tokenTypes["Comment"]
        DoctypeToken = tokenTypes["Doctype"]
        ParseErrorToken = tokenTypes["ParseError"]

        for token in self.tokenizer:
            prev_token = None
            new_token = token
            while new_token is not None:
                prev_token = new_token
                currentNode = self.tree.openElements[-1] if self.tree.openElements else None
                currentNodeNamespace = currentNode.namespace if currentNode else None
                currentNodeName = currentNode.name if currentNode else None

                type = new_token["type"]

                if type == ParseErrorToken:
                    self.parseError(new_token["data"], new_token.get("datavars", {}))
                    new_token = None
                else:
                    if (len(self.tree.openElements) == 0 or
                        currentNodeNamespace == self.tree.defaultNamespace or
                            (self.isMathMLTextIntegrationPoint(currentNode) and
                             ((type == StartTagToken and
                               token["name"] not in frozenset(["mglyph", "malignmark"])) or
                              type in (CharactersToken, SpaceCharactersToken))) or
                            (currentNodeNamespace == namespaces["mathml"] and
                             currentNodeName == "annotation-xml" and
                             type == StartTagToken and
                             token["name"] == "svg") or
                            (self.isHTMLIntegrationPoint(currentNode) and
                             type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
                        phase = self.phase
                    else:
                        phase = self.phases["inForeignContent"]

                    if type == CharactersToken:
                        new_token = phase.processCharacters(new_token)
                    elif type == SpaceCharactersToken:
                        new_token = phase.processSpaceCharacters(new_token)
                    elif type == StartTagToken:
                        new_token = phase.processStartTag(new_token)
                    elif type == EndTagToken:
                        new_token = phase.processEndTag(new_token)
                    elif type == CommentToken:
                        new_token = phase.processComment(new_token)
                    elif type == DoctypeToken:
                        new_token = phase.processDoctype(new_token)

            if (type == StartTagToken and prev_token["selfClosing"] and
                    not prev_token["selfClosingAcknowledged"]):
                self.parseError("non-void-element-with-trailing-solidus",
                                {"name": prev_token["name"]})

        # When the loop finishes it's EOF
        reprocess = True
        phases = []
        while reprocess:
            phases.append(self.phase)
            reprocess = self.phase.processEOF()
            if reprocess:
                assert self.phase not in phases

    def parse(self, stream, *args, **kwargs):
        """Parse an HTML document into a well-formed tree

        :arg stream: a file-like object or string containing the HTML to be parsed

            The optional encoding parameter must be a string that indicates
            the encoding. If specified, that encoding will be used,
            regardless of any BOM or later declaration (such as in a meta
            element).

        :arg scripting: treat noscript elements as if JavaScript was turned on

        :returns: parsed tree

        Example:

        >>> from html5lib.html5parser import HTMLParser
        >>> parser = HTMLParser()
        >>> parser.parse('<html><body><p>This is a doc</p></body></html>')
        <Element u'{http://www.w3.org/1999/xhtml}html' at 0x7feac4909db0>

        """
        self._parse(stream, False, None, *args, **kwargs)
        return self.tree.getDocument()

    def parseFragment(self, stream, *args, **kwargs):
        """Parse an HTML fragment into a well-formed tree fragment

        :arg container: name of the element we're setting the innerHTML
            property on; defaults to 'div' when set to None

        :arg stream: a file-like object or string containing the HTML to be parsed

            The optional encoding parameter must be a string that indicates
            the encoding. If specified, that encoding will be used,
            regardless of any BOM or later declaration (such as in a meta
            element)

        :arg scripting: treat noscript elements as if JavaScript was turned on

        :returns: parsed tree

        Example:

        >>> from html5lib.html5parser import HTMLParser
        >>> parser = HTMLParser()
        >>> parser.parseFragment('<b>this is a fragment</b>')
        <Element u'DOCUMENT_FRAGMENT' at 0x7feac484b090>

        """
        self._parse(stream, True, *args, **kwargs)
        return self.tree.getFragment()

    def parseError(self, errorcode="XXX-undefined-error", datavars=None):
        # XXX The idea is to make errorcode mandatory.
        if datavars is None:
            datavars = {}
        self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
        if self.strict:
            raise ParseError(E[errorcode] % datavars)
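
    # Illustrative (not part of the module): collected errors are
    # (position, errorcode, datavars) tuples, and strict mode turns the
    # first one into a ParseError exception; outputs below are indicative:
    #
    #     >>> parser = HTMLParser()
    #     >>> _ = parser.parse('<p>unclosed')
    #     >>> parser.errors  # doctest: +SKIP
    #     [((1, 3), 'expected-doctype-but-got-start-tag', {'name': 'p'})]
    #     >>> HTMLParser(strict=True).parse('<p>unclosed')  # doctest: +SKIP
    #     Traceback (most recent call last):
    #         ...
    #     ParseError: ...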

    def adjustMathMLAttributes(self, token):
        adjust_attributes(token, adjustMathMLAttributes)

    def adjustSVGAttributes(self, token):
        adjust_attributes(token, adjustSVGAttributes)

    def adjustForeignAttributes(self, token):
        adjust_attributes(token, adjustForeignAttributesMap)

    def reparseTokenNormal(self, token):
        # pylint:disable=unused-argument
        self.parser.phase()

    def resetInsertionMode(self):
        # The name of this method is mostly historical. (It's also used in the
        # specification.)
        last = False
        newModes = {
            "select": "inSelect",
            "td": "inCell",
            "th": "inCell",
            "tr": "inRow",
            "tbody": "inTableBody",
            "thead": "inTableBody",
            "tfoot": "inTableBody",
            "caption": "inCaption",
            "colgroup": "inColumnGroup",
            "table": "inTable",
            "head": "inBody",
            "body": "inBody",
            "frameset": "inFrameset",
            "html": "beforeHead"
        }
        for node in self.tree.openElements[::-1]:
            nodeName = node.name
            new_phase = None
            if node == self.tree.openElements[0]:
                assert self.innerHTML
                last = True
                nodeName = self.innerHTML
            # Check for conditions that should only happen in the innerHTML
            # case
            if nodeName in ("select", "colgroup", "head", "html"):
                assert self.innerHTML

            if not last and node.namespace != self.tree.defaultNamespace:
                continue

            if nodeName in newModes:
                new_phase = self.phases[newModes[nodeName]]
                break
            elif last:
                new_phase = self.phases["inBody"]
                break

        self.phase = new_phase

    def parseRCDataRawtext(self, token, contentType):
        # Generic RCDATA/RAWTEXT Parsing algorithm
        assert contentType in ("RAWTEXT", "RCDATA")

        self.tree.insertElement(token)

        if contentType == "RAWTEXT":
            self.tokenizer.state = self.tokenizer.rawtextState
        else:
            self.tokenizer.state = self.tokenizer.rcdataState

        self.originalPhase = self.phase

        self.phase = self.phases["text"]
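
# Illustrative (not library API): with debug=True, getPhases wraps every
# Phase method via method_decorator_metaclass above, and each processed
# token leaves a breadcrumb in parser.log. Entries are (tokenizer state,
# active phase, handling phase, method name, token info); the exact
# contents depend entirely on the input:
#
#     >>> parser = HTMLParser(debug=True)
#     >>> _ = parser.parse('<p>hi</p>')
#     >>> parser.log[0]  # doctest: +SKIP
#     ('dataState', 'InitialPhase', 'InitialPhase', 'processStartTag',
#      {'type': 'StartTag', 'name': 'p'})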


@_utils.memoize
def getPhases(debug):
    def log(function):
        """Logger that records which phase processes each token"""
        type_names = {value: key for key, value in tokenTypes.items()}

        def wrapped(self, *args, **kwargs):
            if function.__name__.startswith("process") and len(args) > 0:
                token = args[0]
                info = {"type": type_names[token['type']]}
                if token['type'] in tagTokenTypes:
                    info["name"] = token['name']

                self.parser.log.append((self.parser.tokenizer.state.__name__,
                                        self.parser.phase.__class__.__name__,
                                        self.__class__.__name__,
                                        function.__name__,
                                        info))
                return function(self, *args, **kwargs)
            else:
                return function(self, *args, **kwargs)
        return wrapped

    def getMetaclass(use_metaclass, metaclass_func):
        if use_metaclass:
            return method_decorator_metaclass(metaclass_func)
        else:
            return type

    # pylint:disable=unused-argument
    class Phase(with_metaclass(getMetaclass(debug, log))):
        """Base class for helper object that implements each phase of processing
        """
        __slots__ = ("parser", "tree", "__startTagCache", "__endTagCache")

        def __init__(self, parser, tree):
            self.parser = parser
            self.tree = tree
            self.__startTagCache = {}
            self.__endTagCache = {}

        def processEOF(self):
            raise NotImplementedError

        def processComment(self, token):
            # For most phases the following is correct. Where it's not it will be
            # overridden.
            self.tree.insertComment(token, self.tree.openElements[-1])

        def processDoctype(self, token):
            self.parser.parseError("unexpected-doctype")

        def processCharacters(self, token):
            self.tree.insertText(token["data"])

        def processSpaceCharacters(self, token):
            self.tree.insertText(token["data"])

        def processStartTag(self, token):
            # Note the caching is done here rather than BoundMethodDispatcher as doing it there
            # requires a circular reference to the Phase, and this ends up with a significant
            # (CPython 2.7, 3.8) GC cost when parsing many short inputs
            name = token["name"]
            # In Py2, using `in` is quicker in general than try/except KeyError
            # In Py3, `in` is quicker when there are few cache hits (typically short inputs)
            if name in self.__startTagCache:
                func = self.__startTagCache[name]
            else:
                func = self.__startTagCache[name] = self.startTagHandler[name]
                # bound the cache size in case we get loads of unknown tags
                while len(self.__startTagCache) > len(self.startTagHandler) * 1.1:
                    # this makes the eviction policy random on Py < 3.7 and FIFO >= 3.7
                    self.__startTagCache.pop(next(iter(self.__startTagCache)))
            return func(token)

        def startTagHtml(self, token):
            if not self.parser.firstStartTag and token["name"] == "html":
                self.parser.parseError("non-html-root")
            # XXX Need a check here to see if the first start tag token emitted is
            # this token... If it's not, invoke self.parser.parseError().
            for attr, value in token["data"].items():
                if attr not in self.tree.openElements[0].attributes:
                    self.tree.openElements[0].attributes[attr] = value
            self.parser.firstStartTag = False

        def processEndTag(self, token):
            # Note the caching is done here rather than BoundMethodDispatcher as doing it there
            # requires a circular reference to the Phase, and this ends up with a significant
            # (CPython 2.7, 3.8) GC cost when parsing many short inputs
            name = token["name"]
            # In Py2, using `in` is quicker in general than try/except KeyError
            # In Py3, `in` is quicker when there are few cache hits (typically short inputs)
            if name in self.__endTagCache:
                func = self.__endTagCache[name]
            else:
                func = self.__endTagCache[name] = self.endTagHandler[name]
                # bound the cache size in case we get loads of unknown tags
                while len(self.__endTagCache) > len(self.endTagHandler) * 1.1:
                    # this makes the eviction policy random on Py < 3.7 and FIFO >= 3.7
                    self.__endTagCache.pop(next(iter(self.__endTagCache)))
            return func(token)

    class InitialPhase(Phase):
        __slots__ = tuple()

        def processSpaceCharacters(self, token):
            pass

        def processComment(self, token):
            self.tree.insertComment(token, self.tree.document)

        def processDoctype(self, token):
            name = token["name"]
            publicId = token["publicId"]
            systemId = token["systemId"]
            correct = token["correct"]

            if (name != "html" or publicId is not None or
                    systemId is not None and systemId != "about:legacy-compat"):
                self.parser.parseError("unknown-doctype")

            if publicId is None:
                publicId = ""

            self.tree.insertDoctype(token)

            if publicId != "":
                publicId = publicId.translate(asciiUpper2Lower)

            if (not correct or token["name"] != "html" or
                    publicId.startswith(
                        ("+//silmaril//dtd html pro v0r11 19970101//",
                         "-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
                         "-//as//dtd html 3.0 aswedit + extensions//",
                         "-//ietf//dtd html 2.0 level 1//",
                         "-//ietf//dtd html 2.0 level 2//",
                         "-//ietf//dtd html 2.0 strict level 1//",
                         "-//ietf//dtd html 2.0 strict level 2//",
                         "-//ietf//dtd html 2.0 strict//",
                         "-//ietf//dtd html 2.0//",
                         "-//ietf//dtd html 2.1e//",
                         "-//ietf//dtd html 3.0//",
                         "-//ietf//dtd html 3.2 final//",
                         "-//ietf//dtd html 3.2//",
                         "-//ietf//dtd html 3//",
                         "-//ietf//dtd html level 0//",
                         "-//ietf//dtd html level 1//",
                         "-//ietf//dtd html level 2//",
                         "-//ietf//dtd html level 3//",
                         "-//ietf//dtd html strict level 0//",
                         "-//ietf//dtd html strict level 1//",
                         "-//ietf//dtd html strict level 2//",
                         "-//ietf//dtd html strict level 3//",
                         "-//ietf//dtd html strict//",
                         "-//ietf//dtd html//",
                         "-//metrius//dtd metrius presentational//",
                         "-//microsoft//dtd internet explorer 2.0 html strict//",
                         "-//microsoft//dtd internet explorer 2.0 html//",
                         "-//microsoft//dtd internet explorer 2.0 tables//",
                         "-//microsoft//dtd internet explorer 3.0 html strict//",
                         "-//microsoft//dtd internet explorer 3.0 html//",
                         "-//microsoft//dtd internet explorer 3.0 tables//",
                         "-//netscape comm. corp.//dtd html//",
                         "-//netscape comm. corp.//dtd strict html//",
                         "-//o'reilly and associates//dtd html 2.0//",
                         "-//o'reilly and associates//dtd html extended 1.0//",
                         "-//o'reilly and associates//dtd html extended relaxed 1.0//",
                         "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
                         "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
                         "-//spyglass//dtd html 2.0 extended//",
                         "-//sq//dtd html 2.0 hotmetal + extensions//",
                         "-//sun microsystems corp.//dtd hotjava html//",
                         "-//sun microsystems corp.//dtd hotjava strict html//",
                         "-//w3c//dtd html 3 1995-03-24//",
                         "-//w3c//dtd html 3.2 draft//",
                         "-//w3c//dtd html 3.2 final//",
                         "-//w3c//dtd html 3.2//",
                         "-//w3c//dtd html 3.2s draft//",
                         "-//w3c//dtd html 4.0 frameset//",
                         "-//w3c//dtd html 4.0 transitional//",
                         "-//w3c//dtd html experimental 19960712//",
                         "-//w3c//dtd html experimental 970421//",
                         "-//w3c//dtd w3 html//",
                         "-//w3o//dtd w3 html 3.0//",
                         "-//webtechs//dtd mozilla html 2.0//",
                         "-//webtechs//dtd mozilla html//")) or
                    publicId in ("-//w3o//dtd w3 html strict 3.0//en//",
                                 "-/w3c/dtd html 4.0 transitional/en",
                                 "html") or
                    publicId.startswith(
                        ("-//w3c//dtd html 4.01 frameset//",
                         "-//w3c//dtd html 4.01 transitional//")) and
                    systemId is None or
                    systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
                self.parser.compatMode = "quirks"
            elif (publicId.startswith(
                    ("-//w3c//dtd xhtml 1.0 frameset//",
                     "-//w3c//dtd xhtml 1.0 transitional//")) or
                  publicId.startswith(
                      ("-//w3c//dtd html 4.01 frameset//",
                       "-//w3c//dtd html 4.01 transitional//")) and
                  systemId is not None):
                self.parser.compatMode = "limited quirks"

            self.parser.phase = self.parser.phases["beforeHtml"]

        def anythingElse(self):
            self.parser.compatMode = "quirks"
            self.parser.phase = self.parser.phases["beforeHtml"]

        def processCharacters(self, token):
            self.parser.parseError("expected-doctype-but-got-chars")
            self.anythingElse()
            return token

        def processStartTag(self, token):
            self.parser.parseError("expected-doctype-but-got-start-tag",
                                   {"name": token["name"]})
            self.anythingElse()
            return token

        def processEndTag(self, token):
            self.parser.parseError("expected-doctype-but-got-end-tag",
                                   {"name": token["name"]})
            self.anythingElse()
            return token

        def processEOF(self):
            self.parser.parseError("expected-doctype-but-got-eof")
            self.anythingElse()
            return True

    class BeforeHtmlPhase(Phase):
        __slots__ = tuple()

        # helper methods
        def insertHtmlElement(self):
            self.tree.insertRoot(impliedTagToken("html", "StartTag"))
            self.parser.phase = self.parser.phases["beforeHead"]

        # other
        def processEOF(self):
            self.insertHtmlElement()
            return True

        def processComment(self, token):
            self.tree.insertComment(token, self.tree.document)

        def processSpaceCharacters(self, token):
            pass

        def processCharacters(self, token):
            self.insertHtmlElement()
            return token

        def processStartTag(self, token):
            if token["name"] == "html":
                self.parser.firstStartTag = True
            self.insertHtmlElement()
            return token

        def processEndTag(self, token):
            if token["name"] not in ("head", "body", "html", "br"):
                self.parser.parseError("unexpected-end-tag-before-html",
                                       {"name": token["name"]})
            else:
                self.insertHtmlElement()
                return token

    class BeforeHeadPhase(Phase):
        __slots__ = tuple()

        def processEOF(self):
            self.startTagHead(impliedTagToken("head", "StartTag"))
            return True

        def processSpaceCharacters(self, token):
            pass

        def processCharacters(self, token):
            self.startTagHead(impliedTagToken("head", "StartTag"))
            return token

        def startTagHtml(self, token):
            return self.parser.phases["inBody"].processStartTag(token)

        def startTagHead(self, token):
            self.tree.insertElement(token)
            self.tree.headPointer = self.tree.openElements[-1]
            self.parser.phase = self.parser.phases["inHead"]

        def startTagOther(self, token):
            self.startTagHead(impliedTagToken("head", "StartTag"))
            return token

        def endTagImplyHead(self, token):
            self.startTagHead(impliedTagToken("head", "StartTag"))
            return token

        def endTagOther(self, token):
            self.parser.parseError("end-tag-after-implied-root",
                                   {"name": token["name"]})

        startTagHandler = _utils.MethodDispatcher([
            ("html", startTagHtml),
            ("head", startTagHead)
        ])
        startTagHandler.default = startTagOther

        endTagHandler = _utils.MethodDispatcher([
            (("head", "body", "html", "br"), endTagImplyHead)
        ])
        endTagHandler.default = endTagOther

    class InHeadPhase(Phase):
        __slots__ = tuple()

        # the real thing
        def processEOF(self):
            self.anythingElse()
            return True

        def processCharacters(self, token):
            self.anythingElse()
            return token

        def startTagHtml(self, token):
            return self.parser.phases["inBody"].processStartTag(token)

        def startTagHead(self, token):
            self.parser.parseError("two-heads-are-not-better-than-one")

        def startTagBaseLinkCommand(self, token):
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True

        def startTagMeta(self, token):
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True

            attributes = token["data"]
            if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
                if "charset" in attributes:
                    self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
                elif ("content" in attributes and
                      "http-equiv" in attributes and
                      attributes["http-equiv"].lower() == "content-type"):
                    # Encoding it as UTF-8 here is a hack, as really we should pass
                    # the abstract Unicode string, and just use the
                    # ContentAttrParser on that, but using UTF-8 allows all chars
                    # to be encoded and, as an ASCII superset, works.
                    data = _inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
                    parser = _inputstream.ContentAttrParser(data)
                    codec = parser.parse()
                    self.parser.tokenizer.stream.changeEncoding(codec)

        def startTagTitle(self, token):
            self.parser.parseRCDataRawtext(token, "RCDATA")

        def startTagNoFramesStyle(self, token):
            # Need to decide whether to implement the scripting-disabled case
            self.parser.parseRCDataRawtext(token, "RAWTEXT")

        def startTagNoscript(self, token):
            if self.parser.scripting:
                self.parser.parseRCDataRawtext(token, "RAWTEXT")
            else:
                self.tree.insertElement(token)
                self.parser.phase = self.parser.phases["inHeadNoscript"]

        def startTagScript(self, token):
            self.tree.insertElement(token)
            self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
            self.parser.originalPhase = self.parser.phase
            self.parser.phase = self.parser.phases["text"]

        def startTagOther(self, token):
            self.anythingElse()
            return token

        def endTagHead(self, token):
            node = self.parser.tree.openElements.pop()
            assert node.name == "head", "Expected head got %s" % node.name
            self.parser.phase = self.parser.phases["afterHead"]

        def endTagHtmlBodyBr(self, token):
            self.anythingElse()
            return token

        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

        def anythingElse(self):
            self.endTagHead(impliedTagToken("head"))

        startTagHandler = _utils.MethodDispatcher([
            ("html", startTagHtml),
            ("title", startTagTitle),
            (("noframes", "style"), startTagNoFramesStyle),
            ("noscript", startTagNoscript),
            ("script", startTagScript),
            (("base", "basefont", "bgsound", "command", "link"),
             startTagBaseLinkCommand),
            ("meta", startTagMeta),
            ("head", startTagHead)
        ])
        startTagHandler.default = startTagOther

        endTagHandler = _utils.MethodDispatcher([
            ("head", endTagHead),
            (("br", "html", "body"), endTagHtmlBodyBr)
        ])
        endTagHandler.default = endTagOther
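
    # Illustrative (not library API): a tentative encoding guess plus a
    # charset meta in head is what triggers the changeEncoding path in
    # startTagMeta above; a wrong guess restarts the whole parse via
    # _ReparseException. The reported name below is indicative:
    #
    #     >>> p = HTMLParser()
    #     >>> _ = p.parse(b'<!DOCTYPE html><meta charset="windows-1252">caf\xe9')
    #     >>> p.documentEncoding  # doctest: +SKIP
    #     'windows-1252'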

    class InHeadNoscriptPhase(Phase):
        __slots__ = tuple()

        def processEOF(self):
            self.parser.parseError("eof-in-head-noscript")
            self.anythingElse()
            return True

        def processComment(self, token):
            return self.parser.phases["inHead"].processComment(token)

        def processCharacters(self, token):
            self.parser.parseError("char-in-head-noscript")
            self.anythingElse()
            return token

        def processSpaceCharacters(self, token):
            return self.parser.phases["inHead"].processSpaceCharacters(token)

        def startTagHtml(self, token):
            return self.parser.phases["inBody"].processStartTag(token)

        def startTagBaseLinkCommand(self, token):
            return self.parser.phases["inHead"].processStartTag(token)

        def startTagHeadNoscript(self, token):
            self.parser.parseError("unexpected-start-tag", {"name": token["name"]})

        def startTagOther(self, token):
            self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
            self.anythingElse()
            return token

        def endTagNoscript(self, token):
            node = self.parser.tree.openElements.pop()
            assert node.name == "noscript", "Expected noscript got %s" % node.name
            self.parser.phase = self.parser.phases["inHead"]

        def endTagBr(self, token):
            self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
            self.anythingElse()
            return token

        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

        def anythingElse(self):
            # Caller must raise parse error first!
            self.endTagNoscript(impliedTagToken("noscript"))

        startTagHandler = _utils.MethodDispatcher([
            ("html", startTagHtml),
            (("basefont", "bgsound", "link", "meta", "noframes", "style"),
             startTagBaseLinkCommand),
            (("head", "noscript"), startTagHeadNoscript),
        ])
        startTagHandler.default = startTagOther

        endTagHandler = _utils.MethodDispatcher([
            ("noscript", endTagNoscript),
            ("br", endTagBr),
        ])
        endTagHandler.default = endTagOther

    class AfterHeadPhase(Phase):
        __slots__ = tuple()

        def processEOF(self):
            self.anythingElse()
            return True

        def processCharacters(self, token):
            self.anythingElse()
            return token

        def startTagHtml(self, token):
            return self.parser.phases["inBody"].processStartTag(token)

        def startTagBody(self, token):
            self.parser.framesetOK = False
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inBody"]

        def startTagFrameset(self, token):
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inFrameset"]

        def startTagFromHead(self, token):
            self.parser.parseError("unexpected-start-tag-out-of-my-head",
                                   {"name": token["name"]})
            self.tree.openElements.append(self.tree.headPointer)
            self.parser.phases["inHead"].processStartTag(token)
            for node in self.tree.openElements[::-1]:
                if node.name == "head":
                    self.tree.openElements.remove(node)
                    break

        def startTagHead(self, token):
            self.parser.parseError("unexpected-start-tag", {"name": token["name"]})

        def startTagOther(self, token):
            self.anythingElse()
            return token

        def endTagHtmlBodyBr(self, token):
            self.anythingElse()
            return token

        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

        def anythingElse(self):
            self.tree.insertElement(impliedTagToken("body", "StartTag"))
            self.parser.phase = self.parser.phases["inBody"]
            self.parser.framesetOK = True

        startTagHandler = _utils.MethodDispatcher([
            ("html", startTagHtml),
            ("body", startTagBody),
            ("frameset", startTagFrameset),
            (("base", "basefont", "bgsound", "link", "meta", "noframes",
              "script", "style", "title"),
             startTagFromHead),
            ("head", startTagHead)
        ])
        startTagHandler.default = startTagOther
        endTagHandler = _utils.MethodDispatcher([(("body", "html", "br"),
                                                  endTagHtmlBodyBr)])
        endTagHandler.default = endTagOther

    class InBodyPhase(Phase):
        # http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
        # the really-really-really-very crazy mode
        __slots__ = ("processSpaceCharacters",)

        def __init__(self, *args, **kwargs):
            super(InBodyPhase, self).__init__(*args, **kwargs)
            # Set this to the default handler
            self.processSpaceCharacters = self.processSpaceCharactersNonPre

        def isMatchingFormattingElement(self, node1, node2):
            return (node1.name == node2.name and
                    node1.namespace == node2.namespace and
                    node1.attributes == node2.attributes)

        # helper
        def addFormattingElement(self, token):
            self.tree.insertElement(token)
            element = self.tree.openElements[-1]

            matchingElements = []
            for node in self.tree.activeFormattingElements[::-1]:
                if node is Marker:
                    break
                elif self.isMatchingFormattingElement(node, element):
                    matchingElements.append(node)

            assert len(matchingElements) <= 3
            if len(matchingElements) == 3:
                self.tree.activeFormattingElements.remove(matchingElements[-1])
            self.tree.activeFormattingElements.append(element)

        # the real deal
        def processEOF(self):
            allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
                                          "tfoot", "th", "thead", "tr", "body",
                                          "html"))
            for node in self.tree.openElements[::-1]:
                if node.name not in allowed_elements:
                    self.parser.parseError("expected-closing-tag-but-got-eof")
                    break
            # Stop parsing

        def processSpaceCharactersDropNewline(self, token):
            # Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
            # want to drop leading newlines
            data = token["data"]
            self.processSpaceCharacters = self.processSpaceCharactersNonPre
            if (data.startswith("\n") and
                self.tree.openElements[-1].name in ("pre", "listing", "textarea") and
                    not self.tree.openElements[-1].hasContent()):
                data = data[1:]
            if data:
                self.tree.reconstructActiveFormattingElements()
                self.tree.insertText(data)

        def processCharacters(self, token):
            if token["data"] == "\u0000":
                # The tokenizer should always emit null on its own
                return
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertText(token["data"])
            # This must be bad for performance
            if (self.parser.framesetOK and
                    any([char not in spaceCharacters
                         for char in token["data"]])):
                self.parser.framesetOK = False

        def processSpaceCharactersNonPre(self, token):
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertText(token["data"])

        def startTagProcessInHead(self, token):
            return self.parser.phases["inHead"].processStartTag(token)

        def startTagBody(self, token):
            self.parser.parseError("unexpected-start-tag", {"name": "body"})
            if (len(self.tree.openElements) == 1 or
                    self.tree.openElements[1].name != "body"):
                assert self.parser.innerHTML
            else:
                self.parser.framesetOK = False
                for attr, value in token["data"].items():
                    if attr not in self.tree.openElements[1].attributes:
                        self.tree.openElements[1].attributes[attr] = value

        def startTagFrameset(self, token):
            self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
            if (len(self.tree.openElements) == 1 or
                    self.tree.openElements[1].name != "body"):
                assert self.parser.innerHTML
            elif not self.parser.framesetOK:
                pass
            else:
                if self.tree.openElements[1].parent:
                    self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
                while self.tree.openElements[-1].name != "html":
                    self.tree.openElements.pop()
                self.tree.insertElement(token)
                self.parser.phase = self.parser.phases["inFrameset"]

        def startTagCloseP(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)

        def startTagPreListing(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)
            self.parser.framesetOK = False
            self.processSpaceCharacters = self.processSpaceCharactersDropNewline

        def startTagForm(self, token):
            if self.tree.formPointer:
                self.parser.parseError("unexpected-start-tag", {"name": "form"})
            else:
                if self.tree.elementInScope("p", variant="button"):
                    self.endTagP(impliedTagToken("p"))
                self.tree.insertElement(token)
                self.tree.formPointer = self.tree.openElements[-1]

        def startTagListItem(self, token):
            self.parser.framesetOK = False

            stopNamesMap = {"li": ["li"],
                            "dt": ["dt", "dd"],
                            "dd": ["dt", "dd"]}
            stopNames = stopNamesMap[token["name"]]
            for node in reversed(self.tree.openElements):
                if node.name in stopNames:
                    self.parser.phase.processEndTag(
                        impliedTagToken(node.name, "EndTag"))
                    break
                if (node.nameTuple in specialElements and
                        node.name not in ("address", "div", "p")):
                    break

            if self.tree.elementInScope("p", variant="button"):
                self.parser.phase.processEndTag(
                    impliedTagToken("p", "EndTag"))

            self.tree.insertElement(token)

        def startTagPlaintext(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)
            self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
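
        # Illustrative (not library API): processSpaceCharactersDropNewline
        # is why a newline immediately after <pre>, <listing> or <textarea>
        # disappears from the tree:
        #
        #     >>> frag = parseFragment('<pre>\nkeep</pre>')  # doctest: +SKIP
        #     >>> # the text child of <pre> is 'keep', not '\nkeep'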
variant="button"): self.endTagP(impliedTagToken("p")) if self.tree.openElements[-1].name in headingElements: self.parser.parseError("unexpected-start-tag", {"name": token["name"]}) self.tree.openElements.pop() self.tree.insertElement(token) def startTagA(self, token): afeAElement = self.tree.elementInActiveFormattingElements("a") if afeAElement: self.parser.parseError("unexpected-start-tag-implies-end-tag", {"startName": "a", "endName": "a"}) self.endTagFormatting(impliedTagToken("a")) if afeAElement in self.tree.openElements: self.tree.openElements.remove(afeAElement) if afeAElement in self.tree.activeFormattingElements: self.tree.activeFormattingElements.remove(afeAElement) self.tree.reconstructActiveFormattingElements() self.addFormattingElement(token) def startTagFormatting(self, token): self.tree.reconstructActiveFormattingElements() self.addFormattingElement(token) def startTagNobr(self, token): self.tree.reconstructActiveFormattingElements() if self.tree.elementInScope("nobr"): self.parser.parseError("unexpected-start-tag-implies-end-tag", {"startName": "nobr", "endName": "nobr"}) self.processEndTag(impliedTagToken("nobr")) # XXX Need tests that trigger the following self.tree.reconstructActiveFormattingElements() self.addFormattingElement(token) def startTagButton(self, token): if self.tree.elementInScope("button"): self.parser.parseError("unexpected-start-tag-implies-end-tag", {"startName": "button", "endName": "button"}) self.processEndTag(impliedTagToken("button")) return token else: self.tree.reconstructActiveFormattingElements() self.tree.insertElement(token) self.parser.framesetOK = False def startTagAppletMarqueeObject(self, token): self.tree.reconstructActiveFormattingElements() self.tree.insertElement(token) self.tree.activeFormattingElements.append(Marker) self.parser.framesetOK = False def startTagXmp(self, token): if self.tree.elementInScope("p", variant="button"): self.endTagP(impliedTagToken("p")) self.tree.reconstructActiveFormattingElements() self.parser.framesetOK = False self.parser.parseRCDataRawtext(token, "RAWTEXT") def startTagTable(self, token): if self.parser.compatMode != "quirks": if self.tree.elementInScope("p", variant="button"): self.processEndTag(impliedTagToken("p")) self.tree.insertElement(token) self.parser.framesetOK = False self.parser.phase = self.parser.phases["inTable"] def startTagVoidFormatting(self, token): self.tree.reconstructActiveFormattingElements() self.tree.insertElement(token) self.tree.openElements.pop() token["selfClosingAcknowledged"] = True self.parser.framesetOK = False def startTagInput(self, token): framesetOK = self.parser.framesetOK self.startTagVoidFormatting(token) if ("type" in token["data"] and token["data"]["type"].translate(asciiUpper2Lower) == "hidden"): # input type=hidden doesn't change framesetOK self.parser.framesetOK = framesetOK def startTagParamSource(self, token): self.tree.insertElement(token) self.tree.openElements.pop() token["selfClosingAcknowledged"] = True def startTagHr(self, token): if self.tree.elementInScope("p", variant="button"): self.endTagP(impliedTagToken("p")) self.tree.insertElement(token) self.tree.openElements.pop() token["selfClosingAcknowledged"] = True self.parser.framesetOK = False def startTagImage(self, token): # No really... 
self.parser.parseError("unexpected-start-tag-treated-as", {"originalName": "image", "newName": "img"}) self.processStartTag(impliedTagToken("img", "StartTag", attributes=token["data"], selfClosing=token["selfClosing"])) def startTagIsIndex(self, token): self.parser.parseError("deprecated-tag", {"name": "isindex"}) if self.tree.formPointer: return form_attrs = {} if "action" in token["data"]: form_attrs["action"] = token["data"]["action"] self.processStartTag(impliedTagToken("form", "StartTag", attributes=form_attrs)) self.processStartTag(impliedTagToken("hr", "StartTag")) self.processStartTag(impliedTagToken("label", "StartTag")) # XXX Localization ... if "prompt" in token["data"]: prompt = token["data"]["prompt"] else: prompt = "This is a searchable index. Enter search keywords: " self.processCharacters( {"type": tokenTypes["Characters"], "data": prompt}) attributes = token["data"].copy() if "action" in attributes: del attributes["action"] if "prompt" in attributes: del attributes["prompt"] attributes["name"] = "isindex" self.processStartTag(impliedTagToken("input", "StartTag", attributes=attributes, selfClosing=token["selfClosing"])) self.processEndTag(impliedTagToken("label")) self.processStartTag(impliedTagToken("hr", "StartTag")) self.processEndTag(impliedTagToken("form")) def startTagTextarea(self, token): self.tree.insertElement(token) self.parser.tokenizer.state = self.parser.tokenizer.rcdataState self.processSpaceCharacters = self.processSpaceCharactersDropNewline self.parser.framesetOK = False def startTagIFrame(self, token): self.parser.framesetOK = False self.startTagRawtext(token) def startTagNoscript(self, token): if self.parser.scripting: self.startTagRawtext(token) else: self.startTagOther(token) def startTagRawtext(self, token): """iframe, noembed noframes, noscript(if scripting enabled)""" self.parser.parseRCDataRawtext(token, "RAWTEXT") def startTagOpt(self, token): if self.tree.openElements[-1].name == "option": self.parser.phase.processEndTag(impliedTagToken("option")) self.tree.reconstructActiveFormattingElements() self.parser.tree.insertElement(token) def startTagSelect(self, token): self.tree.reconstructActiveFormattingElements() self.tree.insertElement(token) self.parser.framesetOK = False if self.parser.phase in (self.parser.phases["inTable"], self.parser.phases["inCaption"], self.parser.phases["inColumnGroup"], self.parser.phases["inTableBody"], self.parser.phases["inRow"], self.parser.phases["inCell"]): self.parser.phase = self.parser.phases["inSelectInTable"] else: self.parser.phase = self.parser.phases["inSelect"] def startTagRpRt(self, token): if self.tree.elementInScope("ruby"): self.tree.generateImpliedEndTags() if self.tree.openElements[-1].name != "ruby": self.parser.parseError() self.tree.insertElement(token) def startTagMath(self, token): self.tree.reconstructActiveFormattingElements() self.parser.adjustMathMLAttributes(token) self.parser.adjustForeignAttributes(token) token["namespace"] = namespaces["mathml"] self.tree.insertElement(token) # Need to get the parse error right for the case where the token # has a namespace not equal to the xmlns attribute if token["selfClosing"]: self.tree.openElements.pop() token["selfClosingAcknowledged"] = True def startTagSvg(self, token): self.tree.reconstructActiveFormattingElements() self.parser.adjustSVGAttributes(token) self.parser.adjustForeignAttributes(token) token["namespace"] = namespaces["svg"] self.tree.insertElement(token) # Need to get the parse error right for the case where the token # has a namespace 

        def startTagTextarea(self, token):
            self.tree.insertElement(token)
            self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
            self.processSpaceCharacters = self.processSpaceCharactersDropNewline
            self.parser.framesetOK = False

        def startTagIFrame(self, token):
            self.parser.framesetOK = False
            self.startTagRawtext(token)

        def startTagNoscript(self, token):
            if self.parser.scripting:
                self.startTagRawtext(token)
            else:
                self.startTagOther(token)

        def startTagRawtext(self, token):
            """iframe, noembed, noframes, noscript (if scripting enabled)"""
            self.parser.parseRCDataRawtext(token, "RAWTEXT")

        def startTagOpt(self, token):
            if self.tree.openElements[-1].name == "option":
                self.parser.phase.processEndTag(impliedTagToken("option"))
            self.tree.reconstructActiveFormattingElements()
            self.parser.tree.insertElement(token)

        def startTagSelect(self, token):
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
            self.parser.framesetOK = False
            if self.parser.phase in (self.parser.phases["inTable"],
                                     self.parser.phases["inCaption"],
                                     self.parser.phases["inColumnGroup"],
                                     self.parser.phases["inTableBody"],
                                     self.parser.phases["inRow"],
                                     self.parser.phases["inCell"]):
                self.parser.phase = self.parser.phases["inSelectInTable"]
            else:
                self.parser.phase = self.parser.phases["inSelect"]

        def startTagRpRt(self, token):
            if self.tree.elementInScope("ruby"):
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != "ruby":
                    self.parser.parseError()
            self.tree.insertElement(token)

        def startTagMath(self, token):
            self.tree.reconstructActiveFormattingElements()
            self.parser.adjustMathMLAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = namespaces["mathml"]
            self.tree.insertElement(token)
            # Need to get the parse error right for the case where the token
            # has a namespace not equal to the xmlns attribute
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True

        def startTagSvg(self, token):
            self.tree.reconstructActiveFormattingElements()
            self.parser.adjustSVGAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = namespaces["svg"]
            self.tree.insertElement(token)
            # Need to get the parse error right for the case where the token
            # has a namespace not equal to the xmlns attribute
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True

        def startTagMisplaced(self, token):
            """ Elements that should be children of other elements that have a
            different insertion mode; here they are ignored
            "caption", "col", "colgroup", "frame", "frameset", "head",
            "option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
            "tr", "noscript"
            """
            self.parser.parseError("unexpected-start-tag-ignored",
                                   {"name": token["name"]})

        def startTagOther(self, token):
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)

        def endTagP(self, token):
            if not self.tree.elementInScope("p", variant="button"):
                self.startTagCloseP(impliedTagToken("p", "StartTag"))
                self.parser.parseError("unexpected-end-tag", {"name": "p"})
                self.endTagP(impliedTagToken("p", "EndTag"))
            else:
                self.tree.generateImpliedEndTags("p")
                if self.tree.openElements[-1].name != "p":
                    self.parser.parseError("unexpected-end-tag", {"name": "p"})
                node = self.tree.openElements.pop()
                while node.name != "p":
                    node = self.tree.openElements.pop()

        def endTagBody(self, token):
            if not self.tree.elementInScope("body"):
                self.parser.parseError()
                return
            elif self.tree.openElements[-1].name != "body":
                for node in self.tree.openElements[2:]:
                    if node.name not in frozenset(("dd", "dt", "li", "optgroup",
                                                   "option", "p", "rp", "rt",
                                                   "tbody", "td", "tfoot",
                                                   "th", "thead", "tr", "body",
                                                   "html")):
                        # Not sure this is the correct name for the parse error
                        self.parser.parseError(
                            "expected-one-end-tag-but-got-another",
                            {"gotName": "body", "expectedName": node.name})
                        break
            self.parser.phase = self.parser.phases["afterBody"]

        def endTagHtml(self, token):
            # We repeat the test for the body end tag token being ignored here
            if self.tree.elementInScope("body"):
                self.endTagBody(impliedTagToken("body"))
                return token

        def endTagBlock(self, token):
            # Put us back in the right whitespace handling mode
            if token["name"] == "pre":
                self.processSpaceCharacters = self.processSpaceCharactersNonPre
            inScope = self.tree.elementInScope(token["name"])
            if inScope:
                self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            if inScope:
                node = self.tree.openElements.pop()
                while node.name != token["name"]:
                    node = self.tree.openElements.pop()

        def endTagForm(self, token):
            node = self.tree.formPointer
            self.tree.formPointer = None
            if node is None or not self.tree.elementInScope(node):
                self.parser.parseError("unexpected-end-tag",
                                       {"name": "form"})
            else:
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1] != node:
                    self.parser.parseError("end-tag-too-early-ignored",
                                           {"name": "form"})
                self.tree.openElements.remove(node)

        def endTagListItem(self, token):
            if token["name"] == "li":
                variant = "list"
            else:
                variant = None
            if not self.tree.elementInScope(token["name"], variant=variant):
                self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
            else:
                self.tree.generateImpliedEndTags(exclude=token["name"])
                if self.tree.openElements[-1].name != token["name"]:
                    self.parser.parseError(
                        "end-tag-too-early",
                        {"name": token["name"]})
                node = self.tree.openElements.pop()
                while node.name != token["name"]:
                    node = self.tree.openElements.pop()

        def endTagHeading(self, token):
            for item in headingElements:
                if self.tree.elementInScope(item):
                    self.tree.generateImpliedEndTags()
                    break
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})

            for item in headingElements:
                if self.tree.elementInScope(item):
                    item = self.tree.openElements.pop()
                    while item.name not in headingElements:
                        item = self.tree.openElements.pop()
                    break

        def endTagFormatting(self, token):
            """The much-feared adoption agency algorithm"""
            # http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
            # XXX Better parseError messages appreciated.

            # Step 1
            outerLoopCounter = 0

            # Step 2
            while outerLoopCounter < 8:

                # Step 3
                outerLoopCounter += 1

                # Step 4:

                # Let the formatting element be the last element in
                # the list of active formatting elements that:
                # - is between the end of the list and the last scope
                # marker in the list, if any, or the start of the list
                # otherwise, and
                # - has the same tag name as the token.
                formattingElement = self.tree.elementInActiveFormattingElements(
                    token["name"])
                if (not formattingElement or
                    (formattingElement in self.tree.openElements and
                     not self.tree.elementInScope(formattingElement.name))):
                    # If there is no such node, then abort these steps
                    # and instead act as described in the "any other
                    # end tag" entry below.
                    self.endTagOther(token)
                    return

                # Otherwise, if there is such a node, but that node is
                # not in the stack of open elements, then this is a
                # parse error; remove the element from the list, and
                # abort these steps.
                elif formattingElement not in self.tree.openElements:
                    self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
                    self.tree.activeFormattingElements.remove(formattingElement)
                    return

                # Otherwise, if there is such a node, and that node is
                # also in the stack of open elements, but the element
                # is not in scope, then this is a parse error; ignore
                # the token, and abort these steps.
                elif not self.tree.elementInScope(formattingElement.name):
                    self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
                    return

                # Otherwise, there is a formatting element and that
                # element is in the stack and is in scope. If the
                # element is not the current node, this is a parse
                # error. In any case, proceed with the algorithm as
                # written in the following steps.
                else:
                    if formattingElement != self.tree.openElements[-1]:
                        self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})

                # Step 5:

                # Let the furthest block be the topmost node in the
                # stack of open elements that is lower in the stack
                # than the formatting element, and is an element in
                # the special category. There might not be one.
                afeIndex = self.tree.openElements.index(formattingElement)
                furthestBlock = None
                for element in self.tree.openElements[afeIndex:]:
                    if element.nameTuple in specialElements:
                        furthestBlock = element
                        break

                # Step 6:

                # If there is no furthest block, then the UA must
                # first pop all the nodes from the bottom of the stack
                # of open elements, from the current node up to and
                # including the formatting element, then remove the
                # formatting element from the list of active
                # formatting elements, and finally abort these steps.
                if furthestBlock is None:
                    element = self.tree.openElements.pop()
                    while element != formattingElement:
                        element = self.tree.openElements.pop()
                    self.tree.activeFormattingElements.remove(element)
                    return

                # Step 7
                commonAncestor = self.tree.openElements[afeIndex - 1]

                # Step 8:
                # The bookmark is supposed to help us identify where to reinsert
                # nodes in step 15. We have to ensure that we reinsert nodes after
                # the node before the active formatting element. Note the bookmark
                # can move in step 9.7
                bookmark = self.tree.activeFormattingElements.index(formattingElement)

                # Step 9
                lastNode = node = furthestBlock
                innerLoopCounter = 0

                index = self.tree.openElements.index(node)
                while innerLoopCounter < 3:
                    innerLoopCounter += 1
                    # Node is element before node in open elements
                    index -= 1
                    node = self.tree.openElements[index]
                    if node not in self.tree.activeFormattingElements:
                        self.tree.openElements.remove(node)
                        continue
                    # Step 9.6
                    if node == formattingElement:
                        break
                    # Step 9.7
                    if lastNode == furthestBlock:
                        bookmark = self.tree.activeFormattingElements.index(node) + 1
                    # Step 9.8
                    clone = node.cloneNode()
                    # Replace node with clone
                    self.tree.activeFormattingElements[
                        self.tree.activeFormattingElements.index(node)] = clone
                    self.tree.openElements[
                        self.tree.openElements.index(node)] = clone
                    node = clone
                    # Step 9.9
                    # Remove lastNode from its parents, if any
                    if lastNode.parent:
                        lastNode.parent.removeChild(lastNode)
                    node.appendChild(lastNode)
                    # Step 9.10
                    lastNode = node

                # Step 10
                # Foster parent lastNode if commonAncestor is a
                # table, tbody, tfoot, thead, or tr we need to foster
                # parent the lastNode
                if lastNode.parent:
                    lastNode.parent.removeChild(lastNode)

                if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
                    parent, insertBefore = self.tree.getTableMisnestedNodePosition()
                    parent.insertBefore(lastNode, insertBefore)
                else:
                    commonAncestor.appendChild(lastNode)

                # Step 11
                clone = formattingElement.cloneNode()

                # Step 12
                furthestBlock.reparentChildren(clone)

                # Step 13
                furthestBlock.appendChild(clone)

                # Step 14
                self.tree.activeFormattingElements.remove(formattingElement)
                self.tree.activeFormattingElements.insert(bookmark, clone)

                # Step 15
                self.tree.openElements.remove(formattingElement)
                self.tree.openElements.insert(
                    self.tree.openElements.index(furthestBlock) + 1, clone)

        def endTagAppletMarqueeObject(self, token):
            if self.tree.elementInScope(token["name"]):
                self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})

            if self.tree.elementInScope(token["name"]):
                element = self.tree.openElements.pop()
                while element.name != token["name"]:
                    element = self.tree.openElements.pop()
                self.tree.clearActiveFormattingElements()

        def endTagBr(self, token):
            self.parser.parseError("unexpected-end-tag-treated-as",
                                   {"originalName": "br", "newName": "br element"})
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(impliedTagToken("br", "StartTag"))
            self.tree.openElements.pop()

        def endTagOther(self, token):
            for node in self.tree.openElements[::-1]:
                if node.name == token["name"]:
                    self.tree.generateImpliedEndTags(exclude=token["name"])
                    if self.tree.openElements[-1].name != token["name"]:
                        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
                    while self.tree.openElements.pop() != node:
                        pass
                    break
                else:
                    if node.nameTuple in specialElements:
                        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
                        break
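
        # Illustrative (not library API): misnested formatting tags exercise
        # the adoption agency algorithm above:
        #
        #     >>> frag = parseFragment('<b>bold<p>both</b>plain</p>')  # doctest: +SKIP
        #     >>> # the <b> is cloned so 'both' stays bold inside the <p>
        #     >>> # while 'plain' ends up outside any <b>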

        startTagHandler = _utils.MethodDispatcher([
            ("html", Phase.startTagHtml),
            (("base", "basefont", "bgsound", "command", "link", "meta",
              "script", "style", "title"),
             startTagProcessInHead),
            ("body", startTagBody),
            ("frameset", startTagFrameset),
            (("address", "article", "aside", "blockquote", "center", "details",
              "dir", "div", "dl", "fieldset", "figcaption", "figure",
              "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
              "section", "summary", "ul"),
             startTagCloseP),
            (headingElements, startTagHeading),
            (("pre", "listing"), startTagPreListing),
            ("form", startTagForm),
            (("li", "dd", "dt"), startTagListItem),
            ("plaintext", startTagPlaintext),
            ("a", startTagA),
            (("b", "big", "code", "em", "font", "i", "s", "small", "strike",
              "strong", "tt", "u"), startTagFormatting),
            ("nobr", startTagNobr),
            ("button", startTagButton),
            (("applet", "marquee", "object"), startTagAppletMarqueeObject),
            ("xmp", startTagXmp),
            ("table", startTagTable),
            (("area", "br", "embed", "img", "keygen", "wbr"),
             startTagVoidFormatting),
            (("param", "source", "track"), startTagParamSource),
            ("input", startTagInput),
            ("hr", startTagHr),
            ("image", startTagImage),
            ("isindex", startTagIsIndex),
            ("textarea", startTagTextarea),
            ("iframe", startTagIFrame),
            ("noscript", startTagNoscript),
            (("noembed", "noframes"), startTagRawtext),
            ("select", startTagSelect),
            (("rp", "rt"), startTagRpRt),
            (("option", "optgroup"), startTagOpt),
            (("math"), startTagMath),
            (("svg"), startTagSvg),
            (("caption", "col", "colgroup", "frame", "head",
              "tbody", "td", "tfoot", "th", "thead",
              "tr"), startTagMisplaced)
        ])
        startTagHandler.default = startTagOther

        endTagHandler = _utils.MethodDispatcher([
            ("body", endTagBody),
            ("html", endTagHtml),
            (("address", "article", "aside", "blockquote", "button", "center",
              "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption",
              "figure", "footer", "header", "hgroup", "listing", "main", "menu",
              "nav", "ol", "pre", "section", "summary", "ul"), endTagBlock),
            ("form", endTagForm),
            ("p", endTagP),
            (("dd", "dt", "li"), endTagListItem),
            (headingElements, endTagHeading),
            (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
              "strike", "strong", "tt", "u"), endTagFormatting),
            (("applet", "marquee", "object"), endTagAppletMarqueeObject),
            ("br", endTagBr),
        ])
        endTagHandler.default = endTagOther

    class TextPhase(Phase):
        __slots__ = tuple()

        def processCharacters(self, token):
            self.tree.insertText(token["data"])

        def processEOF(self):
            self.parser.parseError("expected-named-closing-tag-but-got-eof",
                                   {"name": self.tree.openElements[-1].name})
            self.tree.openElements.pop()
            self.parser.phase = self.parser.originalPhase
            return True

        def startTagOther(self, token):
            assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']

        def endTagScript(self, token):
            node = self.tree.openElements.pop()
            assert node.name == "script"
            self.parser.phase = self.parser.originalPhase
            # The rest of this method is all stuff that only happens if
            # document.write works

        def endTagOther(self, token):
            self.tree.openElements.pop()
            self.parser.phase = self.parser.originalPhase

        startTagHandler = _utils.MethodDispatcher([])
        startTagHandler.default = startTagOther
        endTagHandler = _utils.MethodDispatcher([
            ("script", endTagScript)])
        endTagHandler.default = endTagOther

    class InTablePhase(Phase):
        # http://www.whatwg.org/specs/web-apps/current-work/#in-table
        __slots__ = tuple()

        # helper methods
        def clearStackToTableContext(self):
            # "clear the stack back to a table context"
            while self.tree.openElements[-1].name not in ("table", "html"):
                # self.parser.parseError("unexpected-implied-end-tag-in-table",
                #  {"name":  self.tree.openElements[-1].name})
                self.tree.openElements.pop()
            # When the current node is <html> it's an innerHTML case

        # processing methods
        def processEOF(self):
            if self.tree.openElements[-1].name != "html":
                self.parser.parseError("eof-in-table")
            else:
                assert self.parser.innerHTML
            # Stop parsing

        def processSpaceCharacters(self, token):
            originalPhase = self.parser.phase
            self.parser.phase = self.parser.phases["inTableText"]
            self.parser.phase.originalPhase = originalPhase
            self.parser.phase.processSpaceCharacters(token)

        def processCharacters(self, token):
            originalPhase = self.parser.phase
            self.parser.phase = self.parser.phases["inTableText"]
            self.parser.phase.originalPhase = originalPhase
            self.parser.phase.processCharacters(token)

        def insertText(self, token):
            # If we get here there must be at least one non-whitespace character
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processCharacters(token)
            self.tree.insertFromTable = False

        def startTagCaption(self, token):
            self.clearStackToTableContext()
            self.tree.activeFormattingElements.append(Marker)
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inCaption"]

        def startTagColgroup(self, token):
            self.clearStackToTableContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inColumnGroup"]

        def startTagCol(self, token):
            self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
            return token

        def startTagRowGroup(self, token):
            self.clearStackToTableContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inTableBody"]

        def startTagImplyTbody(self, token):
            self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
            return token

        def startTagTable(self, token):
            self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                   {"startName": "table", "endName": "table"})
            self.parser.phase.processEndTag(impliedTagToken("table"))
            if not self.parser.innerHTML:
                return token

        def startTagStyleScript(self, token):
            return self.parser.phases["inHead"].processStartTag(token)

        def startTagInput(self, token):
            if ("type" in token["data"] and
                    token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
                self.parser.parseError("unexpected-hidden-input-in-table")
                self.tree.insertElement(token)
                # XXX associate with form
                self.tree.openElements.pop()
            else:
                self.startTagOther(token)

        def startTagForm(self, token):
            self.parser.parseError("unexpected-form-in-table")
            if self.tree.formPointer is None:
                self.tree.insertElement(token)
                self.tree.formPointer = self.tree.openElements[-1]
                self.tree.openElements.pop()

        def startTagOther(self, token):
            self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processStartTag(token)
            self.tree.insertFromTable = False

        def endTagTable(self, token):
            if self.tree.elementInScope("table", variant="table"):
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != "table":
                    self.parser.parseError("end-tag-too-early-named",
                                           {"gotName": "table",
                                            "expectedName": self.tree.openElements[-1].name})
                while self.tree.openElements[-1].name != "table":
                    self.tree.openElements.pop()
                self.tree.openElements.pop()
                self.parser.resetInsertionMode()
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()

        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
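
        # Illustrative (not library API): the insertFromTable "table magic"
        # above is foster parenting; stray content inside a table is moved
        # in front of it:
        #
        #     >>> frag = parseFragment('<table>oops<tr><td>cell</td></tr></table>')  # doctest: +SKIP
        #     >>> # 'oops' becomes a sibling placed before the <table>,
        #     >>> # not a child of it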

        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processEndTag(token)
            self.tree.insertFromTable = False

        startTagHandler = _utils.MethodDispatcher([
            ("html", Phase.startTagHtml),
            ("caption", startTagCaption),
            ("colgroup", startTagColgroup),
            ("col", startTagCol),
            (("tbody", "tfoot", "thead"), startTagRowGroup),
            (("td", "th", "tr"), startTagImplyTbody),
            ("table", startTagTable),
            (("style", "script"), startTagStyleScript),
            ("input", startTagInput),
            ("form", startTagForm)
        ])
        startTagHandler.default = startTagOther

        endTagHandler = _utils.MethodDispatcher([
            ("table", endTagTable),
            (("body", "caption", "col", "colgroup", "html", "tbody", "td",
              "tfoot", "th", "thead", "tr"), endTagIgnore)
        ])
        endTagHandler.default = endTagOther

    class InTableTextPhase(Phase):
        __slots__ = ("originalPhase", "characterTokens")

        def __init__(self, *args, **kwargs):
            super(InTableTextPhase, self).__init__(*args, **kwargs)
            self.originalPhase = None
            self.characterTokens = []

        def flushCharacters(self):
            data = "".join([item["data"] for item in self.characterTokens])
            if any([item not in spaceCharacters for item in data]):
                token = {"type": tokenTypes["Characters"], "data": data}
                self.parser.phases["inTable"].insertText(token)
            elif data:
                self.tree.insertText(data)
            self.characterTokens = []

        def processComment(self, token):
            self.flushCharacters()
            self.parser.phase = self.originalPhase
            return token

        def processEOF(self):
            self.flushCharacters()
            self.parser.phase = self.originalPhase
            return True

        def processCharacters(self, token):
            if token["data"] == "\u0000":
                return
            self.characterTokens.append(token)

        def processSpaceCharacters(self, token):
            # pretty sure we should never reach here
            self.characterTokens.append(token)
            # assert False

        def processStartTag(self, token):
            self.flushCharacters()
            self.parser.phase = self.originalPhase
            return token

        def processEndTag(self, token):
            self.flushCharacters()
            self.parser.phase = self.originalPhase
            return token
self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) def endTagOther(self, token): return self.parser.phases["inBody"].processEndTag(token) startTagHandler = _utils.MethodDispatcher([ ("html", Phase.startTagHtml), (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th", "thead", "tr"), startTagTableElement) ]) startTagHandler.default = startTagOther endTagHandler = _utils.MethodDispatcher([ ("caption", endTagCaption), ("table", endTagTable), (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th", "thead", "tr"), endTagIgnore) ]) endTagHandler.default = endTagOther class InColumnGroupPhase(Phase): # http://www.whatwg.org/specs/web-apps/current-work/#in-column __slots__ = tuple() def ignoreEndTagColgroup(self): return self.tree.openElements[-1].name == "html" def processEOF(self): if self.tree.openElements[-1].name == "html": assert self.parser.innerHTML return else: ignoreEndTag = self.ignoreEndTagColgroup() self.endTagColgroup(impliedTagToken("colgroup")) if not ignoreEndTag: return True def processCharacters(self, token): ignoreEndTag = self.ignoreEndTagColgroup() self.endTagColgroup(impliedTagToken("colgroup")) if not ignoreEndTag: return token def startTagCol(self, token): self.tree.insertElement(token) self.tree.openElements.pop() token["selfClosingAcknowledged"] = True def startTagOther(self, token): ignoreEndTag = self.ignoreEndTagColgroup() self.endTagColgroup(impliedTagToken("colgroup")) if not ignoreEndTag: return token def endTagColgroup(self, token): if self.ignoreEndTagColgroup(): # innerHTML case assert self.parser.innerHTML self.parser.parseError() else: self.tree.openElements.pop() self.parser.phase = self.parser.phases["inTable"] def endTagCol(self, token): self.parser.parseError("no-end-tag", {"name": "col"}) def endTagOther(self, token): ignoreEndTag = self.ignoreEndTagColgroup() self.endTagColgroup(impliedTagToken("colgroup")) if not ignoreEndTag: return token startTagHandler = _utils.MethodDispatcher([ ("html", Phase.startTagHtml), ("col", startTagCol) ]) startTagHandler.default = startTagOther endTagHandler = _utils.MethodDispatcher([ ("colgroup", endTagColgroup), ("col", endTagCol) ]) endTagHandler.default = endTagOther class InTableBodyPhase(Phase): # http://www.whatwg.org/specs/web-apps/current-work/#in-table0 __slots__ = tuple() # helper methods def clearStackToTableBodyContext(self): while self.tree.openElements[-1].name not in ("tbody", "tfoot", "thead", "html"): # self.parser.parseError("unexpected-implied-end-tag-in-table", # {"name": self.tree.openElements[-1].name}) self.tree.openElements.pop() if self.tree.openElements[-1].name == "html": assert self.parser.innerHTML # the rest def processEOF(self): self.parser.phases["inTable"].processEOF() def processSpaceCharacters(self, token): return self.parser.phases["inTable"].processSpaceCharacters(token) def processCharacters(self, token): return self.parser.phases["inTable"].processCharacters(token) def startTagTr(self, token): self.clearStackToTableBodyContext() self.tree.insertElement(token) self.parser.phase = self.parser.phases["inRow"] def startTagTableCell(self, token): self.parser.parseError("unexpected-cell-in-table-body", {"name": token["name"]}) self.startTagTr(impliedTagToken("tr", "StartTag")) return token def startTagTableOther(self, token): # XXX AT Any ideas on how to share this with endTagTable? 
if (self.tree.elementInScope("tbody", variant="table") or self.tree.elementInScope("thead", variant="table") or self.tree.elementInScope("tfoot", variant="table")): self.clearStackToTableBodyContext() self.endTagTableRowGroup( impliedTagToken(self.tree.openElements[-1].name)) return token else: # innerHTML case assert self.parser.innerHTML self.parser.parseError() def startTagOther(self, token): return self.parser.phases["inTable"].processStartTag(token) def endTagTableRowGroup(self, token): if self.tree.elementInScope(token["name"], variant="table"): self.clearStackToTableBodyContext() self.tree.openElements.pop() self.parser.phase = self.parser.phases["inTable"] else: self.parser.parseError("unexpected-end-tag-in-table-body", {"name": token["name"]}) def endTagTable(self, token): if (self.tree.elementInScope("tbody", variant="table") or self.tree.elementInScope("thead", variant="table") or self.tree.elementInScope("tfoot", variant="table")): self.clearStackToTableBodyContext() self.endTagTableRowGroup( impliedTagToken(self.tree.openElements[-1].name)) return token else: # innerHTML case assert self.parser.innerHTML self.parser.parseError() def endTagIgnore(self, token): self.parser.parseError("unexpected-end-tag-in-table-body", {"name": token["name"]}) def endTagOther(self, token): return self.parser.phases["inTable"].processEndTag(token) startTagHandler = _utils.MethodDispatcher([ ("html", Phase.startTagHtml), ("tr", startTagTr), (("td", "th"), startTagTableCell), (("caption", "col", "colgroup", "tbody", "tfoot", "thead"), startTagTableOther) ]) startTagHandler.default = startTagOther endTagHandler = _utils.MethodDispatcher([ (("tbody", "tfoot", "thead"), endTagTableRowGroup), ("table", endTagTable), (("body", "caption", "col", "colgroup", "html", "td", "th", "tr"), endTagIgnore) ]) endTagHandler.default = endTagOther class InRowPhase(Phase): # http://www.whatwg.org/specs/web-apps/current-work/#in-row __slots__ = tuple() # helper methods (XXX unify this with other table helper methods) def clearStackToTableRowContext(self): while self.tree.openElements[-1].name not in ("tr", "html"): self.parser.parseError("unexpected-implied-end-tag-in-table-row", {"name": self.tree.openElements[-1].name}) self.tree.openElements.pop() def ignoreEndTagTr(self): return not self.tree.elementInScope("tr", variant="table") # the rest def processEOF(self): self.parser.phases["inTable"].processEOF() def processSpaceCharacters(self, token): return self.parser.phases["inTable"].processSpaceCharacters(token) def processCharacters(self, token): return self.parser.phases["inTable"].processCharacters(token) def startTagTableCell(self, token): self.clearStackToTableRowContext() self.tree.insertElement(token) self.parser.phase = self.parser.phases["inCell"] self.tree.activeFormattingElements.append(Marker) def startTagTableOther(self, token): ignoreEndTag = self.ignoreEndTagTr() self.endTagTr(impliedTagToken("tr")) # XXX how are we sure it's always ignored in the innerHTML case? 
if not ignoreEndTag: return token def startTagOther(self, token): return self.parser.phases["inTable"].processStartTag(token) def endTagTr(self, token): if not self.ignoreEndTagTr(): self.clearStackToTableRowContext() self.tree.openElements.pop() self.parser.phase = self.parser.phases["inTableBody"] else: # innerHTML case assert self.parser.innerHTML self.parser.parseError() def endTagTable(self, token): ignoreEndTag = self.ignoreEndTagTr() self.endTagTr(impliedTagToken("tr")) # Reprocess the current tag if the tr end tag was not ignored # XXX how are we sure it's always ignored in the innerHTML case? if not ignoreEndTag: return token def endTagTableRowGroup(self, token): if self.tree.elementInScope(token["name"], variant="table"): self.endTagTr(impliedTagToken("tr")) return token else: self.parser.parseError() def endTagIgnore(self, token): self.parser.parseError("unexpected-end-tag-in-table-row", {"name": token["name"]}) def endTagOther(self, token): return self.parser.phases["inTable"].processEndTag(token) startTagHandler = _utils.MethodDispatcher([ ("html", Phase.startTagHtml), (("td", "th"), startTagTableCell), (("caption", "col", "colgroup", "tbody", "tfoot", "thead", "tr"), startTagTableOther) ]) startTagHandler.default = startTagOther endTagHandler = _utils.MethodDispatcher([ ("tr", endTagTr), ("table", endTagTable), (("tbody", "tfoot", "thead"), endTagTableRowGroup), (("body", "caption", "col", "colgroup", "html", "td", "th"), endTagIgnore) ]) endTagHandler.default = endTagOther class InCellPhase(Phase): # http://www.whatwg.org/specs/web-apps/current-work/#in-cell __slots__ = tuple() # helper def closeCell(self): if self.tree.elementInScope("td", variant="table"): self.endTagTableCell(impliedTagToken("td")) elif self.tree.elementInScope("th", variant="table"): self.endTagTableCell(impliedTagToken("th")) # the rest def processEOF(self): self.parser.phases["inBody"].processEOF() def processCharacters(self, token): return self.parser.phases["inBody"].processCharacters(token) def startTagTableOther(self, token): if (self.tree.elementInScope("td", variant="table") or self.tree.elementInScope("th", variant="table")): self.closeCell() return token else: # innerHTML case assert self.parser.innerHTML self.parser.parseError() def startTagOther(self, token): return self.parser.phases["inBody"].processStartTag(token) def endTagTableCell(self, token): if self.tree.elementInScope(token["name"], variant="table"): self.tree.generateImpliedEndTags(token["name"]) if self.tree.openElements[-1].name != token["name"]: self.parser.parseError("unexpected-cell-end-tag", {"name": token["name"]}) while True: node = self.tree.openElements.pop() if node.name == token["name"]: break else: self.tree.openElements.pop() self.tree.clearActiveFormattingElements() self.parser.phase = self.parser.phases["inRow"] else: self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) def endTagIgnore(self, token): self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) def endTagImply(self, token): if self.tree.elementInScope(token["name"], variant="table"): self.closeCell() return token else: # sometimes innerHTML case self.parser.parseError() def endTagOther(self, token): return self.parser.phases["inBody"].processEndTag(token) startTagHandler = _utils.MethodDispatcher([ ("html", Phase.startTagHtml), (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th", "thead", "tr"), startTagTableOther) ]) startTagHandler.default = startTagOther endTagHandler = _utils.MethodDispatcher([ (("td", "th"), 
endTagTableCell), (("body", "caption", "col", "colgroup", "html"), endTagIgnore), (("table", "tbody", "tfoot", "thead", "tr"), endTagImply) ]) endTagHandler.default = endTagOther class InSelectPhase(Phase): __slots__ = tuple() # http://www.whatwg.org/specs/web-apps/current-work/#in-select def processEOF(self): if self.tree.openElements[-1].name != "html": self.parser.parseError("eof-in-select") else: assert self.parser.innerHTML def processCharacters(self, token): if token["data"] == "\u0000": return self.tree.insertText(token["data"]) def startTagOption(self, token): # We need to imply </option> if <option> is the current node. if self.tree.openElements[-1].name == "option": self.tree.openElements.pop() self.tree.insertElement(token) def startTagOptgroup(self, token): if self.tree.openElements[-1].name == "option": self.tree.openElements.pop() if self.tree.openElements[-1].name == "optgroup": self.tree.openElements.pop() self.tree.insertElement(token) def startTagSelect(self, token): self.parser.parseError("unexpected-select-in-select") self.endTagSelect(impliedTagToken("select")) def startTagInput(self, token): self.parser.parseError("unexpected-input-in-select") if self.tree.elementInScope("select", variant="select"): self.endTagSelect(impliedTagToken("select")) return token else: assert self.parser.innerHTML def startTagScript(self, token): return self.parser.phases["inHead"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("unexpected-start-tag-in-select", {"name": token["name"]}) def endTagOption(self, token): if self.tree.openElements[-1].name == "option": self.tree.openElements.pop() else: self.parser.parseError("unexpected-end-tag-in-select", {"name": "option"}) def endTagOptgroup(self, token): # </optgroup> implicitly closes <option> if (self.tree.openElements[-1].name == "option" and self.tree.openElements[-2].name == "optgroup"): self.tree.openElements.pop() # It also closes </optgroup> if self.tree.openElements[-1].name == "optgroup": self.tree.openElements.pop() # But nothing else else: self.parser.parseError("unexpected-end-tag-in-select", {"name": "optgroup"}) def endTagSelect(self, token): if self.tree.elementInScope("select", variant="select"): node = self.tree.openElements.pop() while node.name != "select": node = self.tree.openElements.pop() self.parser.resetInsertionMode() else: # innerHTML case assert self.parser.innerHTML self.parser.parseError() def endTagOther(self, token): self.parser.parseError("unexpected-end-tag-in-select", {"name": token["name"]}) startTagHandler = _utils.MethodDispatcher([ ("html", Phase.startTagHtml), ("option", startTagOption), ("optgroup", startTagOptgroup), ("select", startTagSelect), (("input", "keygen", "textarea"), startTagInput), ("script", startTagScript) ]) startTagHandler.default = startTagOther endTagHandler = _utils.MethodDispatcher([ ("option", endTagOption), ("optgroup", endTagOptgroup), ("select", endTagSelect) ]) endTagHandler.default = endTagOther class InSelectInTablePhase(Phase): __slots__ = tuple() def processEOF(self): self.parser.phases["inSelect"].processEOF() def processCharacters(self, token): return self.parser.phases["inSelect"].processCharacters(token) def startTagTable(self, token): self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]}) self.endTagOther(impliedTagToken("select")) return token def startTagOther(self, token): return self.parser.phases["inSelect"].processStartTag(token) def endTagTable(self, token): 
self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]}) if self.tree.elementInScope(token["name"], variant="table"): self.endTagOther(impliedTagToken("select")) return token def endTagOther(self, token): return self.parser.phases["inSelect"].processEndTag(token) startTagHandler = _utils.MethodDispatcher([ (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"), startTagTable) ]) startTagHandler.default = startTagOther endTagHandler = _utils.MethodDispatcher([ (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"), endTagTable) ]) endTagHandler.default = endTagOther class InForeignContentPhase(Phase): __slots__ = tuple() breakoutElements = frozenset(["b", "big", "blockquote", "body", "br", "center", "code", "dd", "div", "dl", "dt", "em", "embed", "h1", "h2", "h3", "h4", "h5", "h6", "head", "hr", "i", "img", "li", "listing", "menu", "meta", "nobr", "ol", "p", "pre", "ruby", "s", "small", "span", "strong", "strike", "sub", "sup", "table", "tt", "u", "ul", "var"]) def adjustSVGTagNames(self, token): replacements = {"altglyph": "altGlyph", "altglyphdef": "altGlyphDef", "altglyphitem": "altGlyphItem", "animatecolor": "animateColor", "animatemotion": "animateMotion", "animatetransform": "animateTransform", "clippath": "clipPath", "feblend": "feBlend", "fecolormatrix": "feColorMatrix", "fecomponenttransfer": "feComponentTransfer", "fecomposite": "feComposite", "feconvolvematrix": "feConvolveMatrix", "fediffuselighting": "feDiffuseLighting", "fedisplacementmap": "feDisplacementMap", "fedistantlight": "feDistantLight", "feflood": "feFlood", "fefunca": "feFuncA", "fefuncb": "feFuncB", "fefuncg": "feFuncG", "fefuncr": "feFuncR", "fegaussianblur": "feGaussianBlur", "feimage": "feImage", "femerge": "feMerge", "femergenode": "feMergeNode", "femorphology": "feMorphology", "feoffset": "feOffset", "fepointlight": "fePointLight", "fespecularlighting": "feSpecularLighting", "fespotlight": "feSpotLight", "fetile": "feTile", "feturbulence": "feTurbulence", "foreignobject": "foreignObject", "glyphref": "glyphRef", "lineargradient": "linearGradient", "radialgradient": "radialGradient", "textpath": "textPath"} if token["name"] in replacements: token["name"] = replacements[token["name"]] def processCharacters(self, token): if token["data"] == "\u0000": token["data"] = "\uFFFD" elif (self.parser.framesetOK and any(char not in spaceCharacters for char in token["data"])): self.parser.framesetOK = False Phase.processCharacters(self, token) def processStartTag(self, token): currentNode = self.tree.openElements[-1] if (token["name"] in self.breakoutElements or (token["name"] == "font" and set(token["data"].keys()) & {"color", "face", "size"})): self.parser.parseError("unexpected-html-element-in-foreign-content", {"name": token["name"]}) while (self.tree.openElements[-1].namespace != self.tree.defaultNamespace and not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])): self.tree.openElements.pop() return token else: if currentNode.namespace == namespaces["mathml"]: self.parser.adjustMathMLAttributes(token) elif currentNode.namespace == namespaces["svg"]: self.adjustSVGTagNames(token) self.parser.adjustSVGAttributes(token) self.parser.adjustForeignAttributes(token) token["namespace"] = currentNode.namespace self.tree.insertElement(token) if token["selfClosing"]: self.tree.openElements.pop() token["selfClosingAcknowledged"] = True def processEndTag(self, token): 
nodeIndex = len(self.tree.openElements) - 1 node = self.tree.openElements[-1] if node.name.translate(asciiUpper2Lower) != token["name"]: self.parser.parseError("unexpected-end-tag", {"name": token["name"]}) while True: if node.name.translate(asciiUpper2Lower) == token["name"]: # XXX this isn't in the spec but it seems necessary if self.parser.phase == self.parser.phases["inTableText"]: self.parser.phase.flushCharacters() self.parser.phase = self.parser.phase.originalPhase while self.tree.openElements.pop() != node: assert self.tree.openElements new_token = None break nodeIndex -= 1 node = self.tree.openElements[nodeIndex] if node.namespace != self.tree.defaultNamespace: continue else: new_token = self.parser.phase.processEndTag(token) break return new_token class AfterBodyPhase(Phase): __slots__ = tuple() def processEOF(self): # Stop parsing pass def processComment(self, token): # This is needed because data is to be appended to the <html> element # here and not to whatever is currently open. self.tree.insertComment(token, self.tree.openElements[0]) def processCharacters(self, token): self.parser.parseError("unexpected-char-after-body") self.parser.phase = self.parser.phases["inBody"] return token def startTagHtml(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("unexpected-start-tag-after-body", {"name": token["name"]}) self.parser.phase = self.parser.phases["inBody"] return token def endTagHtml(self, name): if self.parser.innerHTML: self.parser.parseError("unexpected-end-tag-after-body-innerhtml") else: self.parser.phase = self.parser.phases["afterAfterBody"] def endTagOther(self, token): self.parser.parseError("unexpected-end-tag-after-body", {"name": token["name"]}) self.parser.phase = self.parser.phases["inBody"] return token startTagHandler = _utils.MethodDispatcher([ ("html", startTagHtml) ]) startTagHandler.default = startTagOther endTagHandler = _utils.MethodDispatcher([("html", endTagHtml)]) endTagHandler.default = endTagOther class InFramesetPhase(Phase): # http://www.whatwg.org/specs/web-apps/current-work/#in-frameset __slots__ = tuple() def processEOF(self): if self.tree.openElements[-1].name != "html": self.parser.parseError("eof-in-frameset") else: assert self.parser.innerHTML def processCharacters(self, token): self.parser.parseError("unexpected-char-in-frameset") def startTagFrameset(self, token): self.tree.insertElement(token) def startTagFrame(self, token): self.tree.insertElement(token) self.tree.openElements.pop() def startTagNoframes(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("unexpected-start-tag-in-frameset", {"name": token["name"]}) def endTagFrameset(self, token): if self.tree.openElements[-1].name == "html": # innerHTML case self.parser.parseError("unexpected-frameset-in-frameset-innerhtml") else: self.tree.openElements.pop() if (not self.parser.innerHTML and self.tree.openElements[-1].name != "frameset"): # If we're not in innerHTML mode and the current node is not a # "frameset" element (anymore) then switch. 
self.parser.phase = self.parser.phases["afterFrameset"] def endTagOther(self, token): self.parser.parseError("unexpected-end-tag-in-frameset", {"name": token["name"]}) startTagHandler = _utils.MethodDispatcher([ ("html", Phase.startTagHtml), ("frameset", startTagFrameset), ("frame", startTagFrame), ("noframes", startTagNoframes) ]) startTagHandler.default = startTagOther endTagHandler = _utils.MethodDispatcher([ ("frameset", endTagFrameset) ]) endTagHandler.default = endTagOther class AfterFramesetPhase(Phase): # http://www.whatwg.org/specs/web-apps/current-work/#after3 __slots__ = tuple() def processEOF(self): # Stop parsing pass def processCharacters(self, token): self.parser.parseError("unexpected-char-after-frameset") def startTagNoframes(self, token): return self.parser.phases["inHead"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("unexpected-start-tag-after-frameset", {"name": token["name"]}) def endTagHtml(self, token): self.parser.phase = self.parser.phases["afterAfterFrameset"] def endTagOther(self, token): self.parser.parseError("unexpected-end-tag-after-frameset", {"name": token["name"]}) startTagHandler = _utils.MethodDispatcher([ ("html", Phase.startTagHtml), ("noframes", startTagNoframes) ]) startTagHandler.default = startTagOther endTagHandler = _utils.MethodDispatcher([ ("html", endTagHtml) ]) endTagHandler.default = endTagOther class AfterAfterBodyPhase(Phase): __slots__ = tuple() def processEOF(self): pass def processComment(self, token): self.tree.insertComment(token, self.tree.document) def processSpaceCharacters(self, token): return self.parser.phases["inBody"].processSpaceCharacters(token) def processCharacters(self, token): self.parser.parseError("expected-eof-but-got-char") self.parser.phase = self.parser.phases["inBody"] return token def startTagHtml(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("expected-eof-but-got-start-tag", {"name": token["name"]}) self.parser.phase = self.parser.phases["inBody"] return token def processEndTag(self, token): self.parser.parseError("expected-eof-but-got-end-tag", {"name": token["name"]}) self.parser.phase = self.parser.phases["inBody"] return token startTagHandler = _utils.MethodDispatcher([ ("html", startTagHtml) ]) startTagHandler.default = startTagOther class AfterAfterFramesetPhase(Phase): __slots__ = tuple() def processEOF(self): pass def processComment(self, token): self.tree.insertComment(token, self.tree.document) def processSpaceCharacters(self, token): return self.parser.phases["inBody"].processSpaceCharacters(token) def processCharacters(self, token): self.parser.parseError("expected-eof-but-got-char") def startTagHtml(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagNoFrames(self, token): return self.parser.phases["inHead"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("expected-eof-but-got-start-tag", {"name": token["name"]}) def processEndTag(self, token): self.parser.parseError("expected-eof-but-got-end-tag", {"name": token["name"]}) startTagHandler = _utils.MethodDispatcher([ ("html", startTagHtml), ("noframes", startTagNoFrames) ]) startTagHandler.default = startTagOther # pylint:enable=unused-argument return { "initial": InitialPhase, "beforeHtml": BeforeHtmlPhase, "beforeHead": BeforeHeadPhase, "inHead": InHeadPhase, "inHeadNoscript": InHeadNoscriptPhase, "afterHead": AfterHeadPhase, "inBody": InBodyPhase, "text": TextPhase, 
"inTable": InTablePhase, "inTableText": InTableTextPhase, "inCaption": InCaptionPhase, "inColumnGroup": InColumnGroupPhase, "inTableBody": InTableBodyPhase, "inRow": InRowPhase, "inCell": InCellPhase, "inSelect": InSelectPhase, "inSelectInTable": InSelectInTablePhase, "inForeignContent": InForeignContentPhase, "afterBody": AfterBodyPhase, "inFrameset": InFramesetPhase, "afterFrameset": AfterFramesetPhase, "afterAfterBody": AfterAfterBodyPhase, "afterAfterFrameset": AfterAfterFramesetPhase, # XXX after after frameset } def adjust_attributes(token, replacements): needs_adjustment = viewkeys(token['data']) & viewkeys(replacements) if needs_adjustment: token['data'] = type(token['data'])((replacements.get(k, k), v) for k, v in token['data'].items()) def impliedTagToken(name, type="EndTag", attributes=None, selfClosing=False): if attributes is None: attributes = {} return {"type": tokenTypes[type], "name": name, "data": attributes, "selfClosing": selfClosing} class ParseError(Exception): """Error in parsed document""" pass
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/html5lib/__init__.py
""" HTML parsing library based on the `WHATWG HTML specification <https://whatwg.org/html>`_. The parser is designed to be compatible with existing HTML found in the wild and implements well-defined error recovery that is largely compatible with modern desktop web browsers. Example usage:: from pip._vendor import html5lib with open("my_document.html", "rb") as f: tree = html5lib.parse(f) For convenience, this module re-exports the following names: * :func:`~.html5parser.parse` * :func:`~.html5parser.parseFragment` * :class:`~.html5parser.HTMLParser` * :func:`~.treebuilders.getTreeBuilder` * :func:`~.treewalkers.getTreeWalker` * :func:`~.serializer.serialize` """ from __future__ import absolute_import, division, unicode_literals from .html5parser import HTMLParser, parse, parseFragment from .treebuilders import getTreeBuilder from .treewalkers import getTreeWalker from .serializer import serialize __all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder", "getTreeWalker", "serialize"] # this has to be at the top level, see how setup.py parses this #: Distribution version number. __version__ = "1.1"
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/html5lib/serializer.py
from __future__ import absolute_import, division, unicode_literals

from pip._vendor.six import text_type

import re

from codecs import register_error, xmlcharrefreplace_errors

from .constants import voidElements, booleanAttributes, spaceCharacters
from .constants import rcdataElements, entities, xmlEntities
from . import treewalkers, _utils

from xml.sax.saxutils import escape

_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`"
_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]")
_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars +
                                   "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n"
                                   "\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15"
                                   "\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
                                   "\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000"
                                   "\u2001\u2002\u2003\u2004\u2005\u2006\u2007"
                                   "\u2008\u2009\u200a\u2028\u2029\u202f\u205f"
                                   "\u3000]")

_encode_entity_map = {}
_is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
    # skip multi-character entities
    if ((_is_ucs4 and len(v) > 1) or
            (not _is_ucs4 and len(v) > 2)):
        continue
    if v != "&":
        if len(v) == 2:
            v = _utils.surrogatePairToCodepoint(v)
        else:
            v = ord(v)
        if v not in _encode_entity_map or k.islower():
            # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
            _encode_entity_map[v] = k


def htmlentityreplace_errors(exc):
    if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
        res = []
        codepoints = []
        skip = False
        for i, c in enumerate(exc.object[exc.start:exc.end]):
            if skip:
                skip = False
                continue
            index = i + exc.start
            if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
                codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2])
                skip = True
            else:
                codepoint = ord(c)
            codepoints.append(codepoint)
        for cp in codepoints:
            e = _encode_entity_map.get(cp)
            if e:
                res.append("&")
                res.append(e)
                if not e.endswith(";"):
                    res.append(";")
            else:
                res.append("&#x%s;" % (hex(cp)[2:]))
        return ("".join(res), exc.end)
    else:
        return xmlcharrefreplace_errors(exc)


register_error("htmlentityreplace", htmlentityreplace_errors)


def serialize(input, tree="etree", encoding=None, **serializer_opts):
    """Serializes the input token stream using the specified treewalker

    :arg input: the token stream to serialize

    :arg tree: the treewalker to use

    :arg encoding: the encoding to use

    :arg serializer_opts: any options to pass to the
        :py:class:`html5lib.serializer.HTMLSerializer` that gets created

    :returns: the tree serialized as a string

    Example:

    >>> from html5lib.html5parser import parse
    >>> from html5lib.serializer import serialize
    >>> token_stream = parse('<html><body><p>Hi!</p></body></html>')
    >>> serialize(token_stream, omit_optional_tags=False)
    '<html><head></head><body><p>Hi!</p></body></html>'

    """
    # XXX: Should we cache this?
    walker = treewalkers.getTreeWalker(tree)
    s = HTMLSerializer(**serializer_opts)
    return s.render(walker(input), encoding)


class HTMLSerializer(object):

    # attribute quoting options
    quote_attr_values = "legacy"  # be secure by default
    quote_char = '"'
    use_best_quote_char = True

    # tag syntax options
    omit_optional_tags = True
    minimize_boolean_attributes = True
    use_trailing_solidus = False
    space_before_trailing_solidus = True

    # escaping options
    escape_lt_in_attrs = False
    escape_rcdata = False
    resolve_entities = True

    # miscellaneous options
    alphabetical_attributes = False
    inject_meta_charset = True
    strip_whitespace = False
    sanitize = False

    options = ("quote_attr_values", "quote_char", "use_best_quote_char",
               "omit_optional_tags", "minimize_boolean_attributes",
               "use_trailing_solidus", "space_before_trailing_solidus",
               "escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
               "alphabetical_attributes", "inject_meta_charset",
               "strip_whitespace", "sanitize")

    def __init__(self, **kwargs):
        """Initialize HTMLSerializer

        :arg inject_meta_charset: Whether or not to inject the meta charset.

            Defaults to ``True``.

        :arg quote_attr_values: Whether to quote attribute values that don't
            require quoting per legacy browser behavior (``"legacy"``), when
            required by the standard (``"spec"``), or always (``"always"``).

            Defaults to ``"legacy"``.

        :arg quote_char: Use given quote character for attribute quoting.

            Defaults to ``"`` which will use double quotes unless attribute
            value contains a double quote, in which case single quotes are
            used.

        :arg escape_lt_in_attrs: Whether or not to escape ``<`` in attribute
            values.

            Defaults to ``False``.

        :arg escape_rcdata: Whether to escape characters that need to be
            escaped within normal elements within rcdata elements such as
            style.

            Defaults to ``False``.

        :arg resolve_entities: Whether to resolve named character entities that
            appear in the source tree. The XML predefined entities &lt; &gt;
            &amp; &quot; &apos; are unaffected by this setting.

            Defaults to ``True``.

        :arg strip_whitespace: Whether to remove semantically meaningless
            whitespace. (This compresses all whitespace to a single space
            except within ``pre``.)

            Defaults to ``False``.

        :arg minimize_boolean_attributes: Shortens boolean attributes to give
            just the attribute value, for example::

              <input disabled="disabled">

            becomes::

              <input disabled>

            Defaults to ``True``.

        :arg use_trailing_solidus: Includes a close-tag slash at the end of the
            start tag of void elements (empty elements whose end tag is
            forbidden). E.g. ``<hr/>``.

            Defaults to ``False``.

        :arg space_before_trailing_solidus: Places a space immediately before
            the closing slash in a tag using a trailing solidus. E.g.
            ``<hr />``. Requires ``use_trailing_solidus=True``.

            Defaults to ``True``.

        :arg sanitize: Strip all unsafe or unknown constructs from output.
            See :py:class:`html5lib.filters.sanitizer.Filter`.

            Defaults to ``False``.

        :arg omit_optional_tags: Omit start/end tags that are optional.

            Defaults to ``True``.

        :arg alphabetical_attributes: Reorder attributes to be in alphabetical
            order.

            Defaults to ``False``.
""" unexpected_args = frozenset(kwargs) - frozenset(self.options) if len(unexpected_args) > 0: raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args))) if 'quote_char' in kwargs: self.use_best_quote_char = False for attr in self.options: setattr(self, attr, kwargs.get(attr, getattr(self, attr))) self.errors = [] self.strict = False def encode(self, string): assert(isinstance(string, text_type)) if self.encoding: return string.encode(self.encoding, "htmlentityreplace") else: return string def encodeStrict(self, string): assert(isinstance(string, text_type)) if self.encoding: return string.encode(self.encoding, "strict") else: return string def serialize(self, treewalker, encoding=None): # pylint:disable=too-many-nested-blocks self.encoding = encoding in_cdata = False self.errors = [] if encoding and self.inject_meta_charset: from .filters.inject_meta_charset import Filter treewalker = Filter(treewalker, encoding) # Alphabetical attributes is here under the assumption that none of # the later filters add or change order of attributes; it needs to be # before the sanitizer so escaped elements come out correctly if self.alphabetical_attributes: from .filters.alphabeticalattributes import Filter treewalker = Filter(treewalker) # WhitespaceFilter should be used before OptionalTagFilter # for maximum efficiently of this latter filter if self.strip_whitespace: from .filters.whitespace import Filter treewalker = Filter(treewalker) if self.sanitize: from .filters.sanitizer import Filter treewalker = Filter(treewalker) if self.omit_optional_tags: from .filters.optionaltags import Filter treewalker = Filter(treewalker) for token in treewalker: type = token["type"] if type == "Doctype": doctype = "<!DOCTYPE %s" % token["name"] if token["publicId"]: doctype += ' PUBLIC "%s"' % token["publicId"] elif token["systemId"]: doctype += " SYSTEM" if token["systemId"]: if token["systemId"].find('"') >= 0: if token["systemId"].find("'") >= 0: self.serializeError("System identifier contains both single and double quote characters") quote_char = "'" else: quote_char = '"' doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char) doctype += ">" yield self.encodeStrict(doctype) elif type in ("Characters", "SpaceCharacters"): if type == "SpaceCharacters" or in_cdata: if in_cdata and token["data"].find("</") >= 0: self.serializeError("Unexpected </ in CDATA") yield self.encode(token["data"]) else: yield self.encode(escape(token["data"])) elif type in ("StartTag", "EmptyTag"): name = token["name"] yield self.encodeStrict("<%s" % name) if name in rcdataElements and not self.escape_rcdata: in_cdata = True elif in_cdata: self.serializeError("Unexpected child element of a CDATA element") for (_, attr_name), attr_value in token["data"].items(): # TODO: Add namespace support here k = attr_name v = attr_value yield self.encodeStrict(' ') yield self.encodeStrict(k) if not self.minimize_boolean_attributes or \ (k not in booleanAttributes.get(name, tuple()) and k not in booleanAttributes.get("", tuple())): yield self.encodeStrict("=") if self.quote_attr_values == "always" or len(v) == 0: quote_attr = True elif self.quote_attr_values == "spec": quote_attr = _quoteAttributeSpec.search(v) is not None elif self.quote_attr_values == "legacy": quote_attr = _quoteAttributeLegacy.search(v) is not None else: raise ValueError("quote_attr_values must be one of: " "'always', 'spec', or 'legacy'") v = v.replace("&", "&amp;") if self.escape_lt_in_attrs: v = v.replace("<", "&lt;") if 
quote_attr: quote_char = self.quote_char if self.use_best_quote_char: if "'" in v and '"' not in v: quote_char = '"' elif '"' in v and "'" not in v: quote_char = "'" if quote_char == "'": v = v.replace("'", "&#39;") else: v = v.replace('"', "&quot;") yield self.encodeStrict(quote_char) yield self.encode(v) yield self.encodeStrict(quote_char) else: yield self.encode(v) if name in voidElements and self.use_trailing_solidus: if self.space_before_trailing_solidus: yield self.encodeStrict(" /") else: yield self.encodeStrict("/") yield self.encode(">") elif type == "EndTag": name = token["name"] if name in rcdataElements: in_cdata = False elif in_cdata: self.serializeError("Unexpected child element of a CDATA element") yield self.encodeStrict("</%s>" % name) elif type == "Comment": data = token["data"] if data.find("--") >= 0: self.serializeError("Comment contains --") yield self.encodeStrict("<!--%s-->" % token["data"]) elif type == "Entity": name = token["name"] key = name + ";" if key not in entities: self.serializeError("Entity %s not recognized" % name) if self.resolve_entities and key not in xmlEntities: data = entities[key] else: data = "&%s;" % name yield self.encodeStrict(data) else: self.serializeError(token["data"]) def render(self, treewalker, encoding=None): """Serializes the stream from the treewalker into a string :arg treewalker: the treewalker to serialize :arg encoding: the string encoding to use :returns: the serialized tree Example: >>> from html5lib import parse, getTreeWalker >>> from html5lib.serializer import HTMLSerializer >>> token_stream = parse('<html><body>Hi!</body></html>') >>> walker = getTreeWalker('etree') >>> serializer = HTMLSerializer(omit_optional_tags=False) >>> serializer.render(walker(token_stream)) '<html><head></head><body>Hi!</body></html>' """ if encoding: return b"".join(list(self.serialize(treewalker, encoding))) else: return "".join(list(self.serialize(treewalker))) def serializeError(self, data="XXX ERROR MESSAGE NEEDED"): # XXX The idea is to make data mandatory. self.errors.append(data) if self.strict: raise SerializeError class SerializeError(Exception): """Error in serialized tree""" pass
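
# --- Illustrative usage sketch (added commentary, not part of the vendored
# module). The keyword arguments accepted by HTMLSerializer(**kwargs) are
# exactly the class attributes listed in ``options`` above; the markup below
# is invented. Guarded so importing the module is unaffected:
if __name__ == "__main__":
    from pip._vendor.html5lib import parse, getTreeWalker

    tree = parse('<html><body><input disabled="disabled"></body></html>')
    walker = getTreeWalker("etree")

    # Defaults: boolean attributes are minimized and optional tags omitted.
    print(HTMLSerializer().render(walker(tree)))

    # Fully explicit output: always quote attribute values and keep all
    # optional start/end tags.
    verbose = HTMLSerializer(quote_attr_values="always",
                             omit_optional_tags=False)
    print(verbose.render(walker(tree)))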
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/html5lib/_tokenizer.py
from __future__ import absolute_import, division, unicode_literals from pip._vendor.six import unichr as chr from collections import deque, OrderedDict from sys import version_info from .constants import spaceCharacters from .constants import entities from .constants import asciiLetters, asciiUpper2Lower from .constants import digits, hexDigits, EOF from .constants import tokenTypes, tagTokenTypes from .constants import replacementCharacters from ._inputstream import HTMLInputStream from ._trie import Trie entitiesTrie = Trie(entities) if version_info >= (3, 7): attributeMap = dict else: attributeMap = OrderedDict class HTMLTokenizer(object): """ This class takes care of tokenizing HTML. * self.currentToken Holds the token that is currently being processed. * self.state Holds a reference to the method to be invoked... XXX * self.stream Points to HTMLInputStream object. """ def __init__(self, stream, parser=None, **kwargs): self.stream = HTMLInputStream(stream, **kwargs) self.parser = parser # Setup the initial tokenizer state self.escapeFlag = False self.lastFourChars = [] self.state = self.dataState self.escape = False # The current token being created self.currentToken = None super(HTMLTokenizer, self).__init__() def __iter__(self): """ This is where the magic happens. We do our usually processing through the states and when we have a token to return we yield the token which pauses processing until the next token is requested. """ self.tokenQueue = deque([]) # Start processing. When EOF is reached self.state will return False # instead of True and the loop will terminate. while self.state(): while self.stream.errors: yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)} while self.tokenQueue: yield self.tokenQueue.popleft() def consumeNumberEntity(self, isHex): """This function returns either U+FFFD or the character based on the decimal or hexadecimal representation. It also discards ";" if present. If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked. """ allowed = digits radix = 10 if isHex: allowed = hexDigits radix = 16 charStack = [] # Consume all the characters that are in range while making sure we # don't hit an EOF. c = self.stream.char() while c in allowed and c is not EOF: charStack.append(c) c = self.stream.char() # Convert the set of characters consumed to an int. charAsInt = int("".join(charStack), radix) # Certain characters get replaced with others if charAsInt in replacementCharacters: char = replacementCharacters[charAsInt] self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) elif ((0xD800 <= charAsInt <= 0xDFFF) or (charAsInt > 0x10FFFF)): char = "\uFFFD" self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) else: # Should speed up this check somehow (e.g. 
move the set to a constant) if ((0x0001 <= charAsInt <= 0x0008) or (0x000E <= charAsInt <= 0x001F) or (0x007F <= charAsInt <= 0x009F) or (0xFDD0 <= charAsInt <= 0xFDEF) or charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE, 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF, 0x10FFFE, 0x10FFFF])): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) try: # Try/except needed as UCS-2 Python builds' unichar only works # within the BMP. char = chr(charAsInt) except ValueError: v = charAsInt - 0x10000 char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF)) # Discard the ; if present. Otherwise, put it back on the queue and # invoke parseError on parser. if c != ";": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "numeric-entity-without-semicolon"}) self.stream.unget(c) return char def consumeEntity(self, allowedChar=None, fromAttribute=False): # Initialise to the default output for when no entity is matched output = "&" charStack = [self.stream.char()] if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or (allowedChar is not None and allowedChar == charStack[0])): self.stream.unget(charStack[0]) elif charStack[0] == "#": # Read the next character to see if it's hex or decimal hex = False charStack.append(self.stream.char()) if charStack[-1] in ("x", "X"): hex = True charStack.append(self.stream.char()) # charStack[-1] should be the first digit if (hex and charStack[-1] in hexDigits) \ or (not hex and charStack[-1] in digits): # At least one digit found, so consume the whole number self.stream.unget(charStack[-1]) output = self.consumeNumberEntity(hex) else: # No digits found self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-numeric-entity"}) self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) else: # At this point in the process might have named entity. Entities # are stored in the global variable "entities". # # Consume characters and compare to these to a substring of the # entity names in the list until the substring no longer matches. while (charStack[-1] is not EOF): if not entitiesTrie.has_keys_with_prefix("".join(charStack)): break charStack.append(self.stream.char()) # At this point we have a string that starts with some characters # that may match an entity # Try to find the longest entity the string will match to take care # of &noti for instance. 
try: entityName = entitiesTrie.longest_prefix("".join(charStack[:-1])) entityLength = len(entityName) except KeyError: entityName = None if entityName is not None: if entityName[-1] != ";": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "named-entity-without-semicolon"}) if (entityName[-1] != ";" and fromAttribute and (charStack[entityLength] in asciiLetters or charStack[entityLength] in digits or charStack[entityLength] == "=")): self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) else: output = entities[entityName] self.stream.unget(charStack.pop()) output += "".join(charStack[entityLength:]) else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-named-entity"}) self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) if fromAttribute: self.currentToken["data"][-1][1] += output else: if output in spaceCharacters: tokenType = "SpaceCharacters" else: tokenType = "Characters" self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output}) def processEntityInAttribute(self, allowedChar): """This method replaces the need for "entityInAttributeValueState". """ self.consumeEntity(allowedChar=allowedChar, fromAttribute=True) def emitCurrentToken(self): """This method is a generic handler for emitting the tags. It also sets the state to "data" because that's what's needed after a token has been emitted. """ token = self.currentToken # Add token to the queue to be yielded if (token["type"] in tagTokenTypes): token["name"] = token["name"].translate(asciiUpper2Lower) if token["type"] == tokenTypes["StartTag"]: raw = token["data"] data = attributeMap(raw) if len(raw) > len(data): # we had some duplicated attribute, fix so first wins data.update(raw[::-1]) token["data"] = data if token["type"] == tokenTypes["EndTag"]: if token["data"]: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "attributes-in-end-tag"}) if token["selfClosing"]: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "self-closing-flag-on-end-tag"}) self.tokenQueue.append(token) self.state = self.dataState # Below are the various tokenizer states worked out. def dataState(self): data = self.stream.char() if data == "&": self.state = self.entityDataState elif data == "<": self.state = self.tagOpenState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\u0000"}) elif data is EOF: # Tokenization ends. return False elif data in spaceCharacters: # Directly after emitting a token you switch back to the "data # state". At that point spaceCharacters are important so they are # emitted separately. self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": data + self.stream.charsUntil(spaceCharacters, True)}) # No need to update lastFourChars here, since the first space will # have already been appended to lastFourChars and will have broken # any <!-- or --> sequences else: chars = self.stream.charsUntil(("&", "<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def entityDataState(self): self.consumeEntity() self.state = self.dataState return True def rcdataState(self): data = self.stream.char() if data == "&": self.state = self.characterReferenceInRcdata elif data == "<": self.state = self.rcdataLessThanSignState elif data == EOF: # Tokenization ends. 
return False elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data in spaceCharacters: # Directly after emitting a token you switch back to the "data # state". At that point spaceCharacters are important so they are # emitted separately. self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": data + self.stream.charsUntil(spaceCharacters, True)}) # No need to update lastFourChars here, since the first space will # have already been appended to lastFourChars and will have broken # any <!-- or --> sequences else: chars = self.stream.charsUntil(("&", "<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def characterReferenceInRcdata(self): self.consumeEntity() self.state = self.rcdataState return True def rawtextState(self): data = self.stream.char() if data == "<": self.state = self.rawtextLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: # Tokenization ends. return False else: chars = self.stream.charsUntil(("<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def scriptDataState(self): data = self.stream.char() if data == "<": self.state = self.scriptDataLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: # Tokenization ends. return False else: chars = self.stream.charsUntil(("<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def plaintextState(self): data = self.stream.char() if data == EOF: # Tokenization ends. return False elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + self.stream.charsUntil("\u0000")}) return True def tagOpenState(self): data = self.stream.char() if data == "!": self.state = self.markupDeclarationOpenState elif data == "/": self.state = self.closeTagOpenState elif data in asciiLetters: self.currentToken = {"type": tokenTypes["StartTag"], "name": data, "data": [], "selfClosing": False, "selfClosingAcknowledged": False} self.state = self.tagNameState elif data == ">": # XXX In theory it could be something besides a tag name. But # do we really care? self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name-but-got-right-bracket"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"}) self.state = self.dataState elif data == "?": # XXX In theory it could be something besides a tag name. But # do we really care? 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name-but-got-question-mark"}) self.stream.unget(data) self.state = self.bogusCommentState else: # XXX self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.dataState return True def closeTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.currentToken = {"type": tokenTypes["EndTag"], "name": data, "data": [], "selfClosing": False} self.state = self.tagNameState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-closing-tag-but-got-right-bracket"}) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-closing-tag-but-got-eof"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.state = self.dataState else: # XXX data can be _'_... self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-closing-tag-but-got-char", "datavars": {"data": data}}) self.stream.unget(data) self.state = self.bogusCommentState return True def tagNameState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeAttributeNameState elif data == ">": self.emitCurrentToken() elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-tag-name"}) self.state = self.dataState elif data == "/": self.state = self.selfClosingStartTagState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["name"] += "\uFFFD" else: self.currentToken["name"] += data # (Don't use charsUntil here, because tag names are # very short and it's faster to not do anything fancy) return True def rcdataLessThanSignState(self): data = self.stream.char() if data == "/": self.temporaryBuffer = "" self.state = self.rcdataEndTagOpenState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.rcdataState return True def rcdataEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer += data self.state = self.rcdataEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.stream.unget(data) self.state = self.rcdataState return True def rcdataEndTagNameState(self): appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() data = self.stream.char() if data in spaceCharacters and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.beforeAttributeNameState elif data == "/" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.selfClosingStartTagState elif data == ">" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</" + self.temporaryBuffer}) self.stream.unget(data) self.state = self.rcdataState return True def rawtextLessThanSignState(self): data = self.stream.char() if data == "/": 
            self.temporaryBuffer = ""
            self.state = self.rawtextEndTagOpenState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.stream.unget(data)
            self.state = self.rawtextState
        return True

    def rawtextEndTagOpenState(self):
        data = self.stream.char()
        if data in asciiLetters:
            self.temporaryBuffer += data
            self.state = self.rawtextEndTagNameState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
            self.stream.unget(data)
            self.state = self.rawtextState
        return True

    def rawtextEndTagNameState(self):
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.rawtextState
        return True

    def scriptDataLessThanSignState(self):
        data = self.stream.char()
        if data == "/":
            self.temporaryBuffer = ""
            self.state = self.scriptDataEndTagOpenState
        elif data == "!":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
            self.state = self.scriptDataEscapeStartState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.stream.unget(data)
            self.state = self.scriptDataState
        return True

    def scriptDataEndTagOpenState(self):
        data = self.stream.char()
        if data in asciiLetters:
            self.temporaryBuffer += data
            self.state = self.scriptDataEndTagNameState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
            self.stream.unget(data)
            self.state = self.scriptDataState
        return True

    def scriptDataEndTagNameState(self):
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.scriptDataState
        return True

    def scriptDataEscapeStartState(self):
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataEscapeStartDashState
        else:
            self.stream.unget(data)
            self.state = self.scriptDataState
        return True

    def scriptDataEscapeStartDashState(self):
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataEscapedDashDashState
        else:
            self.stream.unget(data)
            self.state = self.scriptDataState
        return True

    def scriptDataEscapedState(self):
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataEscapedDashState
        elif data == "<":
            self.state = self.scriptDataEscapedLessThanSignState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"})
        elif data == EOF:
            self.state = self.dataState
        else:
            chars = self.stream.charsUntil(("<", "-", "\u0000"))
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": data + chars})
        return True

    def scriptDataEscapedDashState(self):
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataEscapedDashDashState
        elif data == "<":
            self.state = self.scriptDataEscapedLessThanSignState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"})
            self.state = self.scriptDataEscapedState
        elif data == EOF:
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataEscapedState
        return True

    def scriptDataEscapedDashDashState(self):
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        elif data == "<":
            self.state = self.scriptDataEscapedLessThanSignState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
            self.state = self.scriptDataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"})
            self.state = self.scriptDataEscapedState
        elif data == EOF:
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataEscapedState
        return True

    def scriptDataEscapedLessThanSignState(self):
        data = self.stream.char()
        if data == "/":
            self.temporaryBuffer = ""
            self.state = self.scriptDataEscapedEndTagOpenState
        elif data in asciiLetters:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
            self.temporaryBuffer = data
            self.state = self.scriptDataDoubleEscapeStartState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.stream.unget(data)
            self.state = self.scriptDataEscapedState
        return True

    def scriptDataEscapedEndTagOpenState(self):
        data = self.stream.char()
        if data in asciiLetters:
            self.temporaryBuffer = data
            self.state = self.scriptDataEscapedEndTagNameState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
            self.stream.unget(data)
            self.state = self.scriptDataEscapedState
        return True

    def scriptDataEscapedEndTagNameState(self):
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.scriptDataEscapedState
        return True

    def scriptDataDoubleEscapeStartState(self):
        data = self.stream.char()
        if data in (spaceCharacters | frozenset(("/", ">"))):
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            if self.temporaryBuffer.lower() == "script":
                self.state = self.scriptDataDoubleEscapedState
            else:
                self.state = self.scriptDataEscapedState
        elif data in asciiLetters:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.temporaryBuffer += data
        else:
            self.stream.unget(data)
            self.state = self.scriptDataEscapedState
        return True

    def scriptDataDoubleEscapedState(self):
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataDoubleEscapedDashState
        elif data == "<":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.state = self.scriptDataDoubleEscapedLessThanSignState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"})
        elif data == EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-script-in-script"})
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        return True

    def scriptDataDoubleEscapedDashState(self):
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataDoubleEscapedDashDashState
        elif data == "<":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.state = self.scriptDataDoubleEscapedLessThanSignState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"})
            self.state = self.scriptDataDoubleEscapedState
        elif data == EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-script-in-script"})
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataDoubleEscapedState
        return True

    def scriptDataDoubleEscapedDashDashState(self):
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        elif data == "<":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.state = self.scriptDataDoubleEscapedLessThanSignState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
            self.state = self.scriptDataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"})
            self.state = self.scriptDataDoubleEscapedState
        elif data == EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-script-in-script"})
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataDoubleEscapedState
        return True

    def scriptDataDoubleEscapedLessThanSignState(self):
        data = self.stream.char()
        if data == "/":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
            self.temporaryBuffer = ""
            self.state = self.scriptDataDoubleEscapeEndState
        else:
            self.stream.unget(data)
            self.state = self.scriptDataDoubleEscapedState
        return True

    def scriptDataDoubleEscapeEndState(self):
        data = self.stream.char()
        if data in (spaceCharacters | frozenset(("/", ">"))):
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            if self.temporaryBuffer.lower() == "script":
                self.state = self.scriptDataEscapedState
            else:
                self.state = self.scriptDataDoubleEscapedState
        elif data in asciiLetters:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.temporaryBuffer += data
        else:
            self.stream.unget(data)
            self.state = self.scriptDataDoubleEscapedState
        return True

    def beforeAttributeNameState(self):
        data = self.stream.char()
        if data in spaceCharacters:
            self.stream.charsUntil(spaceCharacters, True)
        elif data in asciiLetters:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data == ">":
            self.emitCurrentToken()
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data in ("'", '"', "=", "<"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-character-in-attribute-name"})
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"].append(["\uFFFD", ""])
            self.state = self.attributeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "expected-attribute-name-but-got-eof"})
            self.state = self.dataState
        else:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        return True

    def attributeNameState(self):
        data = self.stream.char()
        leavingThisState = True
        emitToken = False
        if data == "=":
            self.state = self.beforeAttributeValueState
        elif data in asciiLetters:
            self.currentToken["data"][-1][0] += data +\
                self.stream.charsUntil(asciiLetters, True)
            leavingThisState = False
        elif data == ">":
            # XXX If we emit here the attributes are converted to a dict
            # without being checked and when the code below runs we error
            # because data is a dict not a list
            emitToken = True
        elif data in spaceCharacters:
            self.state = self.afterAttributeNameState
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][0] += "\uFFFD"
            leavingThisState = False
        elif data in ("'", '"', "<"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-character-in-attribute-name"})
            self.currentToken["data"][-1][0] += data
            leavingThisState = False
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-attribute-name"})
            self.state = self.dataState
        else:
            self.currentToken["data"][-1][0] += data
            leavingThisState = False

        if leavingThisState:
            # Attributes are not dropped at this stage. That happens when the
            # start tag token is emitted so values can still be safely appended
            # to attributes, but we do want to report the parse error in time.
            self.currentToken["data"][-1][0] = (
                self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
            for name, _ in self.currentToken["data"][:-1]:
                if self.currentToken["data"][-1][0] == name:
                    self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                            "data": "duplicate-attribute"})
                    break
            # XXX Fix for above XXX
            if emitToken:
                self.emitCurrentToken()
        return True

    def afterAttributeNameState(self):
        data = self.stream.char()
        if data in spaceCharacters:
            self.stream.charsUntil(spaceCharacters, True)
        elif data == "=":
            self.state = self.beforeAttributeValueState
        elif data == ">":
            self.emitCurrentToken()
        elif data in asciiLetters:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"].append(["\uFFFD", ""])
            self.state = self.attributeNameState
        elif data in ("'", '"', "<"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-character-after-attribute-name"})
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "expected-end-of-tag-but-got-eof"})
            self.state = self.dataState
        else:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        return True

    def beforeAttributeValueState(self):
        data = self.stream.char()
        if data in spaceCharacters:
            self.stream.charsUntil(spaceCharacters, True)
        elif data == "\"":
            self.state = self.attributeValueDoubleQuotedState
        elif data == "&":
            self.state = self.attributeValueUnQuotedState
            self.stream.unget(data)
        elif data == "'":
            self.state = self.attributeValueSingleQuotedState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "expected-attribute-value-but-got-right-bracket"})
            self.emitCurrentToken()
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
            self.state = self.attributeValueUnQuotedState
        elif data in ("=", "<", "`"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "equals-in-unquoted-attribute-value"})
            self.currentToken["data"][-1][1] += data
            self.state = self.attributeValueUnQuotedState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "expected-attribute-value-but-got-eof"})
            self.state = self.dataState
        else:
            self.currentToken["data"][-1][1] += data
            self.state = self.attributeValueUnQuotedState
        return True

    def attributeValueDoubleQuotedState(self):
        data = self.stream.char()
        if data == "\"":
            self.state = self.afterAttributeValueState
        elif data == "&":
            self.processEntityInAttribute('"')
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-attribute-value-double-quote"})
            self.state = self.dataState
        else:
            self.currentToken["data"][-1][1] += data +\
                self.stream.charsUntil(("\"", "&", "\u0000"))
        return True

    def attributeValueSingleQuotedState(self):
        data = self.stream.char()
        if data == "'":
            self.state = self.afterAttributeValueState
        elif data == "&":
            self.processEntityInAttribute("'")
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-attribute-value-single-quote"})
            self.state = self.dataState
        else:
            self.currentToken["data"][-1][1] += data +\
                self.stream.charsUntil(("'", "&", "\u0000"))
        return True

    def attributeValueUnQuotedState(self):
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == "&":
            self.processEntityInAttribute(">")
        elif data == ">":
            self.emitCurrentToken()
        elif data in ('"', "'", "=", "<", "`"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-character-in-unquoted-attribute-value"})
            self.currentToken["data"][-1][1] += data
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-attribute-value-no-quotes"})
            self.state = self.dataState
        else:
            self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
                frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
        return True

    def afterAttributeValueState(self):
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == ">":
            self.emitCurrentToken()
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-EOF-after-attribute-value"})
            self.stream.unget(data)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-character-after-attribute-value"})
            self.stream.unget(data)
            self.state = self.beforeAttributeNameState
        return True

    def selfClosingStartTagState(self):
        data = self.stream.char()
        if data == ">":
            self.currentToken["selfClosing"] = True
            self.emitCurrentToken()
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-EOF-after-solidus-in-tag"})
            self.stream.unget(data)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-character-after-solidus-in-tag"})
            self.stream.unget(data)
            self.state = self.beforeAttributeNameState
        return True

    def bogusCommentState(self):
        # Make a new comment token and give it as value all the characters
        # until the first > or EOF (charsUntil checks for EOF automatically)
        # and emit it.
        data = self.stream.charsUntil(">")
        data = data.replace("\u0000", "\uFFFD")
        self.tokenQueue.append(
            {"type": tokenTypes["Comment"], "data": data})

        # Eat the character directly after the bogus comment which is either a
        # ">" or an EOF.
        self.stream.char()
        self.state = self.dataState
        return True

    def markupDeclarationOpenState(self):
        charStack = [self.stream.char()]
        if charStack[-1] == "-":
            charStack.append(self.stream.char())
            if charStack[-1] == "-":
                self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
                self.state = self.commentStartState
                return True
        elif charStack[-1] in ('d', 'D'):
            matched = True
            for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
                             ('y', 'Y'), ('p', 'P'), ('e', 'E')):
                charStack.append(self.stream.char())
                if charStack[-1] not in expected:
                    matched = False
                    break
            if matched:
                self.currentToken = {"type": tokenTypes["Doctype"],
                                     "name": "",
                                     "publicId": None, "systemId": None,
                                     "correct": True}
                self.state = self.doctypeState
                return True
        elif (charStack[-1] == "[" and
              self.parser is not None and
              self.parser.tree.openElements and
              self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
            matched = True
            for expected in ["C", "D", "A", "T", "A", "["]:
                charStack.append(self.stream.char())
                if charStack[-1] != expected:
                    matched = False
                    break
            if matched:
                self.state = self.cdataSectionState
                return True

        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "expected-dashes-or-doctype"})

        while charStack:
            self.stream.unget(charStack.pop())
        self.state = self.bogusCommentState
        return True

    def commentStartState(self):
        data = self.stream.char()
        if data == "-":
            self.state = self.commentStartDashState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "incorrect-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += data
            self.state = self.commentState
        return True

    def commentStartDashState(self):
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "-\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "incorrect-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += "-" + data
            self.state = self.commentState
        return True

    def commentState(self):
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndDashState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += data + \
                self.stream.charsUntil(("-", "\u0000"))
        return True

    def commentEndDashState(self):
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "-\uFFFD"
            self.state = self.commentState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-comment-end-dash"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += "-" + data
            self.state = self.commentState
        return True

    def commentEndState(self):
        data = self.stream.char()
        if data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "--\uFFFD"
            self.state = self.commentState
        elif data == "!":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-bang-after-double-dash-in-comment"})
            self.state = self.commentEndBangState
        elif data == "-":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-dash-after-double-dash-in-comment"})
            self.currentToken["data"] += data
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-comment-double-dash"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            # XXX
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-char-in-comment"})
            self.currentToken["data"] += "--" + data
            self.state = self.commentState
        return True

    def commentEndBangState(self):
        data = self.stream.char()
        if data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "-":
            self.currentToken["data"] += "--!"
            self.state = self.commentEndDashState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "--!\uFFFD"
            self.state = self.commentState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-comment-end-bang-state"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += "--!" + data
            self.state = self.commentState
        return True

    def doctypeState(self):
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeDoctypeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "expected-doctype-name-but-got-eof"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "need-space-after-doctype"})
            self.stream.unget(data)
            self.state = self.beforeDoctypeNameState
        return True

    def beforeDoctypeNameState(self):
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "expected-doctype-name-but-got-right-bracket"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["name"] = "\uFFFD"
            self.state = self.doctypeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "expected-doctype-name-but-got-eof"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["name"] = data
            self.state = self.doctypeNameState
        return True

    def doctypeNameState(self):
        data = self.stream.char()
        if data in spaceCharacters:
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.state = self.afterDoctypeNameState
        elif data == ">":
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["name"] += "\uFFFD"
            self.state = self.doctypeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-doctype-name"})
            self.currentToken["correct"] = False
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["name"] += data
        return True

    def afterDoctypeNameState(self):
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.currentToken["correct"] = False
            self.stream.unget(data)
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-doctype"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            if data in ("p", "P"):
                matched = True
                for expected in (("u", "U"), ("b", "B"), ("l", "L"),
                                 ("i", "I"), ("c", "C")):
                    data = self.stream.char()
                    if data not in expected:
                        matched = False
                        break
                if matched:
                    self.state = self.afterDoctypePublicKeywordState
                    return True
            elif data in ("s", "S"):
                matched = True
                for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
                                 ("e", "E"), ("m", "M")):
                    data = self.stream.char()
                    if data not in expected:
                        matched = False
                        break
                if matched:
                    self.state = self.afterDoctypeSystemKeywordState
                    return True

            # All the characters read before the current 'data' will be
            # [a-zA-Z], so they're garbage in the bogus doctype and can be
            # discarded; only the latest character might be '>' or EOF
            # and needs to be ungetted
            self.stream.unget(data)
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "expected-space-or-right-bracket-in-doctype",
                                    "datavars": {"data": data}})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState

        return True

    def afterDoctypePublicKeywordState(self):
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeDoctypePublicIdentifierState
        elif data in ("'", '"'):
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-char-in-doctype"})
            self.stream.unget(data)
            self.state = self.beforeDoctypePublicIdentifierState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.stream.unget(data)
            self.state = self.beforeDoctypePublicIdentifierState
        return True

    def beforeDoctypePublicIdentifierState(self):
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == "\"":
            self.currentToken["publicId"] = ""
            self.state = self.doctypePublicIdentifierDoubleQuotedState
        elif data == "'":
            self.currentToken["publicId"] = ""
            self.state = self.doctypePublicIdentifierSingleQuotedState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True

    def doctypePublicIdentifierDoubleQuotedState(self):
        data = self.stream.char()
        if data == "\"":
            self.state = self.afterDoctypePublicIdentifierState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["publicId"] += "\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["publicId"] += data
        return True

    def doctypePublicIdentifierSingleQuotedState(self):
        data = self.stream.char()
        if data == "'":
            self.state = self.afterDoctypePublicIdentifierState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["publicId"] += "\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["publicId"] += data
        return True

    def afterDoctypePublicIdentifierState(self):
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.betweenDoctypePublicAndSystemIdentifiersState
        elif data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == '"':
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-char-in-doctype"})
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierDoubleQuotedState
        elif data == "'":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-char-in-doctype"})
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierSingleQuotedState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True

    def betweenDoctypePublicAndSystemIdentifiersState(self):
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == '"':
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierDoubleQuotedState
        elif data == "'":
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierSingleQuotedState
        elif data == EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True

    def afterDoctypeSystemKeywordState(self):
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeDoctypeSystemIdentifierState
        elif data in ("'", '"'):
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-char-in-doctype"})
            self.stream.unget(data)
            self.state = self.beforeDoctypeSystemIdentifierState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.stream.unget(data)
            self.state = self.beforeDoctypeSystemIdentifierState
        return True

    def beforeDoctypeSystemIdentifierState(self):
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == "\"":
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierDoubleQuotedState
        elif data == "'":
            self.currentToken["systemId"] = ""
            self.state = self.doctypeSystemIdentifierSingleQuotedState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True

    def doctypeSystemIdentifierDoubleQuotedState(self):
        data = self.stream.char()
        if data == "\"":
            self.state = self.afterDoctypeSystemIdentifierState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["systemId"] += "\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["systemId"] += data
        return True

    def doctypeSystemIdentifierSingleQuotedState(self):
        data = self.stream.char()
        if data == "'":
            self.state = self.afterDoctypeSystemIdentifierState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["systemId"] += "\uFFFD"
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["systemId"] += data
        return True

    def afterDoctypeSystemIdentifierState(self):
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "unexpected-char-in-doctype"})
            self.state = self.bogusDoctypeState
        return True

    def bogusDoctypeState(self):
        data = self.stream.char()
        if data == ">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            # XXX EMIT
            self.stream.unget(data)
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            pass
        return True

    def cdataSectionState(self):
        data = []
        while True:
            data.append(self.stream.charsUntil("]"))
            data.append(self.stream.charsUntil(">"))
            char = self.stream.char()
            if char == EOF:
                break
            else:
                assert char == ">"
                if data[-1][-2:] == "]]":
                    data[-1] = data[-1][:-2]
                    break
                else:
                    data.append(char)

        data = "".join(data)  # pylint:disable=redefined-variable-type
        # Deal with null here rather than in the parser
        nullCount = data.count("\u0000")
        if nullCount > 0:
            for _ in range(nullCount):
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                        "data": "invalid-codepoint"})
            data = data.replace("\u0000", "\uFFFD")
        if data:
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": data})
        self.state = self.dataState
        return True
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/html5lib/_utils.py
from __future__ import absolute_import, division, unicode_literals

from types import ModuleType

try:
    from collections.abc import Mapping
except ImportError:
    from collections import Mapping

from pip._vendor.six import text_type, PY3

if PY3:
    import xml.etree.ElementTree as default_etree
else:
    try:
        import xml.etree.cElementTree as default_etree
    except ImportError:
        import xml.etree.ElementTree as default_etree


__all__ = ["default_etree", "MethodDispatcher", "isSurrogatePair",
           "surrogatePairToCodepoint", "moduleFactoryFactory",
           "supports_lone_surrogates"]


# Platforms not supporting lone surrogates (\uD800-\uDFFF) should be
# caught by the below test. In general this would be any platform
# using UTF-16 as its encoding of unicode strings, such as
# Jython. This is because UTF-16 itself is based on the use of such
# surrogates, and there is no mechanism to further escape such
# escapes.
try:
    _x = eval('"\\uD800"')  # pylint:disable=eval-used
    if not isinstance(_x, text_type):
        # We need this with u"" because of http://bugs.jython.org/issue2039
        _x = eval('u"\\uD800"')  # pylint:disable=eval-used
        assert isinstance(_x, text_type)
except Exception:
    supports_lone_surrogates = False
else:
    supports_lone_surrogates = True


class MethodDispatcher(dict):
    """Dict with 2 special properties:

    On initiation, keys that are lists, sets or tuples are converted to
    multiple keys so accessing any one of the items in the original
    list-like object returns the matching value

    md = MethodDispatcher({("foo", "bar"):"baz"})
    md["foo"] == "baz"

    A default value which can be set through the default attribute.
    """

    def __init__(self, items=()):
        _dictEntries = []
        for name, value in items:
            if isinstance(name, (list, tuple, frozenset, set)):
                for item in name:
                    _dictEntries.append((item, value))
            else:
                _dictEntries.append((name, value))
        dict.__init__(self, _dictEntries)
        assert len(self) == len(_dictEntries)
        self.default = None

    def __getitem__(self, key):
        return dict.get(self, key, self.default)

    def __get__(self, instance, owner=None):
        return BoundMethodDispatcher(instance, self)


class BoundMethodDispatcher(Mapping):
    """Wraps a MethodDispatcher, binding its return values to `instance`"""
    def __init__(self, instance, dispatcher):
        self.instance = instance
        self.dispatcher = dispatcher

    def __getitem__(self, key):
        # see https://docs.python.org/3/reference/datamodel.html#object.__get__
        # on a function, __get__ is used to bind a function to an
        # instance as a bound method
        return self.dispatcher[key].__get__(self.instance)

    def get(self, key, default):
        if key in self.dispatcher:
            return self[key]
        else:
            return default

    def __iter__(self):
        return iter(self.dispatcher)

    def __len__(self):
        return len(self.dispatcher)

    def __contains__(self, key):
        return key in self.dispatcher


# Some utility functions to deal with weirdness around UCS2 vs UCS4
# python builds

def isSurrogatePair(data):
    return (len(data) == 2 and
            ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and
            ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF)


def surrogatePairToCodepoint(data):
    char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 +
                (ord(data[1]) - 0xDC00))
    return char_val


# Module Factory Factory (no, this isn't Java, I know)
# Here to stop this being duplicated all over the place.

def moduleFactoryFactory(factory):
    moduleCache = {}

    def moduleFactory(baseModule, *args, **kwargs):
        if isinstance(ModuleType.__name__, type("")):
            name = "_%s_factory" % baseModule.__name__
        else:
            name = b"_%s_factory" % baseModule.__name__

        kwargs_tuple = tuple(kwargs.items())

        try:
            return moduleCache[name][args][kwargs_tuple]
        except KeyError:
            mod = ModuleType(name)
            objs = factory(baseModule, *args, **kwargs)
            mod.__dict__.update(objs)
            if "name" not in moduleCache:
                moduleCache[name] = {}
            if "args" not in moduleCache[name]:
                moduleCache[name][args] = {}
            if "kwargs" not in moduleCache[name][args]:
                moduleCache[name][args][kwargs_tuple] = {}
            moduleCache[name][args][kwargs_tuple] = mod
            return mod

    return moduleFactory


def memoize(func):
    cache = {}

    def wrapped(*args, **kwargs):
        key = (tuple(args), tuple(kwargs.items()))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]

    return wrapped
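
# Usage sketches for the helpers above; _dispatch and _fib are illustrative
# names, not part of html5lib. MethodDispatcher fans list/tuple/set keys out
# into one entry per item, and lookups that miss fall back to the `default`
# attribute:

_dispatch = MethodDispatcher([
    (("b", "strong"), "bold handler"),
    ("em", "italic handler"),
])
_dispatch.default = "fallback handler"
assert _dispatch["b"] == "bold handler"
assert _dispatch["strong"] == "bold handler"
assert _dispatch["marquee"] == "fallback handler"

# memoize keys its cache on (args, kwargs), so repeated calls with the same
# arguments are served from the cache instead of recomputed:

@memoize
def _fib(n):
    return n if n < 2 else _fib(n - 1) + _fib(n - 2)

assert _fib(20) == 6765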
0
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/html5lib
qxf2_public_repos/what-is-confusing-backend/venv/Lib/site-packages/pip/_vendor/html5lib/filters/sanitizer.py
"""Deprecated from html5lib 1.1. See `here <https://github.com/html5lib/html5lib-python/issues/443>`_ for information about its deprecation; `Bleach <https://github.com/mozilla/bleach>`_ is recommended as a replacement. Please let us know in the aforementioned issue if Bleach is unsuitable for your needs. """ from __future__ import absolute_import, division, unicode_literals import re import warnings from xml.sax.saxutils import escape, unescape from pip._vendor.six.moves import urllib_parse as urlparse from . import base from ..constants import namespaces, prefixes __all__ = ["Filter"] _deprecation_msg = ( "html5lib's sanitizer is deprecated; see " + "https://github.com/html5lib/html5lib-python/issues/443 and please let " + "us know if Bleach is unsuitable for your needs" ) warnings.warn(_deprecation_msg, DeprecationWarning) allowed_elements = frozenset(( (namespaces['html'], 'a'), (namespaces['html'], 'abbr'), (namespaces['html'], 'acronym'), (namespaces['html'], 'address'), (namespaces['html'], 'area'), (namespaces['html'], 'article'), (namespaces['html'], 'aside'), (namespaces['html'], 'audio'), (namespaces['html'], 'b'), (namespaces['html'], 'big'), (namespaces['html'], 'blockquote'), (namespaces['html'], 'br'), (namespaces['html'], 'button'), (namespaces['html'], 'canvas'), (namespaces['html'], 'caption'), (namespaces['html'], 'center'), (namespaces['html'], 'cite'), (namespaces['html'], 'code'), (namespaces['html'], 'col'), (namespaces['html'], 'colgroup'), (namespaces['html'], 'command'), (namespaces['html'], 'datagrid'), (namespaces['html'], 'datalist'), (namespaces['html'], 'dd'), (namespaces['html'], 'del'), (namespaces['html'], 'details'), (namespaces['html'], 'dfn'), (namespaces['html'], 'dialog'), (namespaces['html'], 'dir'), (namespaces['html'], 'div'), (namespaces['html'], 'dl'), (namespaces['html'], 'dt'), (namespaces['html'], 'em'), (namespaces['html'], 'event-source'), (namespaces['html'], 'fieldset'), (namespaces['html'], 'figcaption'), (namespaces['html'], 'figure'), (namespaces['html'], 'footer'), (namespaces['html'], 'font'), (namespaces['html'], 'form'), (namespaces['html'], 'header'), (namespaces['html'], 'h1'), (namespaces['html'], 'h2'), (namespaces['html'], 'h3'), (namespaces['html'], 'h4'), (namespaces['html'], 'h5'), (namespaces['html'], 'h6'), (namespaces['html'], 'hr'), (namespaces['html'], 'i'), (namespaces['html'], 'img'), (namespaces['html'], 'input'), (namespaces['html'], 'ins'), (namespaces['html'], 'keygen'), (namespaces['html'], 'kbd'), (namespaces['html'], 'label'), (namespaces['html'], 'legend'), (namespaces['html'], 'li'), (namespaces['html'], 'm'), (namespaces['html'], 'map'), (namespaces['html'], 'menu'), (namespaces['html'], 'meter'), (namespaces['html'], 'multicol'), (namespaces['html'], 'nav'), (namespaces['html'], 'nextid'), (namespaces['html'], 'ol'), (namespaces['html'], 'output'), (namespaces['html'], 'optgroup'), (namespaces['html'], 'option'), (namespaces['html'], 'p'), (namespaces['html'], 'pre'), (namespaces['html'], 'progress'), (namespaces['html'], 'q'), (namespaces['html'], 's'), (namespaces['html'], 'samp'), (namespaces['html'], 'section'), (namespaces['html'], 'select'), (namespaces['html'], 'small'), (namespaces['html'], 'sound'), (namespaces['html'], 'source'), (namespaces['html'], 'spacer'), (namespaces['html'], 'span'), (namespaces['html'], 'strike'), (namespaces['html'], 'strong'), (namespaces['html'], 'sub'), (namespaces['html'], 'sup'), (namespaces['html'], 'table'), (namespaces['html'], 'tbody'), (namespaces['html'], 
'td'), (namespaces['html'], 'textarea'), (namespaces['html'], 'time'), (namespaces['html'], 'tfoot'), (namespaces['html'], 'th'), (namespaces['html'], 'thead'), (namespaces['html'], 'tr'), (namespaces['html'], 'tt'), (namespaces['html'], 'u'), (namespaces['html'], 'ul'), (namespaces['html'], 'var'), (namespaces['html'], 'video'), (namespaces['mathml'], 'maction'), (namespaces['mathml'], 'math'), (namespaces['mathml'], 'merror'), (namespaces['mathml'], 'mfrac'), (namespaces['mathml'], 'mi'), (namespaces['mathml'], 'mmultiscripts'), (namespaces['mathml'], 'mn'), (namespaces['mathml'], 'mo'), (namespaces['mathml'], 'mover'), (namespaces['mathml'], 'mpadded'), (namespaces['mathml'], 'mphantom'), (namespaces['mathml'], 'mprescripts'), (namespaces['mathml'], 'mroot'), (namespaces['mathml'], 'mrow'), (namespaces['mathml'], 'mspace'), (namespaces['mathml'], 'msqrt'), (namespaces['mathml'], 'mstyle'), (namespaces['mathml'], 'msub'), (namespaces['mathml'], 'msubsup'), (namespaces['mathml'], 'msup'), (namespaces['mathml'], 'mtable'), (namespaces['mathml'], 'mtd'), (namespaces['mathml'], 'mtext'), (namespaces['mathml'], 'mtr'), (namespaces['mathml'], 'munder'), (namespaces['mathml'], 'munderover'), (namespaces['mathml'], 'none'), (namespaces['svg'], 'a'), (namespaces['svg'], 'animate'), (namespaces['svg'], 'animateColor'), (namespaces['svg'], 'animateMotion'), (namespaces['svg'], 'animateTransform'), (namespaces['svg'], 'clipPath'), (namespaces['svg'], 'circle'), (namespaces['svg'], 'defs'), (namespaces['svg'], 'desc'), (namespaces['svg'], 'ellipse'), (namespaces['svg'], 'font-face'), (namespaces['svg'], 'font-face-name'), (namespaces['svg'], 'font-face-src'), (namespaces['svg'], 'g'), (namespaces['svg'], 'glyph'), (namespaces['svg'], 'hkern'), (namespaces['svg'], 'linearGradient'), (namespaces['svg'], 'line'), (namespaces['svg'], 'marker'), (namespaces['svg'], 'metadata'), (namespaces['svg'], 'missing-glyph'), (namespaces['svg'], 'mpath'), (namespaces['svg'], 'path'), (namespaces['svg'], 'polygon'), (namespaces['svg'], 'polyline'), (namespaces['svg'], 'radialGradient'), (namespaces['svg'], 'rect'), (namespaces['svg'], 'set'), (namespaces['svg'], 'stop'), (namespaces['svg'], 'svg'), (namespaces['svg'], 'switch'), (namespaces['svg'], 'text'), (namespaces['svg'], 'title'), (namespaces['svg'], 'tspan'), (namespaces['svg'], 'use'), )) allowed_attributes = frozenset(( # HTML attributes (None, 'abbr'), (None, 'accept'), (None, 'accept-charset'), (None, 'accesskey'), (None, 'action'), (None, 'align'), (None, 'alt'), (None, 'autocomplete'), (None, 'autofocus'), (None, 'axis'), (None, 'background'), (None, 'balance'), (None, 'bgcolor'), (None, 'bgproperties'), (None, 'border'), (None, 'bordercolor'), (None, 'bordercolordark'), (None, 'bordercolorlight'), (None, 'bottompadding'), (None, 'cellpadding'), (None, 'cellspacing'), (None, 'ch'), (None, 'challenge'), (None, 'char'), (None, 'charoff'), (None, 'choff'), (None, 'charset'), (None, 'checked'), (None, 'cite'), (None, 'class'), (None, 'clear'), (None, 'color'), (None, 'cols'), (None, 'colspan'), (None, 'compact'), (None, 'contenteditable'), (None, 'controls'), (None, 'coords'), (None, 'data'), (None, 'datafld'), (None, 'datapagesize'), (None, 'datasrc'), (None, 'datetime'), (None, 'default'), (None, 'delay'), (None, 'dir'), (None, 'disabled'), (None, 'draggable'), (None, 'dynsrc'), (None, 'enctype'), (None, 'end'), (None, 'face'), (None, 'for'), (None, 'form'), (None, 'frame'), (None, 'galleryimg'), (None, 'gutter'), (None, 'headers'), (None, 'height'), 
(None, 'hidefocus'), (None, 'hidden'), (None, 'high'), (None, 'href'), (None, 'hreflang'), (None, 'hspace'), (None, 'icon'), (None, 'id'), (None, 'inputmode'), (None, 'ismap'), (None, 'keytype'), (None, 'label'), (None, 'leftspacing'), (None, 'lang'), (None, 'list'), (None, 'longdesc'), (None, 'loop'), (None, 'loopcount'), (None, 'loopend'), (None, 'loopstart'), (None, 'low'), (None, 'lowsrc'), (None, 'max'), (None, 'maxlength'), (None, 'media'), (None, 'method'), (None, 'min'), (None, 'multiple'), (None, 'name'), (None, 'nohref'), (None, 'noshade'), (None, 'nowrap'), (None, 'open'), (None, 'optimum'), (None, 'pattern'), (None, 'ping'), (None, 'point-size'), (None, 'poster'), (None, 'pqg'), (None, 'preload'), (None, 'prompt'), (None, 'radiogroup'), (None, 'readonly'), (None, 'rel'), (None, 'repeat-max'), (None, 'repeat-min'), (None, 'replace'), (None, 'required'), (None, 'rev'), (None, 'rightspacing'), (None, 'rows'), (None, 'rowspan'), (None, 'rules'), (None, 'scope'), (None, 'selected'), (None, 'shape'), (None, 'size'), (None, 'span'), (None, 'src'), (None, 'start'), (None, 'step'), (None, 'style'), (None, 'summary'), (None, 'suppress'), (None, 'tabindex'), (None, 'target'), (None, 'template'), (None, 'title'), (None, 'toppadding'), (None, 'type'), (None, 'unselectable'), (None, 'usemap'), (None, 'urn'), (None, 'valign'), (None, 'value'), (None, 'variable'), (None, 'volume'), (None, 'vspace'), (None, 'vrml'), (None, 'width'), (None, 'wrap'), (namespaces['xml'], 'lang'), # MathML attributes (None, 'actiontype'), (None, 'align'), (None, 'columnalign'), (None, 'columnalign'), (None, 'columnalign'), (None, 'columnlines'), (None, 'columnspacing'), (None, 'columnspan'), (None, 'depth'), (None, 'display'), (None, 'displaystyle'), (None, 'equalcolumns'), (None, 'equalrows'), (None, 'fence'), (None, 'fontstyle'), (None, 'fontweight'), (None, 'frame'), (None, 'height'), (None, 'linethickness'), (None, 'lspace'), (None, 'mathbackground'), (None, 'mathcolor'), (None, 'mathvariant'), (None, 'mathvariant'), (None, 'maxsize'), (None, 'minsize'), (None, 'other'), (None, 'rowalign'), (None, 'rowalign'), (None, 'rowalign'), (None, 'rowlines'), (None, 'rowspacing'), (None, 'rowspan'), (None, 'rspace'), (None, 'scriptlevel'), (None, 'selection'), (None, 'separator'), (None, 'stretchy'), (None, 'width'), (None, 'width'), (namespaces['xlink'], 'href'), (namespaces['xlink'], 'show'), (namespaces['xlink'], 'type'), # SVG attributes (None, 'accent-height'), (None, 'accumulate'), (None, 'additive'), (None, 'alphabetic'), (None, 'arabic-form'), (None, 'ascent'), (None, 'attributeName'), (None, 'attributeType'), (None, 'baseProfile'), (None, 'bbox'), (None, 'begin'), (None, 'by'), (None, 'calcMode'), (None, 'cap-height'), (None, 'class'), (None, 'clip-path'), (None, 'color'), (None, 'color-rendering'), (None, 'content'), (None, 'cx'), (None, 'cy'), (None, 'd'), (None, 'dx'), (None, 'dy'), (None, 'descent'), (None, 'display'), (None, 'dur'), (None, 'end'), (None, 'fill'), (None, 'fill-opacity'), (None, 'fill-rule'), (None, 'font-family'), (None, 'font-size'), (None, 'font-stretch'), (None, 'font-style'), (None, 'font-variant'), (None, 'font-weight'), (None, 'from'), (None, 'fx'), (None, 'fy'), (None, 'g1'), (None, 'g2'), (None, 'glyph-name'), (None, 'gradientUnits'), (None, 'hanging'), (None, 'height'), (None, 'horiz-adv-x'), (None, 'horiz-origin-x'), (None, 'id'), (None, 'ideographic'), (None, 'k'), (None, 'keyPoints'), (None, 'keySplines'), (None, 'keyTimes'), (None, 'lang'), (None, 'marker-end'), (None, 
'marker-mid'), (None, 'marker-start'), (None, 'markerHeight'), (None, 'markerUnits'), (None, 'markerWidth'), (None, 'mathematical'), (None, 'max'), (None, 'min'), (None, 'name'), (None, 'offset'), (None, 'opacity'), (None, 'orient'), (None, 'origin'), (None, 'overline-position'), (None, 'overline-thickness'), (None, 'panose-1'), (None, 'path'), (None, 'pathLength'), (None, 'points'), (None, 'preserveAspectRatio'), (None, 'r'), (None, 'refX'), (None, 'refY'), (None, 'repeatCount'), (None, 'repeatDur'), (None, 'requiredExtensions'), (None, 'requiredFeatures'), (None, 'restart'), (None, 'rotate'), (None, 'rx'), (None, 'ry'), (None, 'slope'), (None, 'stemh'), (None, 'stemv'), (None, 'stop-color'), (None, 'stop-opacity'), (None, 'strikethrough-position'), (None, 'strikethrough-thickness'), (None, 'stroke'), (None, 'stroke-dasharray'), (None, 'stroke-dashoffset'), (None, 'stroke-linecap'), (None, 'stroke-linejoin'), (None, 'stroke-miterlimit'), (None, 'stroke-opacity'), (None, 'stroke-width'), (None, 'systemLanguage'), (None, 'target'), (None, 'text-anchor'), (None, 'to'), (None, 'transform'), (None, 'type'), (None, 'u1'), (None, 'u2'), (None, 'underline-position'), (None, 'underline-thickness'), (None, 'unicode'), (None, 'unicode-range'), (None, 'units-per-em'), (None, 'values'), (None, 'version'), (None, 'viewBox'), (None, 'visibility'), (None, 'width'), (None, 'widths'), (None, 'x'), (None, 'x-height'), (None, 'x1'), (None, 'x2'), (namespaces['xlink'], 'actuate'), (namespaces['xlink'], 'arcrole'), (namespaces['xlink'], 'href'), (namespaces['xlink'], 'role'), (namespaces['xlink'], 'show'), (namespaces['xlink'], 'title'), (namespaces['xlink'], 'type'), (namespaces['xml'], 'base'), (namespaces['xml'], 'lang'), (namespaces['xml'], 'space'), (None, 'y'), (None, 'y1'), (None, 'y2'), (None, 'zoomAndPan'), )) attr_val_is_uri = frozenset(( (None, 'href'), (None, 'src'), (None, 'cite'), (None, 'action'), (None, 'longdesc'), (None, 'poster'), (None, 'background'), (None, 'datasrc'), (None, 'dynsrc'), (None, 'lowsrc'), (None, 'ping'), (namespaces['xlink'], 'href'), (namespaces['xml'], 'base'), )) svg_attr_val_allows_ref = frozenset(( (None, 'clip-path'), (None, 'color-profile'), (None, 'cursor'), (None, 'fill'), (None, 'filter'), (None, 'marker'), (None, 'marker-start'), (None, 'marker-mid'), (None, 'marker-end'), (None, 'mask'), (None, 'stroke'), )) svg_allow_local_href = frozenset(( (None, 'altGlyph'), (None, 'animate'), (None, 'animateColor'), (None, 'animateMotion'), (None, 'animateTransform'), (None, 'cursor'), (None, 'feImage'), (None, 'filter'), (None, 'linearGradient'), (None, 'pattern'), (None, 'radialGradient'), (None, 'textpath'), (None, 'tref'), (None, 'set'), (None, 'use') )) allowed_css_properties = frozenset(( 'azimuth', 'background-color', 'border-bottom-color', 'border-collapse', 'border-color', 'border-left-color', 'border-right-color', 'border-top-color', 'clear', 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font', 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight', 'height', 'letter-spacing', 'line-height', 'overflow', 'pause', 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness', 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation', 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent', 'unicode-bidi', 'vertical-align', 'voice-family', 'volume', 'white-space', 'width', )) allowed_css_keywords = frozenset(( 'auto', 'aqua', 'black', 'block', 'blue', 'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 
'dashed', 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left', 'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive', 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top', 'transparent', 'underline', 'white', 'yellow', )) allowed_svg_properties = frozenset(( 'fill', 'fill-opacity', 'fill-rule', 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin', 'stroke-opacity', )) allowed_protocols = frozenset(( 'ed2k', 'ftp', 'http', 'https', 'irc', 'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal', 'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag', 'ssh', 'sftp', 'rtsp', 'afs', 'data', )) allowed_content_types = frozenset(( 'image/png', 'image/jpeg', 'image/gif', 'image/webp', 'image/bmp', 'text/plain', )) data_content_type = re.compile(r''' ^ # Match a content type <application>/<type> (?P<content_type>[-a-zA-Z0-9.]+/[-a-zA-Z0-9.]+) # Match any character set and encoding (?:(?:;charset=(?:[-a-zA-Z0-9]+)(?:;(?:base64))?) |(?:;(?:base64))?(?:;charset=(?:[-a-zA-Z0-9]+))?) # Assume the rest is data ,.* $ ''', re.VERBOSE) class Filter(base.Filter): """Sanitizes token stream of XHTML+MathML+SVG and of inline style attributes""" def __init__(self, source, allowed_elements=allowed_elements, allowed_attributes=allowed_attributes, allowed_css_properties=allowed_css_properties, allowed_css_keywords=allowed_css_keywords, allowed_svg_properties=allowed_svg_properties, allowed_protocols=allowed_protocols, allowed_content_types=allowed_content_types, attr_val_is_uri=attr_val_is_uri, svg_attr_val_allows_ref=svg_attr_val_allows_ref, svg_allow_local_href=svg_allow_local_href): """Creates a Filter :arg allowed_elements: set of elements to allow--everything else will be escaped :arg allowed_attributes: set of attributes to allow in elements--everything else will be stripped :arg allowed_css_properties: set of CSS properties to allow--everything else will be stripped :arg allowed_css_keywords: set of CSS keywords to allow--everything else will be stripped :arg allowed_svg_properties: set of SVG properties to allow--everything else will be removed :arg allowed_protocols: set of allowed protocols for URIs :arg allowed_content_types: set of allowed content types for ``data`` URIs. :arg attr_val_is_uri: set of attributes that have URI values--values that have a scheme not listed in ``allowed_protocols`` are removed :arg svg_attr_val_allows_ref: set of SVG attributes that can have references :arg svg_allow_local_href: set of SVG elements that can have local hrefs--these are removed """ super(Filter, self).__init__(source) warnings.warn(_deprecation_msg, DeprecationWarning) self.allowed_elements = allowed_elements self.allowed_attributes = allowed_attributes self.allowed_css_properties = allowed_css_properties self.allowed_css_keywords = allowed_css_keywords self.allowed_svg_properties = allowed_svg_properties self.allowed_protocols = allowed_protocols self.allowed_content_types = allowed_content_types self.attr_val_is_uri = attr_val_is_uri self.svg_attr_val_allows_ref = svg_attr_val_allows_ref self.svg_allow_local_href = svg_allow_local_href def __iter__(self): for token in base.Filter.__iter__(self): token = self.sanitize_token(token) if token: yield token # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and # stripping out all attributes not in ALLOWED_ATTRIBUTES. Style attributes # are parsed, and a restricted set, specified by ALLOWED_CSS_PROPERTIES and # ALLOWED_CSS_KEYWORDS, are allowed through. 
attributes in ATTR_VAL_IS_URI # are scanned, and only URI schemes specified in ALLOWED_PROTOCOLS are # allowed. # # sanitize_html('<script> do_nasty_stuff() </script>') # => &lt;script> do_nasty_stuff() &lt;/script> # sanitize_html('<a href="javascript: sucker();">Click here for $100</a>') # => <a>Click here for $100</a> def sanitize_token(self, token): # accommodate filters which use token_type differently token_type = token["type"] if token_type in ("StartTag", "EndTag", "EmptyTag"): name = token["name"] namespace = token["namespace"] if ((namespace, name) in self.allowed_elements or (namespace is None and (namespaces["html"], name) in self.allowed_elements)): return self.allowed_token(token) else: return self.disallowed_token(token) elif token_type == "Comment": pass else: return token def allowed_token(self, token): if "data" in token: attrs = token["data"] attr_names = set(attrs.keys()) # Remove forbidden attributes for to_remove in (attr_names - self.allowed_attributes): del token["data"][to_remove] attr_names.remove(to_remove) # Remove attributes with disallowed URL values for attr in (attr_names & self.attr_val_is_uri): assert attr in attrs # I don't have a clue where this regexp comes from or why it matches those # characters, nor why we call unescape. I just know it's always been here. # Should you be worried by this comment in a sanitizer? Yes. On the other hand, all # this will do is remove *more* than it otherwise would. val_unescaped = re.sub("[`\x00-\x20\x7f-\xa0\\s]+", '', unescape(attrs[attr])).lower() # remove replacement characters from unescaped characters val_unescaped = val_unescaped.replace("\ufffd", "") try: uri = urlparse.urlparse(val_unescaped) except ValueError: uri = None del attrs[attr] if uri and uri.scheme: if uri.scheme not in self.allowed_protocols: del attrs[attr] if uri.scheme == 'data': m = data_content_type.match(uri.path) if not m: del attrs[attr] elif m.group('content_type') not in self.allowed_content_types: del attrs[attr] for attr in self.svg_attr_val_allows_ref: if attr in attrs: attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)', ' ', unescape(attrs[attr])) if (token["name"] in self.svg_allow_local_href and (namespaces['xlink'], 'href') in attrs and re.search(r'^\s*[^#\s].*', attrs[(namespaces['xlink'], 'href')])): del attrs[(namespaces['xlink'], 'href')] if (None, 'style') in attrs: attrs[(None, 'style')] = self.sanitize_css(attrs[(None, 'style')]) token["data"] = attrs return token def disallowed_token(self, token): token_type = token["type"] if token_type == "EndTag": token["data"] = "</%s>" % token["name"] elif token["data"]: assert token_type in ("StartTag", "EmptyTag") attrs = [] for (ns, name), v in token["data"].items(): attrs.append(' %s="%s"' % (name if ns is None else "%s:%s" % (prefixes[ns], name), escape(v))) token["data"] = "<%s%s>" % (token["name"], ''.join(attrs)) else: token["data"] = "<%s>" % token["name"] if token.get("selfClosing"): token["data"] = token["data"][:-1] + "/>" token["type"] = "Characters" del token["name"] return token def sanitize_css(self, style): # disallow urls style = re.compile(r'url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style) # gauntlet if not re.match(r"""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return '' if not re.match(r"^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style): return '' clean = [] for prop, value in re.findall(r"([-\w]+)\s*:\s*([^:;]*)", style): if not value: continue if prop.lower() in self.allowed_css_properties: clean.append(prop + ': ' + value + ';') elif 
prop.split('-')[0].lower() in ['background', 'border', 'margin', 'padding']: for keyword in value.split(): if keyword not in self.allowed_css_keywords and \ not re.match(r"^(#[0-9a-fA-F]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword): # noqa break else: clean.append(prop + ': ' + value + ';') elif prop.lower() in self.allowed_svg_properties: clean.append(prop + ': ' + value + ';') return ' '.join(clean)
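
# A sketch of how this filter is wired in: it sits between a tree walker and
# a serializer and rewrites the token stream in flight. _sanitize below is an
# illustrative helper, not part of this module; parseFragment, getTreeWalker
# and HTMLSerializer are html5lib's public API. Note that constructing Filter
# emits the DeprecationWarning declared above.

from pip._vendor import html5lib
from pip._vendor.html5lib.serializer import HTMLSerializer


def _sanitize(fragment):
    # Parse the fragment, walk the resulting tree, filter the token
    # stream, and re-serialize what survives.
    dom = html5lib.parseFragment(fragment)
    walker = html5lib.getTreeWalker("etree")
    return HTMLSerializer().render(Filter(walker(dom)))

# Per the sanitize_html examples in the comment above, a disallowed element
# such as <script> comes back escaped to character data rather than dropped,
# while disallowed attributes (e.g. a javascript: href) are stripped.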
0