content
stringlengths
1
103k
path
stringlengths
8
216
filename
stringlengths
2
179
language
stringclasses
15 values
size_bytes
int64
2
189k
quality_score
float64
0.5
0.95
complexity
float64
0
1
documentation_ratio
float64
0
1
repository
stringclasses
5 values
stars
int64
0
1k
created_date
stringdate
2023-07-10 19:21:08
2025-07-09 19:11:45
license
stringclasses
4 values
is_test
bool
2 classes
file_hash
stringlengths
32
32
\n\n
.venv\Lib\site-packages\fontTools\ttLib\tables\__pycache__\_l_o_c_a.cpython-313.pyc
_l_o_c_a.cpython-313.pyc
Other
4,292
0.8
0
0
awesome-app
916
2025-07-03T08:19:18.244150
GPL-3.0
false
156f5271949381c8221cea042f7308fe
\n\n
.venv\Lib\site-packages\fontTools\ttLib\tables\__pycache__\_l_t_a_g.cpython-313.pyc
_l_t_a_g.cpython-313.pyc
Other
4,517
0.8
0.02381
0
awesome-app
402
2024-11-17T10:17:54.795514
Apache-2.0
false
e7969df854e019f5ad544285e1e58a15
\n\n
.venv\Lib\site-packages\fontTools\ttLib\tables\__pycache__\_m_a_x_p.cpython-313.pyc
_m_a_x_p.cpython-313.pyc
Other
6,637
0.95
0.032787
0.086207
react-lib
12
2023-07-28T00:40:13.848857
GPL-3.0
false
2df73c6a39df06df11aad9b8ef6b69f0
\n\n
.venv\Lib\site-packages\fontTools\ttLib\tables\__pycache__\_m_e_t_a.cpython-313.pyc
_m_e_t_a.cpython-313.pyc
Other
5,522
0.8
0.016393
0
awesome-app
252
2025-03-18T07:29:09.545825
Apache-2.0
false
7ee83369642c3fa2211b0ad58bf5a469
\n\n
.venv\Lib\site-packages\fontTools\ttLib\tables\__pycache__\_m_o_r_t.cpython-313.pyc
_m_o_r_t.cpython-313.pyc
Other
803
0.8
0.222222
0
react-lib
501
2023-09-13T07:56:27.419821
MIT
false
e515cf5cef9ee6304a7ce4ce33f11654
\n\n
.venv\Lib\site-packages\fontTools\ttLib\tables\__pycache__\_m_o_r_x.cpython-313.pyc
_m_o_r_x.cpython-313.pyc
Other
860
0.8
0.25
0
awesome-app
682
2023-12-20T08:23:26.416865
BSD-3-Clause
false
72b972a01a3e497e4749c3424077e79b
\n\n
.venv\Lib\site-packages\fontTools\ttLib\tables\__pycache__\_n_a_m_e.cpython-313.pyc
_n_a_m_e.cpython-313.pyc
Other
46,313
0.95
0.064267
0
vue-tools
634
2023-12-09T02:20:55.242738
MIT
false
dcfb6c1a2c2c630e07efb2007f5ce1ec
\n\n
.venv\Lib\site-packages\fontTools\ttLib\tables\__pycache__\_o_p_b_d.cpython-313.pyc
_o_p_b_d.cpython-313.pyc
Other
761
0.8
0.222222
0
node-utils
986
2024-04-13T22:32:25.737802
BSD-3-Clause
false
e6ad2d34e7d729680c308f84464f194f
\n\n
.venv\Lib\site-packages\fontTools\ttLib\tables\__pycache__\_p_o_s_t.cpython-313.pyc
_p_o_s_t.cpython-313.pyc
Other
14,600
0.95
0.055944
0.043478
vue-tools
693
2025-06-06T17:38:52.449090
BSD-3-Clause
false
0108f73b250c9c3cca9948233efedf6c
\n\n
.venv\Lib\site-packages\fontTools\ttLib\tables\__pycache__\_p_r_e_p.cpython-313.pyc
_p_r_e_p.cpython-313.pyc
Other
870
0.8
0
0
vue-tools
782
2025-01-26T02:03:34.844225
Apache-2.0
false
28f003445d8c46353460b24a8e9cb724
\n\n
.venv\Lib\site-packages\fontTools\ttLib\tables\__pycache__\_p_r_o_p.cpython-313.pyc
_p_r_o_p.cpython-313.pyc
Other
744
0.8
0
0
awesome-app
544
2023-08-17T20:05:10.483862
MIT
false
c685d8906984a06e6d98fce09dddbdb9
\n\n
.venv\Lib\site-packages\fontTools\ttLib\tables\__pycache__\_s_b_i_x.cpython-313.pyc
_s_b_i_x.cpython-313.pyc
Other
6,291
0.8
0.063492
0.133333
python-kit
631
2023-07-20T02:07:01.126067
BSD-3-Clause
false
472a596db76edc6ec96c356c13c3973e
\n\n
.venv\Lib\site-packages\fontTools\ttLib\tables\__pycache__\_t_r_a_k.cpython-313.pyc
_t_r_a_k.cpython-313.pyc
Other
15,664
0.8
0
0
awesome-app
860
2024-07-27T17:39:20.401585
BSD-3-Clause
false
fcaf2d277a2e74f059ae30809603a038
\n\n
.venv\Lib\site-packages\fontTools\ttLib\tables\__pycache__\_v_h_e_a.cpython-313.pyc
_v_h_e_a.cpython-313.pyc
Other
6,027
0.95
0.025974
0
python-kit
656
2023-08-26T00:36:40.329045
Apache-2.0
false
36308a4cefac1423774835c4b760fa50
\n\n
.venv\Lib\site-packages\fontTools\ttLib\tables\__pycache__\_v_m_t_x.cpython-313.pyc
_v_m_t_x.cpython-313.pyc
Other
959
0.8
0.1
0
vue-tools
136
2024-09-19T20:33:21.957326
MIT
false
57b0a5a778ed5513ee22e2c699749bcb
\n\n
.venv\Lib\site-packages\fontTools\ttLib\tables\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
3,907
0.95
0.047619
0
node-utils
103
2023-11-01T22:13:06.802852
BSD-3-Clause
false
3772e02c57309047b28ebf447ee9cab5
\n\n
.venv\Lib\site-packages\fontTools\ttLib\__pycache__\macUtils.cpython-313.pyc
macUtils.cpython-313.pyc
Other
2,787
0.8
0.043478
0
node-utils
458
2024-12-22T07:31:19.918885
GPL-3.0
false
6a2c06ccf0d39df8ccc5fb711502e60d
\n\n
.venv\Lib\site-packages\fontTools\ttLib\__pycache__\removeOverlaps.cpython-313.pyc
removeOverlaps.cpython-313.pyc
Other
15,492
0.8
0.067901
0.013158
node-utils
443
2025-03-17T09:27:15.447311
Apache-2.0
false
4ad65fb3f102912527f5269cda8717dc
\n\n
.venv\Lib\site-packages\fontTools\ttLib\__pycache__\reorderGlyphs.cpython-313.pyc
reorderGlyphs.cpython-313.pyc
Other
13,310
0.8
0
0.017699
awesome-app
744
2025-03-14T07:26:52.788342
Apache-2.0
false
15fee8a81eb3a7967f60ecdafeec90bd
\n\n
.venv\Lib\site-packages\fontTools\ttLib\__pycache__\scaleUpem.cpython-313.pyc
scaleUpem.cpython-313.pyc
Other
18,291
0.8
0
0
vue-tools
275
2023-10-15T16:48:07.229029
GPL-3.0
false
557a9d9b00e879031d73bbfec9815559
\n\n
.venv\Lib\site-packages\fontTools\ttLib\__pycache__\sfnt.cpython-313.pyc
sfnt.cpython-313.pyc
Other
30,591
0.95
0.016129
0.017167
vue-tools
293
2025-05-14T01:36:14.593619
GPL-3.0
false
e23754f8f095031aec87277ec25bd6dc
\n\n
.venv\Lib\site-packages\fontTools\ttLib\__pycache__\standardGlyphOrder.cpython-313.pyc
standardGlyphOrder.cpython-313.pyc
Other
2,347
0.7
0
0
react-lib
330
2024-01-25T13:11:27.170593
GPL-3.0
false
38b543ed33e448c36982974b8ed515ca
\n\n
.venv\Lib\site-packages\fontTools\ttLib\__pycache__\ttCollection.cpython-313.pyc
ttCollection.cpython-313.pyc
Other
5,813
0.8
0.066667
0
awesome-app
275
2023-12-29T02:20:20.967532
BSD-3-Clause
false
6a8c4338c3d0eb00722d88c6e7b36efd
\n\n
.venv\Lib\site-packages\fontTools\ttLib\__pycache__\ttFont.cpython-313.pyc
ttFont.cpython-313.pyc
Other
43,584
0.95
0.085393
0.002519
react-lib
749
2024-05-15T14:54:58.756243
BSD-3-Clause
false
1b70dd2780f533029c27bea1e4635f03
\n\n
.venv\Lib\site-packages\fontTools\ttLib\__pycache__\ttGlyphSet.cpython-313.pyc
ttGlyphSet.cpython-313.pyc
Other
25,867
0.95
0.013889
0
awesome-app
14
2025-03-24T01:03:15.070832
GPL-3.0
false
1cce9ac583ef06c8737baa633dd1c553
\n\n
.venv\Lib\site-packages\fontTools\ttLib\__pycache__\ttVisitor.cpython-313.pyc
ttVisitor.cpython-313.pyc
Other
1,768
0.8
0
0
vue-tools
858
2025-06-21T08:35:28.483700
BSD-3-Clause
false
b74a2f255a162e359d0c04aa4cdc8412
\n\n
.venv\Lib\site-packages\fontTools\ttLib\__pycache__\woff2.cpython-313.pyc
woff2.cpython-313.pyc
Other
73,981
0.75
0.026906
0.00155
vue-tools
437
2023-10-29T11:55:04.396880
GPL-3.0
false
d6e6e285976b5c30d78b533b792393b0
\n\n
.venv\Lib\site-packages\fontTools\ttLib\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
1,523
0.8
0.076923
0
react-lib
493
2024-10-20T08:31:05.866765
GPL-3.0
false
5a006248c03fa5ea83de79991c5ef9da
\n\n
.venv\Lib\site-packages\fontTools\ttLib\__pycache__\__main__.cpython-313.pyc
__main__.cpython-313.pyc
Other
5,309
0.8
0.035294
0
python-kit
517
2024-09-20T23:58:40.987427
BSD-3-Clause
false
c86b3dc1b5b04ffe6a65d4f1d27e5dbb
"""\nFunctions for converting UFO1 or UFO2 files into UFO3 format.\n\nCurrently provides functionality for converting kerning rules\nand kerning groups. Conversion is only supported _from_ UFO1\nor UFO2, and _to_ UFO3.\n"""\n\n# adapted from the UFO spec\n\n\ndef convertUFO1OrUFO2KerningToUFO3Kerning(kerning, groups, glyphSet=()):\n """Convert kerning data in UFO1 or UFO2 syntax into UFO3 syntax.\n\n Args:\n kerning:\n A dictionary containing the kerning rules defined in\n the UFO font, as used in :class:`.UFOReader` objects.\n groups:\n A dictionary containing the groups defined in the UFO\n font, as used in :class:`.UFOReader` objects.\n glyphSet:\n Optional; a set of glyph objects to skip (default: None).\n\n Returns:\n 1. A dictionary representing the converted kerning data.\n 2. A copy of the groups dictionary, with all groups renamed to UFO3 syntax.\n 3. A dictionary containing the mapping of old group names to new group names.\n\n """\n # gather known kerning groups based on the prefixes\n firstReferencedGroups, secondReferencedGroups = findKnownKerningGroups(groups)\n # Make lists of groups referenced in kerning pairs.\n for first, seconds in list(kerning.items()):\n if first in groups and first not in glyphSet:\n if not first.startswith("public.kern1."):\n firstReferencedGroups.add(first)\n for second in list(seconds.keys()):\n if second in groups and second not in glyphSet:\n if not second.startswith("public.kern2."):\n secondReferencedGroups.add(second)\n # Create new names for these groups.\n firstRenamedGroups = {}\n for first in firstReferencedGroups:\n # Make a list of existing group names.\n existingGroupNames = list(groups.keys()) + list(firstRenamedGroups.keys())\n # Remove the old prefix from the name\n newName = first.replace("@MMK_L_", "")\n # Add the new prefix to the name.\n newName = "public.kern1." + newName\n # Make a unique group name.\n newName = makeUniqueGroupName(newName, existingGroupNames)\n # Store for use later.\n firstRenamedGroups[first] = newName\n secondRenamedGroups = {}\n for second in secondReferencedGroups:\n # Make a list of existing group names.\n existingGroupNames = list(groups.keys()) + list(secondRenamedGroups.keys())\n # Remove the old prefix from the name\n newName = second.replace("@MMK_R_", "")\n # Add the new prefix to the name.\n newName = "public.kern2." + newName\n # Make a unique group name.\n newName = makeUniqueGroupName(newName, existingGroupNames)\n # Store for use later.\n secondRenamedGroups[second] = newName\n # Populate the new group names into the kerning dictionary as needed.\n newKerning = {}\n for first, seconds in list(kerning.items()):\n first = firstRenamedGroups.get(first, first)\n newSeconds = {}\n for second, value in list(seconds.items()):\n second = secondRenamedGroups.get(second, second)\n newSeconds[second] = value\n newKerning[first] = newSeconds\n # Make copies of the referenced groups and store them\n # under the new names in the overall groups dictionary.\n allRenamedGroups = list(firstRenamedGroups.items())\n allRenamedGroups += list(secondRenamedGroups.items())\n for oldName, newName in allRenamedGroups:\n group = list(groups[oldName])\n groups[newName] = group\n # Return the kerning and the groups.\n return newKerning, groups, dict(side1=firstRenamedGroups, side2=secondRenamedGroups)\n\n\ndef findKnownKerningGroups(groups):\n """Find all kerning groups in a UFO1 or UFO2 font that use known prefixes.\n\n In some cases, not all kerning groups will be referenced\n by the kerning pairs in a UFO. The algorithm for locating\n groups in :func:`convertUFO1OrUFO2KerningToUFO3Kerning` will\n miss these unreferenced groups. By scanning for known prefixes,\n this function will catch all of the prefixed groups.\n\n The prefixes and sides by this function are:\n\n @MMK_L_ - side 1\n @MMK_R_ - side 2\n\n as defined in the UFO1 specification.\n\n Args:\n groups:\n A dictionary containing the groups defined in the UFO\n font, as read by :class:`.UFOReader`.\n\n Returns:\n Two sets; the first containing the names of all\n first-side kerning groups identified in the ``groups``\n dictionary, and the second containing the names of all\n second-side kerning groups identified.\n\n "First-side" and "second-side" are with respect to the\n writing direction of the script.\n\n Example::\n\n >>> testGroups = {\n ... "@MMK_L_1" : None,\n ... "@MMK_L_2" : None,\n ... "@MMK_L_3" : None,\n ... "@MMK_R_1" : None,\n ... "@MMK_R_2" : None,\n ... "@MMK_R_3" : None,\n ... "@MMK_l_1" : None,\n ... "@MMK_r_1" : None,\n ... "@MMK_X_1" : None,\n ... "foo" : None,\n ... }\n >>> first, second = findKnownKerningGroups(testGroups)\n >>> sorted(first) == ['@MMK_L_1', '@MMK_L_2', '@MMK_L_3']\n True\n >>> sorted(second) == ['@MMK_R_1', '@MMK_R_2', '@MMK_R_3']\n True\n """\n knownFirstGroupPrefixes = ["@MMK_L_"]\n knownSecondGroupPrefixes = ["@MMK_R_"]\n firstGroups = set()\n secondGroups = set()\n for groupName in list(groups.keys()):\n for firstPrefix in knownFirstGroupPrefixes:\n if groupName.startswith(firstPrefix):\n firstGroups.add(groupName)\n break\n for secondPrefix in knownSecondGroupPrefixes:\n if groupName.startswith(secondPrefix):\n secondGroups.add(groupName)\n break\n return firstGroups, secondGroups\n\n\ndef makeUniqueGroupName(name, groupNames, counter=0):\n """Make a kerning group name that will be unique within the set of group names.\n\n If the requested kerning group name already exists within the set, this\n will return a new name by adding an incremented counter to the end\n of the requested name.\n\n Args:\n name:\n The requested kerning group name.\n groupNames:\n A list of the existing kerning group names.\n counter:\n Optional; a counter of group names already seen (default: 0). If\n :attr:`.counter` is not provided, the function will recurse,\n incrementing the value of :attr:`.counter` until it finds the\n first unused ``name+counter`` combination, and return that result.\n\n Returns:\n A unique kerning group name composed of the requested name suffixed\n by the smallest available integer counter.\n """\n # Add a number to the name if the counter is higher than zero.\n newName = name\n if counter > 0:\n newName = "%s%d" % (newName, counter)\n # If the new name is in the existing group names, recurse.\n if newName in groupNames:\n return makeUniqueGroupName(name, groupNames, counter + 1)\n # Otherwise send back the new name.\n return newName\n\n\ndef test():\n """\n Tests for :func:`.convertUFO1OrUFO2KerningToUFO3Kerning`.\n\n No known prefixes.\n\n >>> testKerning = {\n ... "A" : {\n ... "A" : 1,\n ... "B" : 2,\n ... "CGroup" : 3,\n ... "DGroup" : 4\n ... },\n ... "BGroup" : {\n ... "A" : 5,\n ... "B" : 6,\n ... "CGroup" : 7,\n ... "DGroup" : 8\n ... },\n ... "CGroup" : {\n ... "A" : 9,\n ... "B" : 10,\n ... "CGroup" : 11,\n ... "DGroup" : 12\n ... },\n ... }\n >>> testGroups = {\n ... "BGroup" : ["B"],\n ... "CGroup" : ["C"],\n ... "DGroup" : ["D"],\n ... }\n >>> kerning, groups, maps = convertUFO1OrUFO2KerningToUFO3Kerning(\n ... testKerning, testGroups, [])\n >>> expected = {\n ... "A" : {\n ... "A": 1,\n ... "B": 2,\n ... "public.kern2.CGroup": 3,\n ... "public.kern2.DGroup": 4\n ... },\n ... "public.kern1.BGroup": {\n ... "A": 5,\n ... "B": 6,\n ... "public.kern2.CGroup": 7,\n ... "public.kern2.DGroup": 8\n ... },\n ... "public.kern1.CGroup": {\n ... "A": 9,\n ... "B": 10,\n ... "public.kern2.CGroup": 11,\n ... "public.kern2.DGroup": 12\n ... }\n ... }\n >>> kerning == expected\n True\n >>> expected = {\n ... "BGroup": ["B"],\n ... "CGroup": ["C"],\n ... "DGroup": ["D"],\n ... "public.kern1.BGroup": ["B"],\n ... "public.kern1.CGroup": ["C"],\n ... "public.kern2.CGroup": ["C"],\n ... "public.kern2.DGroup": ["D"],\n ... }\n >>> groups == expected\n True\n\n Known prefixes.\n\n >>> testKerning = {\n ... "A" : {\n ... "A" : 1,\n ... "B" : 2,\n ... "@MMK_R_CGroup" : 3,\n ... "@MMK_R_DGroup" : 4\n ... },\n ... "@MMK_L_BGroup" : {\n ... "A" : 5,\n ... "B" : 6,\n ... "@MMK_R_CGroup" : 7,\n ... "@MMK_R_DGroup" : 8\n ... },\n ... "@MMK_L_CGroup" : {\n ... "A" : 9,\n ... "B" : 10,\n ... "@MMK_R_CGroup" : 11,\n ... "@MMK_R_DGroup" : 12\n ... },\n ... }\n >>> testGroups = {\n ... "@MMK_L_BGroup" : ["B"],\n ... "@MMK_L_CGroup" : ["C"],\n ... "@MMK_L_XGroup" : ["X"],\n ... "@MMK_R_CGroup" : ["C"],\n ... "@MMK_R_DGroup" : ["D"],\n ... "@MMK_R_XGroup" : ["X"],\n ... }\n >>> kerning, groups, maps = convertUFO1OrUFO2KerningToUFO3Kerning(\n ... testKerning, testGroups, [])\n >>> expected = {\n ... "A" : {\n ... "A": 1,\n ... "B": 2,\n ... "public.kern2.CGroup": 3,\n ... "public.kern2.DGroup": 4\n ... },\n ... "public.kern1.BGroup": {\n ... "A": 5,\n ... "B": 6,\n ... "public.kern2.CGroup": 7,\n ... "public.kern2.DGroup": 8\n ... },\n ... "public.kern1.CGroup": {\n ... "A": 9,\n ... "B": 10,\n ... "public.kern2.CGroup": 11,\n ... "public.kern2.DGroup": 12\n ... }\n ... }\n >>> kerning == expected\n True\n >>> expected = {\n ... "@MMK_L_BGroup": ["B"],\n ... "@MMK_L_CGroup": ["C"],\n ... "@MMK_L_XGroup": ["X"],\n ... "@MMK_R_CGroup": ["C"],\n ... "@MMK_R_DGroup": ["D"],\n ... "@MMK_R_XGroup": ["X"],\n ... "public.kern1.BGroup": ["B"],\n ... "public.kern1.CGroup": ["C"],\n ... "public.kern1.XGroup": ["X"],\n ... "public.kern2.CGroup": ["C"],\n ... "public.kern2.DGroup": ["D"],\n ... "public.kern2.XGroup": ["X"],\n ... }\n >>> groups == expected\n True\n\n >>> from .validators import kerningValidator\n >>> kerningValidator(kerning)\n (True, None)\n\n Mixture of known prefixes and groups without prefixes.\n\n >>> testKerning = {\n ... "A" : {\n ... "A" : 1,\n ... "B" : 2,\n ... "@MMK_R_CGroup" : 3,\n ... "DGroup" : 4\n ... },\n ... "BGroup" : {\n ... "A" : 5,\n ... "B" : 6,\n ... "@MMK_R_CGroup" : 7,\n ... "DGroup" : 8\n ... },\n ... "@MMK_L_CGroup" : {\n ... "A" : 9,\n ... "B" : 10,\n ... "@MMK_R_CGroup" : 11,\n ... "DGroup" : 12\n ... },\n ... }\n >>> testGroups = {\n ... "BGroup" : ["B"],\n ... "@MMK_L_CGroup" : ["C"],\n ... "@MMK_R_CGroup" : ["C"],\n ... "DGroup" : ["D"],\n ... }\n >>> kerning, groups, maps = convertUFO1OrUFO2KerningToUFO3Kerning(\n ... testKerning, testGroups, [])\n >>> expected = {\n ... "A" : {\n ... "A": 1,\n ... "B": 2,\n ... "public.kern2.CGroup": 3,\n ... "public.kern2.DGroup": 4\n ... },\n ... "public.kern1.BGroup": {\n ... "A": 5,\n ... "B": 6,\n ... "public.kern2.CGroup": 7,\n ... "public.kern2.DGroup": 8\n ... },\n ... "public.kern1.CGroup": {\n ... "A": 9,\n ... "B": 10,\n ... "public.kern2.CGroup": 11,\n ... "public.kern2.DGroup": 12\n ... }\n ... }\n >>> kerning == expected\n True\n >>> expected = {\n ... "BGroup": ["B"],\n ... "@MMK_L_CGroup": ["C"],\n ... "@MMK_R_CGroup": ["C"],\n ... "DGroup": ["D"],\n ... "public.kern1.BGroup": ["B"],\n ... "public.kern1.CGroup": ["C"],\n ... "public.kern2.CGroup": ["C"],\n ... "public.kern2.DGroup": ["D"],\n ... }\n >>> groups == expected\n True\n """\n\n\nif __name__ == "__main__":\n import doctest\n\n doctest.testmod()\n
.venv\Lib\site-packages\fontTools\ufoLib\converters.py
converters.py
Python
13,442
0.95
0.09799
0.057851
node-utils
191
2025-07-03T07:57:41.539948
MIT
false
aa23fd672c9e68249ef8d3038deff054
"""DEPRECATED - This module is kept here only as a backward compatibility shim\nfor the old ufoLib.etree module, which was moved to :mod:`fontTools.misc.etree`.\nPlease use the latter instead.\n"""\n\nfrom fontTools.misc.etree import *\n
.venv\Lib\site-packages\fontTools\ufoLib\etree.py
etree.py
Python
237
0.85
0.166667
0
react-lib
415
2024-11-17T09:42:25.945908
MIT
false
f1d1a1c8f330642de1a6a3535fc43d79
"""\nGeneric module for reading and writing the .glif format.\n\nMore info about the .glif format (GLyphInterchangeFormat) can be found here:\n\n http://unifiedfontobject.org\n\nThe main class in this module is :class:`GlyphSet`. It manages a set of .glif files\nin a folder. It offers two ways to read glyph data, and one way to write\nglyph data. See the class doc string for details.\n"""\n\nfrom __future__ import annotations\n\nimport logging\nimport enum\nfrom warnings import warn\nfrom collections import OrderedDict\nimport fs\nimport fs.base\nimport fs.errors\nimport fs.osfs\nimport fs.path\nfrom fontTools.misc.textTools import tobytes\nfrom fontTools.misc import plistlib\nfrom fontTools.pens.pointPen import AbstractPointPen, PointToSegmentPen\nfrom fontTools.ufoLib.errors import GlifLibError\nfrom fontTools.ufoLib.filenames import userNameToFileName\nfrom fontTools.ufoLib.validators import (\n genericTypeValidator,\n colorValidator,\n guidelinesValidator,\n anchorsValidator,\n identifierValidator,\n imageValidator,\n glyphLibValidator,\n)\nfrom fontTools.misc import etree\nfrom fontTools.ufoLib import _UFOBaseIO, UFOFormatVersion\nfrom fontTools.ufoLib.utils import numberTypes, _VersionTupleEnumMixin\n\n\n__all__ = [\n "GlyphSet",\n "GlifLibError",\n "readGlyphFromString",\n "writeGlyphToString",\n "glyphNameToFileName",\n]\n\nlogger = logging.getLogger(__name__)\n\n\n# ---------\n# Constants\n# ---------\n\nCONTENTS_FILENAME = "contents.plist"\nLAYERINFO_FILENAME = "layerinfo.plist"\n\n\nclass GLIFFormatVersion(tuple, _VersionTupleEnumMixin, enum.Enum):\n """Class representing the versions of the .glif format supported by the UFO version in use.\n\n For a given :mod:`fontTools.ufoLib.UFOFormatVersion`, the :func:`supported_versions` method will\n return the supported versions of the GLIF file format. If the UFO version is unspecified, the\n :func:`supported_versions` method will return all available GLIF format versions.\n """\n\n FORMAT_1_0 = (1, 0)\n FORMAT_2_0 = (2, 0)\n\n @classmethod\n def default(cls, ufoFormatVersion=None):\n if ufoFormatVersion is not None:\n return max(cls.supported_versions(ufoFormatVersion))\n return super().default()\n\n @classmethod\n def supported_versions(cls, ufoFormatVersion=None):\n if ufoFormatVersion is None:\n # if ufo format unspecified, return all the supported GLIF formats\n return super().supported_versions()\n # else only return the GLIF formats supported by the given UFO format\n versions = {cls.FORMAT_1_0}\n if ufoFormatVersion >= UFOFormatVersion.FORMAT_3_0:\n versions.add(cls.FORMAT_2_0)\n return frozenset(versions)\n\n\n# workaround for py3.11, see https://github.com/fonttools/fonttools/pull/2655\nGLIFFormatVersion.__str__ = _VersionTupleEnumMixin.__str__\n\n\n# ------------\n# Simple Glyph\n# ------------\n\n\nclass Glyph:\n """\n Minimal glyph object. It has no glyph attributes until either\n the draw() or the drawPoints() method has been called.\n """\n\n def __init__(self, glyphName, glyphSet):\n self.glyphName = glyphName\n self.glyphSet = glyphSet\n\n def draw(self, pen, outputImpliedClosingLine=False):\n """\n Draw this glyph onto a *FontTools* Pen.\n """\n pointPen = PointToSegmentPen(\n pen, outputImpliedClosingLine=outputImpliedClosingLine\n )\n self.drawPoints(pointPen)\n\n def drawPoints(self, pointPen):\n """\n Draw this glyph onto a PointPen.\n """\n self.glyphSet.readGlyph(self.glyphName, self, pointPen)\n\n\n# ---------\n# Glyph Set\n# ---------\n\n\nclass GlyphSet(_UFOBaseIO):\n """\n GlyphSet manages a set of .glif files inside one directory.\n\n GlyphSet's constructor takes a path to an existing directory as it's\n first argument. Reading glyph data can either be done through the\n readGlyph() method, or by using GlyphSet's dictionary interface, where\n the keys are glyph names and the values are (very) simple glyph objects.\n\n To write a glyph to the glyph set, you use the writeGlyph() method.\n The simple glyph objects returned through the dict interface do not\n support writing, they are just a convenient way to get at the glyph data.\n """\n\n glyphClass = Glyph\n\n def __init__(\n self,\n path,\n glyphNameToFileNameFunc=None,\n ufoFormatVersion=None,\n validateRead=True,\n validateWrite=True,\n expectContentsFile=False,\n ):\n """\n 'path' should be a path (string) to an existing local directory, or\n an instance of fs.base.FS class.\n\n The optional 'glyphNameToFileNameFunc' argument must be a callback\n function that takes two arguments: a glyph name and a list of all\n existing filenames (if any exist). It should return a file name\n (including the .glif extension). The glyphNameToFileName function\n is called whenever a file name is created for a given glyph name.\n\n ``validateRead`` will validate read operations. Its default is ``True``.\n ``validateWrite`` will validate write operations. Its default is ``True``.\n ``expectContentsFile`` will raise a GlifLibError if a contents.plist file is\n not found on the glyph set file system. This should be set to ``True`` if you\n are reading an existing UFO and ``False`` if you create a fresh glyph set.\n """\n try:\n ufoFormatVersion = UFOFormatVersion(ufoFormatVersion)\n except ValueError as e:\n from fontTools.ufoLib.errors import UnsupportedUFOFormat\n\n raise UnsupportedUFOFormat(\n f"Unsupported UFO format: {ufoFormatVersion!r}"\n ) from e\n\n if hasattr(path, "__fspath__"): # support os.PathLike objects\n path = path.__fspath__()\n\n if isinstance(path, str):\n try:\n filesystem = fs.osfs.OSFS(path)\n except fs.errors.CreateFailed:\n raise GlifLibError("No glyphs directory '%s'" % path)\n self._shouldClose = True\n elif isinstance(path, fs.base.FS):\n filesystem = path\n try:\n filesystem.check()\n except fs.errors.FilesystemClosed:\n raise GlifLibError("the filesystem '%s' is closed" % filesystem)\n self._shouldClose = False\n else:\n raise TypeError(\n "Expected a path string or fs object, found %s" % type(path).__name__\n )\n try:\n path = filesystem.getsyspath("/")\n except fs.errors.NoSysPath:\n # network or in-memory FS may not map to the local one\n path = str(filesystem)\n # 'dirName' is kept for backward compatibility only, but it's DEPRECATED\n # as it's not guaranteed that it maps to an existing OSFS directory.\n # Client could use the FS api via the `self.fs` attribute instead.\n self.dirName = fs.path.parts(path)[-1]\n self.fs = filesystem\n # if glyphSet contains no 'contents.plist', we consider it empty\n self._havePreviousFile = filesystem.exists(CONTENTS_FILENAME)\n if expectContentsFile and not self._havePreviousFile:\n raise GlifLibError(f"{CONTENTS_FILENAME} is missing.")\n # attribute kept for backward compatibility\n self.ufoFormatVersion = ufoFormatVersion.major\n self.ufoFormatVersionTuple = ufoFormatVersion\n if glyphNameToFileNameFunc is None:\n glyphNameToFileNameFunc = glyphNameToFileName\n self.glyphNameToFileName = glyphNameToFileNameFunc\n self._validateRead = validateRead\n self._validateWrite = validateWrite\n self._existingFileNames: set[str] | None = None\n self._reverseContents = None\n\n self.rebuildContents()\n\n def rebuildContents(self, validateRead=None):\n """\n Rebuild the contents dict by loading contents.plist.\n\n ``validateRead`` will validate the data, by default it is set to the\n class's ``validateRead`` value, can be overridden.\n """\n if validateRead is None:\n validateRead = self._validateRead\n contents = self._getPlist(CONTENTS_FILENAME, {})\n # validate the contents\n if validateRead:\n invalidFormat = False\n if not isinstance(contents, dict):\n invalidFormat = True\n else:\n for name, fileName in contents.items():\n if not isinstance(name, str):\n invalidFormat = True\n if not isinstance(fileName, str):\n invalidFormat = True\n elif not self.fs.exists(fileName):\n raise GlifLibError(\n "%s references a file that does not exist: %s"\n % (CONTENTS_FILENAME, fileName)\n )\n if invalidFormat:\n raise GlifLibError("%s is not properly formatted" % CONTENTS_FILENAME)\n self.contents = contents\n self._existingFileNames = None\n self._reverseContents = None\n\n def getReverseContents(self):\n """\n Return a reversed dict of self.contents, mapping file names to\n glyph names. This is primarily an aid for custom glyph name to file\n name schemes that want to make sure they don't generate duplicate\n file names. The file names are converted to lowercase so we can\n reliably check for duplicates that only differ in case, which is\n important for case-insensitive file systems.\n """\n if self._reverseContents is None:\n d = {}\n for k, v in self.contents.items():\n d[v.lower()] = k\n self._reverseContents = d\n return self._reverseContents\n\n def writeContents(self):\n """\n Write the contents.plist file out to disk. Call this method when\n you're done writing glyphs.\n """\n self._writePlist(CONTENTS_FILENAME, self.contents)\n\n # layer info\n\n def readLayerInfo(self, info, validateRead=None):\n """\n ``validateRead`` will validate the data, by default it is set to the\n class's ``validateRead`` value, can be overridden.\n """\n if validateRead is None:\n validateRead = self._validateRead\n infoDict = self._getPlist(LAYERINFO_FILENAME, {})\n if validateRead:\n if not isinstance(infoDict, dict):\n raise GlifLibError("layerinfo.plist is not properly formatted.")\n infoDict = validateLayerInfoVersion3Data(infoDict)\n # populate the object\n for attr, value in infoDict.items():\n try:\n setattr(info, attr, value)\n except AttributeError:\n raise GlifLibError(\n "The supplied layer info object does not support setting a necessary attribute (%s)."\n % attr\n )\n\n def writeLayerInfo(self, info, validateWrite=None):\n """\n ``validateWrite`` will validate the data, by default it is set to the\n class's ``validateWrite`` value, can be overridden.\n """\n if validateWrite is None:\n validateWrite = self._validateWrite\n if self.ufoFormatVersionTuple.major < 3:\n raise GlifLibError(\n "layerinfo.plist is not allowed in UFO %d."\n % self.ufoFormatVersionTuple.major\n )\n # gather data\n infoData = {}\n for attr in layerInfoVersion3ValueData.keys():\n if hasattr(info, attr):\n try:\n value = getattr(info, attr)\n except AttributeError:\n raise GlifLibError(\n "The supplied info object does not support getting a necessary attribute (%s)."\n % attr\n )\n if value is None or (attr == "lib" and not value):\n continue\n infoData[attr] = value\n if infoData:\n # validate\n if validateWrite:\n infoData = validateLayerInfoVersion3Data(infoData)\n # write file\n self._writePlist(LAYERINFO_FILENAME, infoData)\n elif self._havePreviousFile and self.fs.exists(LAYERINFO_FILENAME):\n # data empty, remove existing file\n self.fs.remove(LAYERINFO_FILENAME)\n\n def getGLIF(self, glyphName):\n """\n Get the raw GLIF text for a given glyph name. This only works\n for GLIF files that are already on disk.\n\n This method is useful in situations when the raw XML needs to be\n read from a glyph set for a particular glyph before fully parsing\n it into an object structure via the readGlyph method.\n\n Raises KeyError if 'glyphName' is not in contents.plist, or\n GlifLibError if the file associated with can't be found.\n """\n fileName = self.contents[glyphName]\n try:\n return self.fs.readbytes(fileName)\n except fs.errors.ResourceNotFound:\n raise GlifLibError(\n "The file '%s' associated with glyph '%s' in contents.plist "\n "does not exist on %s" % (fileName, glyphName, self.fs)\n )\n\n def getGLIFModificationTime(self, glyphName):\n """\n Returns the modification time for the GLIF file with 'glyphName', as\n a floating point number giving the number of seconds since the epoch.\n Return None if the associated file does not exist or the underlying\n filesystem does not support getting modified times.\n Raises KeyError if the glyphName is not in contents.plist.\n """\n fileName = self.contents[glyphName]\n return self.getFileModificationTime(fileName)\n\n # reading/writing API\n\n def readGlyph(self, glyphName, glyphObject=None, pointPen=None, validate=None):\n """\n Read a .glif file for 'glyphName' from the glyph set. The\n 'glyphObject' argument can be any kind of object (even None);\n the readGlyph() method will attempt to set the following\n attributes on it:\n\n width\n the advance width of the glyph\n height\n the advance height of the glyph\n unicodes\n a list of unicode values for this glyph\n note\n a string\n lib\n a dictionary containing custom data\n image\n a dictionary containing image data\n guidelines\n a list of guideline data dictionaries\n anchors\n a list of anchor data dictionaries\n\n All attributes are optional, in two ways:\n\n 1) An attribute *won't* be set if the .glif file doesn't\n contain data for it. 'glyphObject' will have to deal\n with default values itself.\n 2) If setting the attribute fails with an AttributeError\n (for example if the 'glyphObject' attribute is read-\n only), readGlyph() will not propagate that exception,\n but ignore that attribute.\n\n To retrieve outline information, you need to pass an object\n conforming to the PointPen protocol as the 'pointPen' argument.\n This argument may be None if you don't need the outline data.\n\n readGlyph() will raise KeyError if the glyph is not present in\n the glyph set.\n\n ``validate`` will validate the data, by default it is set to the\n class's ``validateRead`` value, can be overridden.\n """\n if validate is None:\n validate = self._validateRead\n text = self.getGLIF(glyphName)\n try:\n tree = _glifTreeFromString(text)\n formatVersions = GLIFFormatVersion.supported_versions(\n self.ufoFormatVersionTuple\n )\n _readGlyphFromTree(\n tree,\n glyphObject,\n pointPen,\n formatVersions=formatVersions,\n validate=validate,\n )\n except GlifLibError as glifLibError:\n # Re-raise with a note that gives extra context, describing where\n # the error occurred.\n fileName = self.contents[glyphName]\n try:\n glifLocation = f"'{self.fs.getsyspath(fileName)}'"\n except fs.errors.NoSysPath:\n # Network or in-memory FS may not map to a local path, so use\n # the best string representation we have.\n glifLocation = f"'{fileName}' from '{str(self.fs)}'"\n\n glifLibError._add_note(\n f"The issue is in glyph '{glyphName}', located in {glifLocation}."\n )\n raise\n\n def writeGlyph(\n self,\n glyphName,\n glyphObject=None,\n drawPointsFunc=None,\n formatVersion=None,\n validate=None,\n ):\n """\n Write a .glif file for 'glyphName' to the glyph set. The\n 'glyphObject' argument can be any kind of object (even None);\n the writeGlyph() method will attempt to get the following\n attributes from it:\n\n width\n the advance width of the glyph\n height\n the advance height of the glyph\n unicodes\n a list of unicode values for this glyph\n note\n a string\n lib\n a dictionary containing custom data\n image\n a dictionary containing image data\n guidelines\n a list of guideline data dictionaries\n anchors\n a list of anchor data dictionaries\n\n All attributes are optional: if 'glyphObject' doesn't\n have the attribute, it will simply be skipped.\n\n To write outline data to the .glif file, writeGlyph() needs\n a function (any callable object actually) that will take one\n argument: an object that conforms to the PointPen protocol.\n The function will be called by writeGlyph(); it has to call the\n proper PointPen methods to transfer the outline to the .glif file.\n\n The GLIF format version will be chosen based on the ufoFormatVersion\n passed during the creation of this object. If a particular format\n version is desired, it can be passed with the formatVersion argument.\n The formatVersion argument accepts either a tuple of integers for\n (major, minor), or a single integer for the major digit only (with\n minor digit implied as 0).\n\n An UnsupportedGLIFFormat exception is raised if the requested GLIF\n formatVersion is not supported.\n\n ``validate`` will validate the data, by default it is set to the\n class's ``validateWrite`` value, can be overridden.\n """\n if formatVersion is None:\n formatVersion = GLIFFormatVersion.default(self.ufoFormatVersionTuple)\n else:\n try:\n formatVersion = GLIFFormatVersion(formatVersion)\n except ValueError as e:\n from fontTools.ufoLib.errors import UnsupportedGLIFFormat\n\n raise UnsupportedGLIFFormat(\n f"Unsupported GLIF format version: {formatVersion!r}"\n ) from e\n if formatVersion not in GLIFFormatVersion.supported_versions(\n self.ufoFormatVersionTuple\n ):\n from fontTools.ufoLib.errors import UnsupportedGLIFFormat\n\n raise UnsupportedGLIFFormat(\n f"Unsupported GLIF format version ({formatVersion!s}) "\n f"for UFO format version {self.ufoFormatVersionTuple!s}."\n )\n if validate is None:\n validate = self._validateWrite\n fileName = self.contents.get(glyphName)\n if fileName is None:\n if self._existingFileNames is None:\n self._existingFileNames = {\n fileName.lower() for fileName in self.contents.values()\n }\n fileName = self.glyphNameToFileName(glyphName, self._existingFileNames)\n self.contents[glyphName] = fileName\n self._existingFileNames.add(fileName.lower())\n if self._reverseContents is not None:\n self._reverseContents[fileName.lower()] = glyphName\n data = _writeGlyphToBytes(\n glyphName,\n glyphObject,\n drawPointsFunc,\n formatVersion=formatVersion,\n validate=validate,\n )\n if (\n self._havePreviousFile\n and self.fs.exists(fileName)\n and data == self.fs.readbytes(fileName)\n ):\n return\n self.fs.writebytes(fileName, data)\n\n def deleteGlyph(self, glyphName):\n """Permanently delete the glyph from the glyph set on disk. Will\n raise KeyError if the glyph is not present in the glyph set.\n """\n fileName = self.contents[glyphName]\n self.fs.remove(fileName)\n if self._existingFileNames is not None:\n self._existingFileNames.remove(fileName.lower())\n if self._reverseContents is not None:\n del self._reverseContents[fileName.lower()]\n del self.contents[glyphName]\n\n # dict-like support\n\n def keys(self):\n return list(self.contents.keys())\n\n def has_key(self, glyphName):\n return glyphName in self.contents\n\n __contains__ = has_key\n\n def __len__(self):\n return len(self.contents)\n\n def __getitem__(self, glyphName):\n if glyphName not in self.contents:\n raise KeyError(glyphName)\n return self.glyphClass(glyphName, self)\n\n # quickly fetch unicode values\n\n def getUnicodes(self, glyphNames=None):\n """\n Return a dictionary that maps glyph names to lists containing\n the unicode value[s] for that glyph, if any. This parses the .glif\n files partially, so it is a lot faster than parsing all files completely.\n By default this checks all glyphs, but a subset can be passed with glyphNames.\n """\n unicodes = {}\n if glyphNames is None:\n glyphNames = self.contents.keys()\n for glyphName in glyphNames:\n text = self.getGLIF(glyphName)\n unicodes[glyphName] = _fetchUnicodes(text)\n return unicodes\n\n def getComponentReferences(self, glyphNames=None):\n """\n Return a dictionary that maps glyph names to lists containing the\n base glyph name of components in the glyph. This parses the .glif\n files partially, so it is a lot faster than parsing all files completely.\n By default this checks all glyphs, but a subset can be passed with glyphNames.\n """\n components = {}\n if glyphNames is None:\n glyphNames = self.contents.keys()\n for glyphName in glyphNames:\n text = self.getGLIF(glyphName)\n components[glyphName] = _fetchComponentBases(text)\n return components\n\n def getImageReferences(self, glyphNames=None):\n """\n Return a dictionary that maps glyph names to the file name of the image\n referenced by the glyph. This parses the .glif files partially, so it is a\n lot faster than parsing all files completely.\n By default this checks all glyphs, but a subset can be passed with glyphNames.\n """\n images = {}\n if glyphNames is None:\n glyphNames = self.contents.keys()\n for glyphName in glyphNames:\n text = self.getGLIF(glyphName)\n images[glyphName] = _fetchImageFileName(text)\n return images\n\n def close(self):\n if self._shouldClose:\n self.fs.close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self.close()\n\n\n# -----------------------\n# Glyph Name to File Name\n# -----------------------\n\n\ndef glyphNameToFileName(glyphName, existingFileNames):\n """\n Wrapper around the userNameToFileName function in filenames.py\n\n Note that existingFileNames should be a set for large glyphsets\n or performance will suffer.\n """\n if existingFileNames is None:\n existingFileNames = set()\n return userNameToFileName(glyphName, existing=existingFileNames, suffix=".glif")\n\n\n# -----------------------\n# GLIF To and From String\n# -----------------------\n\n\ndef readGlyphFromString(\n aString,\n glyphObject=None,\n pointPen=None,\n formatVersions=None,\n validate=True,\n):\n """\n Read .glif data from a string into a glyph object.\n\n The 'glyphObject' argument can be any kind of object (even None);\n the readGlyphFromString() method will attempt to set the following\n attributes on it:\n\n width\n the advance width of the glyph\n height\n the advance height of the glyph\n unicodes\n a list of unicode values for this glyph\n note\n a string\n lib\n a dictionary containing custom data\n image\n a dictionary containing image data\n guidelines\n a list of guideline data dictionaries\n anchors\n a list of anchor data dictionaries\n\n All attributes are optional, in two ways:\n\n 1) An attribute *won't* be set if the .glif file doesn't\n contain data for it. 'glyphObject' will have to deal\n with default values itself.\n 2) If setting the attribute fails with an AttributeError\n (for example if the 'glyphObject' attribute is read-\n only), readGlyphFromString() will not propagate that\n exception, but ignore that attribute.\n\n To retrieve outline information, you need to pass an object\n conforming to the PointPen protocol as the 'pointPen' argument.\n This argument may be None if you don't need the outline data.\n\n The formatVersions optional argument define the GLIF format versions\n that are allowed to be read.\n The type is Optional[Iterable[Tuple[int, int], int]]. It can contain\n either integers (for the major versions to be allowed, with minor\n digits defaulting to 0), or tuples of integers to specify both\n (major, minor) versions.\n By default when formatVersions is None all the GLIF format versions\n currently defined are allowed to be read.\n\n ``validate`` will validate the read data. It is set to ``True`` by default.\n """\n tree = _glifTreeFromString(aString)\n\n if formatVersions is None:\n validFormatVersions = GLIFFormatVersion.supported_versions()\n else:\n validFormatVersions, invalidFormatVersions = set(), set()\n for v in formatVersions:\n try:\n formatVersion = GLIFFormatVersion(v)\n except ValueError:\n invalidFormatVersions.add(v)\n else:\n validFormatVersions.add(formatVersion)\n if not validFormatVersions:\n raise ValueError(\n "None of the requested GLIF formatVersions are supported: "\n f"{formatVersions!r}"\n )\n\n _readGlyphFromTree(\n tree,\n glyphObject,\n pointPen,\n formatVersions=validFormatVersions,\n validate=validate,\n )\n\n\ndef _writeGlyphToBytes(\n glyphName,\n glyphObject=None,\n drawPointsFunc=None,\n writer=None,\n formatVersion=None,\n validate=True,\n):\n """Return .glif data for a glyph as a UTF-8 encoded bytes string."""\n try:\n formatVersion = GLIFFormatVersion(formatVersion)\n except ValueError:\n from fontTools.ufoLib.errors import UnsupportedGLIFFormat\n\n raise UnsupportedGLIFFormat(\n "Unsupported GLIF format version: {formatVersion!r}"\n )\n # start\n if validate and not isinstance(glyphName, str):\n raise GlifLibError("The glyph name is not properly formatted.")\n if validate and len(glyphName) == 0:\n raise GlifLibError("The glyph name is empty.")\n glyphAttrs = OrderedDict(\n [("name", glyphName), ("format", repr(formatVersion.major))]\n )\n if formatVersion.minor != 0:\n glyphAttrs["formatMinor"] = repr(formatVersion.minor)\n root = etree.Element("glyph", glyphAttrs)\n identifiers = set()\n # advance\n _writeAdvance(glyphObject, root, validate)\n # unicodes\n if getattr(glyphObject, "unicodes", None):\n _writeUnicodes(glyphObject, root, validate)\n # note\n if getattr(glyphObject, "note", None):\n _writeNote(glyphObject, root, validate)\n # image\n if formatVersion.major >= 2 and getattr(glyphObject, "image", None):\n _writeImage(glyphObject, root, validate)\n # guidelines\n if formatVersion.major >= 2 and getattr(glyphObject, "guidelines", None):\n _writeGuidelines(glyphObject, root, identifiers, validate)\n # anchors\n anchors = getattr(glyphObject, "anchors", None)\n if formatVersion.major >= 2 and anchors:\n _writeAnchors(glyphObject, root, identifiers, validate)\n # outline\n if drawPointsFunc is not None:\n outline = etree.SubElement(root, "outline")\n pen = GLIFPointPen(outline, identifiers=identifiers, validate=validate)\n drawPointsFunc(pen)\n if formatVersion.major == 1 and anchors:\n _writeAnchorsFormat1(pen, anchors, validate)\n # prevent lxml from writing self-closing tags\n if not len(outline):\n outline.text = "\n "\n # lib\n if getattr(glyphObject, "lib", None):\n _writeLib(glyphObject, root, validate)\n # return the text\n data = etree.tostring(\n root, encoding="UTF-8", xml_declaration=True, pretty_print=True\n )\n return data\n\n\ndef writeGlyphToString(\n glyphName,\n glyphObject=None,\n drawPointsFunc=None,\n formatVersion=None,\n validate=True,\n):\n """\n Return .glif data for a glyph as a string. The XML declaration's\n encoding is always set to "UTF-8".\n The 'glyphObject' argument can be any kind of object (even None);\n the writeGlyphToString() method will attempt to get the following\n attributes from it:\n\n width\n the advance width of the glyph\n height\n the advance height of the glyph\n unicodes\n a list of unicode values for this glyph\n note\n a string\n lib\n a dictionary containing custom data\n image\n a dictionary containing image data\n guidelines\n a list of guideline data dictionaries\n anchors\n a list of anchor data dictionaries\n\n All attributes are optional: if 'glyphObject' doesn't\n have the attribute, it will simply be skipped.\n\n To write outline data to the .glif file, writeGlyphToString() needs\n a function (any callable object actually) that will take one\n argument: an object that conforms to the PointPen protocol.\n The function will be called by writeGlyphToString(); it has to call the\n proper PointPen methods to transfer the outline to the .glif file.\n\n The GLIF format version can be specified with the formatVersion argument.\n This accepts either a tuple of integers for (major, minor), or a single\n integer for the major digit only (with minor digit implied as 0).\n By default when formatVesion is None the latest GLIF format version will\n be used; currently it's 2.0, which is equivalent to formatVersion=(2, 0).\n\n An UnsupportedGLIFFormat exception is raised if the requested UFO\n formatVersion is not supported.\n\n ``validate`` will validate the written data. It is set to ``True`` by default.\n """\n data = _writeGlyphToBytes(\n glyphName,\n glyphObject=glyphObject,\n drawPointsFunc=drawPointsFunc,\n formatVersion=formatVersion,\n validate=validate,\n )\n return data.decode("utf-8")\n\n\ndef _writeAdvance(glyphObject, element, validate):\n width = getattr(glyphObject, "width", None)\n if width is not None:\n if validate and not isinstance(width, numberTypes):\n raise GlifLibError("width attribute must be int or float")\n if width == 0:\n width = None\n height = getattr(glyphObject, "height", None)\n if height is not None:\n if validate and not isinstance(height, numberTypes):\n raise GlifLibError("height attribute must be int or float")\n if height == 0:\n height = None\n if width is not None and height is not None:\n etree.SubElement(\n element,\n "advance",\n OrderedDict([("height", repr(height)), ("width", repr(width))]),\n )\n elif width is not None:\n etree.SubElement(element, "advance", dict(width=repr(width)))\n elif height is not None:\n etree.SubElement(element, "advance", dict(height=repr(height)))\n\n\ndef _writeUnicodes(glyphObject, element, validate):\n unicodes = getattr(glyphObject, "unicodes", None)\n if validate and isinstance(unicodes, int):\n unicodes = [unicodes]\n seen = set()\n for code in unicodes:\n if validate and not isinstance(code, int):\n raise GlifLibError("unicode values must be int")\n if code in seen:\n continue\n seen.add(code)\n hexCode = "%04X" % code\n etree.SubElement(element, "unicode", dict(hex=hexCode))\n\n\ndef _writeNote(glyphObject, element, validate):\n note = getattr(glyphObject, "note", None)\n if validate and not isinstance(note, str):\n raise GlifLibError("note attribute must be str")\n note = note.strip()\n note = "\n" + note + "\n"\n etree.SubElement(element, "note").text = note\n\n\ndef _writeImage(glyphObject, element, validate):\n image = getattr(glyphObject, "image", None)\n if validate and not imageValidator(image):\n raise GlifLibError(\n "image attribute must be a dict or dict-like object with the proper structure."\n )\n attrs = OrderedDict([("fileName", image["fileName"])])\n for attr, default in _transformationInfo:\n value = image.get(attr, default)\n if value != default:\n attrs[attr] = repr(value)\n color = image.get("color")\n if color is not None:\n attrs["color"] = color\n etree.SubElement(element, "image", attrs)\n\n\ndef _writeGuidelines(glyphObject, element, identifiers, validate):\n guidelines = getattr(glyphObject, "guidelines", [])\n if validate and not guidelinesValidator(guidelines):\n raise GlifLibError("guidelines attribute does not have the proper structure.")\n for guideline in guidelines:\n attrs = OrderedDict()\n x = guideline.get("x")\n if x is not None:\n attrs["x"] = repr(x)\n y = guideline.get("y")\n if y is not None:\n attrs["y"] = repr(y)\n angle = guideline.get("angle")\n if angle is not None:\n attrs["angle"] = repr(angle)\n name = guideline.get("name")\n if name is not None:\n attrs["name"] = name\n color = guideline.get("color")\n if color is not None:\n attrs["color"] = color\n identifier = guideline.get("identifier")\n if identifier is not None:\n if validate and identifier in identifiers:\n raise GlifLibError("identifier used more than once: %s" % identifier)\n attrs["identifier"] = identifier\n identifiers.add(identifier)\n etree.SubElement(element, "guideline", attrs)\n\n\ndef _writeAnchorsFormat1(pen, anchors, validate):\n if validate and not anchorsValidator(anchors):\n raise GlifLibError("anchors attribute does not have the proper structure.")\n for anchor in anchors:\n attrs = {}\n x = anchor["x"]\n attrs["x"] = repr(x)\n y = anchor["y"]\n attrs["y"] = repr(y)\n name = anchor.get("name")\n if name is not None:\n attrs["name"] = name\n pen.beginPath()\n pen.addPoint((x, y), segmentType="move", name=name)\n pen.endPath()\n\n\ndef _writeAnchors(glyphObject, element, identifiers, validate):\n anchors = getattr(glyphObject, "anchors", [])\n if validate and not anchorsValidator(anchors):\n raise GlifLibError("anchors attribute does not have the proper structure.")\n for anchor in anchors:\n attrs = OrderedDict()\n x = anchor["x"]\n attrs["x"] = repr(x)\n y = anchor["y"]\n attrs["y"] = repr(y)\n name = anchor.get("name")\n if name is not None:\n attrs["name"] = name\n color = anchor.get("color")\n if color is not None:\n attrs["color"] = color\n identifier = anchor.get("identifier")\n if identifier is not None:\n if validate and identifier in identifiers:\n raise GlifLibError("identifier used more than once: %s" % identifier)\n attrs["identifier"] = identifier\n identifiers.add(identifier)\n etree.SubElement(element, "anchor", attrs)\n\n\ndef _writeLib(glyphObject, element, validate):\n lib = getattr(glyphObject, "lib", None)\n if not lib:\n # don't write empty lib\n return\n if validate:\n valid, message = glyphLibValidator(lib)\n if not valid:\n raise GlifLibError(message)\n if not isinstance(lib, dict):\n lib = dict(lib)\n # plist inside GLIF begins with 2 levels of indentation\n e = plistlib.totree(lib, indent_level=2)\n etree.SubElement(element, "lib").append(e)\n\n\n# -----------------------\n# layerinfo.plist Support\n# -----------------------\n\nlayerInfoVersion3ValueData = {\n "color": dict(type=str, valueValidator=colorValidator),\n "lib": dict(type=dict, valueValidator=genericTypeValidator),\n}\n\n\ndef validateLayerInfoVersion3ValueForAttribute(attr, value):\n """\n This performs very basic validation of the value for attribute\n following the UFO 3 fontinfo.plist specification. The results\n of this should not be interpretted as *correct* for the font\n that they are part of. This merely indicates that the value\n is of the proper type and, where the specification defines\n a set range of possible values for an attribute, that the\n value is in the accepted range.\n """\n if attr not in layerInfoVersion3ValueData:\n return False\n dataValidationDict = layerInfoVersion3ValueData[attr]\n valueType = dataValidationDict.get("type")\n validator = dataValidationDict.get("valueValidator")\n valueOptions = dataValidationDict.get("valueOptions")\n # have specific options for the validator\n if valueOptions is not None:\n isValidValue = validator(value, valueOptions)\n # no specific options\n else:\n if validator == genericTypeValidator:\n isValidValue = validator(value, valueType)\n else:\n isValidValue = validator(value)\n return isValidValue\n\n\ndef validateLayerInfoVersion3Data(infoData):\n """\n This performs very basic validation of the value for infoData\n following the UFO 3 layerinfo.plist specification. The results\n of this should not be interpretted as *correct* for the font\n that they are part of. This merely indicates that the values\n are of the proper type and, where the specification defines\n a set range of possible values for an attribute, that the\n value is in the accepted range.\n """\n for attr, value in infoData.items():\n if attr not in layerInfoVersion3ValueData:\n raise GlifLibError("Unknown attribute %s." % attr)\n isValidValue = validateLayerInfoVersion3ValueForAttribute(attr, value)\n if not isValidValue:\n raise GlifLibError(f"Invalid value for attribute {attr} ({value!r}).")\n return infoData\n\n\n# -----------------\n# GLIF Tree Support\n# -----------------\n\n\ndef _glifTreeFromFile(aFile):\n if etree._have_lxml:\n tree = etree.parse(aFile, parser=etree.XMLParser(remove_comments=True))\n else:\n tree = etree.parse(aFile)\n root = tree.getroot()\n if root.tag != "glyph":\n raise GlifLibError("The GLIF is not properly formatted.")\n if root.text and root.text.strip() != "":\n raise GlifLibError("Invalid GLIF structure.")\n return root\n\n\ndef _glifTreeFromString(aString):\n data = tobytes(aString, encoding="utf-8")\n try:\n if etree._have_lxml:\n root = etree.fromstring(data, parser=etree.XMLParser(remove_comments=True))\n else:\n root = etree.fromstring(data)\n except Exception as etree_exception:\n raise GlifLibError("GLIF contains invalid XML.") from etree_exception\n\n if root.tag != "glyph":\n raise GlifLibError("The GLIF is not properly formatted.")\n if root.text and root.text.strip() != "":\n raise GlifLibError("Invalid GLIF structure.")\n return root\n\n\ndef _readGlyphFromTree(\n tree,\n glyphObject=None,\n pointPen=None,\n formatVersions=GLIFFormatVersion.supported_versions(),\n validate=True,\n):\n # check the format version\n formatVersionMajor = tree.get("format")\n if validate and formatVersionMajor is None:\n raise GlifLibError("Unspecified format version in GLIF.")\n formatVersionMinor = tree.get("formatMinor", 0)\n try:\n formatVersion = GLIFFormatVersion(\n (int(formatVersionMajor), int(formatVersionMinor))\n )\n except ValueError as e:\n msg = "Unsupported GLIF format: %s.%s" % (\n formatVersionMajor,\n formatVersionMinor,\n )\n if validate:\n from fontTools.ufoLib.errors import UnsupportedGLIFFormat\n\n raise UnsupportedGLIFFormat(msg) from e\n # warn but continue using the latest supported format\n formatVersion = GLIFFormatVersion.default()\n logger.warning(\n "%s. Assuming the latest supported version (%s). "\n "Some data may be skipped or parsed incorrectly.",\n msg,\n formatVersion,\n )\n\n if validate and formatVersion not in formatVersions:\n raise GlifLibError(f"Forbidden GLIF format version: {formatVersion!s}")\n\n try:\n readGlyphFromTree = _READ_GLYPH_FROM_TREE_FUNCS[formatVersion]\n except KeyError:\n raise NotImplementedError(formatVersion)\n\n readGlyphFromTree(\n tree=tree,\n glyphObject=glyphObject,\n pointPen=pointPen,\n validate=validate,\n formatMinor=formatVersion.minor,\n )\n\n\ndef _readGlyphFromTreeFormat1(\n tree, glyphObject=None, pointPen=None, validate=None, **kwargs\n):\n # get the name\n _readName(glyphObject, tree, validate)\n # populate the sub elements\n unicodes = []\n haveSeenAdvance = haveSeenOutline = haveSeenLib = haveSeenNote = False\n for element in tree:\n if element.tag == "outline":\n if validate:\n if haveSeenOutline:\n raise GlifLibError("The outline element occurs more than once.")\n if element.attrib:\n raise GlifLibError(\n "The outline element contains unknown attributes."\n )\n if element.text and element.text.strip() != "":\n raise GlifLibError("Invalid outline structure.")\n haveSeenOutline = True\n buildOutlineFormat1(glyphObject, pointPen, element, validate)\n elif glyphObject is None:\n continue\n elif element.tag == "advance":\n if validate and haveSeenAdvance:\n raise GlifLibError("The advance element occurs more than once.")\n haveSeenAdvance = True\n _readAdvance(glyphObject, element)\n elif element.tag == "unicode":\n v = element.get("hex")\n if v is None:\n raise GlifLibError(\n "A unicode element is missing its required hex attribute."\n )\n try:\n v = int(v, 16)\n if v not in unicodes:\n unicodes.append(v)\n except ValueError:\n raise GlifLibError(\n "Illegal value for hex attribute of unicode element."\n )\n elif element.tag == "note":\n if validate and haveSeenNote:\n raise GlifLibError("The note element occurs more than once.")\n haveSeenNote = True\n _readNote(glyphObject, element)\n elif element.tag == "lib":\n if validate and haveSeenLib:\n raise GlifLibError("The lib element occurs more than once.")\n haveSeenLib = True\n _readLib(glyphObject, element, validate)\n else:\n raise GlifLibError("Unknown element in GLIF: %s" % element)\n # set the collected unicodes\n if unicodes:\n _relaxedSetattr(glyphObject, "unicodes", unicodes)\n\n\ndef _readGlyphFromTreeFormat2(\n tree, glyphObject=None, pointPen=None, validate=None, formatMinor=0\n):\n # get the name\n _readName(glyphObject, tree, validate)\n # populate the sub elements\n unicodes = []\n guidelines = []\n anchors = []\n haveSeenAdvance = haveSeenImage = haveSeenOutline = haveSeenLib = haveSeenNote = (\n False\n )\n identifiers = set()\n for element in tree:\n if element.tag == "outline":\n if validate:\n if haveSeenOutline:\n raise GlifLibError("The outline element occurs more than once.")\n if element.attrib:\n raise GlifLibError(\n "The outline element contains unknown attributes."\n )\n if element.text and element.text.strip() != "":\n raise GlifLibError("Invalid outline structure.")\n haveSeenOutline = True\n if pointPen is not None:\n buildOutlineFormat2(\n glyphObject, pointPen, element, identifiers, validate\n )\n elif glyphObject is None:\n continue\n elif element.tag == "advance":\n if validate and haveSeenAdvance:\n raise GlifLibError("The advance element occurs more than once.")\n haveSeenAdvance = True\n _readAdvance(glyphObject, element)\n elif element.tag == "unicode":\n v = element.get("hex")\n if v is None:\n raise GlifLibError(\n "A unicode element is missing its required hex attribute."\n )\n try:\n v = int(v, 16)\n if v not in unicodes:\n unicodes.append(v)\n except ValueError:\n raise GlifLibError(\n "Illegal value for hex attribute of unicode element."\n )\n elif element.tag == "guideline":\n if validate and len(element):\n raise GlifLibError("Unknown children in guideline element.")\n attrib = dict(element.attrib)\n for attr in ("x", "y", "angle"):\n if attr in attrib:\n attrib[attr] = _number(attrib[attr])\n guidelines.append(attrib)\n elif element.tag == "anchor":\n if validate and len(element):\n raise GlifLibError("Unknown children in anchor element.")\n attrib = dict(element.attrib)\n for attr in ("x", "y"):\n if attr in element.attrib:\n attrib[attr] = _number(attrib[attr])\n anchors.append(attrib)\n elif element.tag == "image":\n if validate:\n if haveSeenImage:\n raise GlifLibError("The image element occurs more than once.")\n if len(element):\n raise GlifLibError("Unknown children in image element.")\n haveSeenImage = True\n _readImage(glyphObject, element, validate)\n elif element.tag == "note":\n if validate and haveSeenNote:\n raise GlifLibError("The note element occurs more than once.")\n haveSeenNote = True\n _readNote(glyphObject, element)\n elif element.tag == "lib":\n if validate and haveSeenLib:\n raise GlifLibError("The lib element occurs more than once.")\n haveSeenLib = True\n _readLib(glyphObject, element, validate)\n else:\n raise GlifLibError("Unknown element in GLIF: %s" % element)\n # set the collected unicodes\n if unicodes:\n _relaxedSetattr(glyphObject, "unicodes", unicodes)\n # set the collected guidelines\n if guidelines:\n if validate and not guidelinesValidator(guidelines, identifiers):\n raise GlifLibError("The guidelines are improperly formatted.")\n _relaxedSetattr(glyphObject, "guidelines", guidelines)\n # set the collected anchors\n if anchors:\n if validate and not anchorsValidator(anchors, identifiers):\n raise GlifLibError("The anchors are improperly formatted.")\n _relaxedSetattr(glyphObject, "anchors", anchors)\n\n\n_READ_GLYPH_FROM_TREE_FUNCS = {\n GLIFFormatVersion.FORMAT_1_0: _readGlyphFromTreeFormat1,\n GLIFFormatVersion.FORMAT_2_0: _readGlyphFromTreeFormat2,\n}\n\n\ndef _readName(glyphObject, root, validate):\n glyphName = root.get("name")\n if validate and not glyphName:\n raise GlifLibError("Empty glyph name in GLIF.")\n if glyphName and glyphObject is not None:\n _relaxedSetattr(glyphObject, "name", glyphName)\n\n\ndef _readAdvance(glyphObject, advance):\n width = _number(advance.get("width", 0))\n _relaxedSetattr(glyphObject, "width", width)\n height = _number(advance.get("height", 0))\n _relaxedSetattr(glyphObject, "height", height)\n\n\ndef _readNote(glyphObject, note):\n lines = note.text.split("\n")\n note = "\n".join(line.strip() for line in lines if line.strip())\n _relaxedSetattr(glyphObject, "note", note)\n\n\ndef _readLib(glyphObject, lib, validate):\n assert len(lib) == 1\n child = lib[0]\n plist = plistlib.fromtree(child)\n if validate:\n valid, message = glyphLibValidator(plist)\n if not valid:\n raise GlifLibError(message)\n _relaxedSetattr(glyphObject, "lib", plist)\n\n\ndef _readImage(glyphObject, image, validate):\n imageData = dict(image.attrib)\n for attr, default in _transformationInfo:\n value = imageData.get(attr, default)\n imageData[attr] = _number(value)\n if validate and not imageValidator(imageData):\n raise GlifLibError("The image element is not properly formatted.")\n _relaxedSetattr(glyphObject, "image", imageData)\n\n\n# ----------------\n# GLIF to PointPen\n# ----------------\n\ncontourAttributesFormat2 = {"identifier"}\ncomponentAttributesFormat1 = {\n "base",\n "xScale",\n "xyScale",\n "yxScale",\n "yScale",\n "xOffset",\n "yOffset",\n}\ncomponentAttributesFormat2 = componentAttributesFormat1 | {"identifier"}\npointAttributesFormat1 = {"x", "y", "type", "smooth", "name"}\npointAttributesFormat2 = pointAttributesFormat1 | {"identifier"}\npointSmoothOptions = {"no", "yes"}\npointTypeOptions = {"move", "line", "offcurve", "curve", "qcurve"}\n\n# format 1\n\n\ndef buildOutlineFormat1(glyphObject, pen, outline, validate):\n anchors = []\n for element in outline:\n if element.tag == "contour":\n if len(element) == 1:\n point = element[0]\n if point.tag == "point":\n anchor = _buildAnchorFormat1(point, validate)\n if anchor is not None:\n anchors.append(anchor)\n continue\n if pen is not None:\n _buildOutlineContourFormat1(pen, element, validate)\n elif element.tag == "component":\n if pen is not None:\n _buildOutlineComponentFormat1(pen, element, validate)\n else:\n raise GlifLibError("Unknown element in outline element: %s" % element)\n if glyphObject is not None and anchors:\n if validate and not anchorsValidator(anchors):\n raise GlifLibError("GLIF 1 anchors are not properly formatted.")\n _relaxedSetattr(glyphObject, "anchors", anchors)\n\n\ndef _buildAnchorFormat1(point, validate):\n if point.get("type") != "move":\n return None\n name = point.get("name")\n if name is None:\n return None\n x = point.get("x")\n y = point.get("y")\n if validate and x is None:\n raise GlifLibError("Required x attribute is missing in point element.")\n if validate and y is None:\n raise GlifLibError("Required y attribute is missing in point element.")\n x = _number(x)\n y = _number(y)\n anchor = dict(x=x, y=y, name=name)\n return anchor\n\n\ndef _buildOutlineContourFormat1(pen, contour, validate):\n if validate and contour.attrib:\n raise GlifLibError("Unknown attributes in contour element.")\n pen.beginPath()\n if len(contour):\n massaged = _validateAndMassagePointStructures(\n contour,\n pointAttributesFormat1,\n openContourOffCurveLeniency=True,\n validate=validate,\n )\n _buildOutlinePointsFormat1(pen, massaged)\n pen.endPath()\n\n\ndef _buildOutlinePointsFormat1(pen, contour):\n for point in contour:\n x = point["x"]\n y = point["y"]\n segmentType = point["segmentType"]\n smooth = point["smooth"]\n name = point["name"]\n pen.addPoint((x, y), segmentType=segmentType, smooth=smooth, name=name)\n\n\ndef _buildOutlineComponentFormat1(pen, component, validate):\n if validate:\n if len(component):\n raise GlifLibError("Unknown child elements of component element.")\n for attr in component.attrib.keys():\n if attr not in componentAttributesFormat1:\n raise GlifLibError("Unknown attribute in component element: %s" % attr)\n baseGlyphName = component.get("base")\n if validate and baseGlyphName is None:\n raise GlifLibError("The base attribute is not defined in the component.")\n transformation = []\n for attr, default in _transformationInfo:\n value = component.get(attr)\n if value is None:\n value = default\n else:\n value = _number(value)\n transformation.append(value)\n pen.addComponent(baseGlyphName, tuple(transformation))\n\n\n# format 2\n\n\ndef buildOutlineFormat2(glyphObject, pen, outline, identifiers, validate):\n for element in outline:\n if element.tag == "contour":\n _buildOutlineContourFormat2(pen, element, identifiers, validate)\n elif element.tag == "component":\n _buildOutlineComponentFormat2(pen, element, identifiers, validate)\n else:\n raise GlifLibError("Unknown element in outline element: %s" % element.tag)\n\n\ndef _buildOutlineContourFormat2(pen, contour, identifiers, validate):\n if validate:\n for attr in contour.attrib.keys():\n if attr not in contourAttributesFormat2:\n raise GlifLibError("Unknown attribute in contour element: %s" % attr)\n identifier = contour.get("identifier")\n if identifier is not None:\n if validate:\n if identifier in identifiers:\n raise GlifLibError(\n "The identifier %s is used more than once." % identifier\n )\n if not identifierValidator(identifier):\n raise GlifLibError(\n "The contour identifier %s is not valid." % identifier\n )\n identifiers.add(identifier)\n try:\n pen.beginPath(identifier=identifier)\n except TypeError:\n pen.beginPath()\n warn(\n "The beginPath method needs an identifier kwarg. The contour's identifier value has been discarded.",\n DeprecationWarning,\n )\n if len(contour):\n massaged = _validateAndMassagePointStructures(\n contour, pointAttributesFormat2, validate=validate\n )\n _buildOutlinePointsFormat2(pen, massaged, identifiers, validate)\n pen.endPath()\n\n\ndef _buildOutlinePointsFormat2(pen, contour, identifiers, validate):\n for point in contour:\n x = point["x"]\n y = point["y"]\n segmentType = point["segmentType"]\n smooth = point["smooth"]\n name = point["name"]\n identifier = point.get("identifier")\n if identifier is not None:\n if validate:\n if identifier in identifiers:\n raise GlifLibError(\n "The identifier %s is used more than once." % identifier\n )\n if not identifierValidator(identifier):\n raise GlifLibError("The identifier %s is not valid." % identifier)\n identifiers.add(identifier)\n try:\n pen.addPoint(\n (x, y),\n segmentType=segmentType,\n smooth=smooth,\n name=name,\n identifier=identifier,\n )\n except TypeError:\n pen.addPoint((x, y), segmentType=segmentType, smooth=smooth, name=name)\n warn(\n "The addPoint method needs an identifier kwarg. The point's identifier value has been discarded.",\n DeprecationWarning,\n )\n\n\ndef _buildOutlineComponentFormat2(pen, component, identifiers, validate):\n if validate:\n if len(component):\n raise GlifLibError("Unknown child elements of component element.")\n for attr in component.attrib.keys():\n if attr not in componentAttributesFormat2:\n raise GlifLibError("Unknown attribute in component element: %s" % attr)\n baseGlyphName = component.get("base")\n if validate and baseGlyphName is None:\n raise GlifLibError("The base attribute is not defined in the component.")\n transformation = []\n for attr, default in _transformationInfo:\n value = component.get(attr)\n if value is None:\n value = default\n else:\n value = _number(value)\n transformation.append(value)\n identifier = component.get("identifier")\n if identifier is not None:\n if validate:\n if identifier in identifiers:\n raise GlifLibError(\n "The identifier %s is used more than once." % identifier\n )\n if validate and not identifierValidator(identifier):\n raise GlifLibError("The identifier %s is not valid." % identifier)\n identifiers.add(identifier)\n try:\n pen.addComponent(baseGlyphName, tuple(transformation), identifier=identifier)\n except TypeError:\n pen.addComponent(baseGlyphName, tuple(transformation))\n warn(\n "The addComponent method needs an identifier kwarg. The component's identifier value has been discarded.",\n DeprecationWarning,\n )\n\n\n# all formats\n\n\ndef _validateAndMassagePointStructures(\n contour, pointAttributes, openContourOffCurveLeniency=False, validate=True\n):\n if not len(contour):\n return\n # store some data for later validation\n lastOnCurvePoint = None\n haveOffCurvePoint = False\n # validate and massage the individual point elements\n massaged = []\n for index, element in enumerate(contour):\n # not <point>\n if element.tag != "point":\n raise GlifLibError(\n "Unknown child element (%s) of contour element." % element.tag\n )\n point = dict(element.attrib)\n massaged.append(point)\n if validate:\n # unknown attributes\n for attr in point.keys():\n if attr not in pointAttributes:\n raise GlifLibError("Unknown attribute in point element: %s" % attr)\n # search for unknown children\n if len(element):\n raise GlifLibError("Unknown child elements in point element.")\n # x and y are required\n for attr in ("x", "y"):\n try:\n point[attr] = _number(point[attr])\n except KeyError as e:\n raise GlifLibError(\n f"Required {attr} attribute is missing in point element."\n ) from e\n # segment type\n pointType = point.pop("type", "offcurve")\n if validate and pointType not in pointTypeOptions:\n raise GlifLibError("Unknown point type: %s" % pointType)\n if pointType == "offcurve":\n pointType = None\n point["segmentType"] = pointType\n if pointType is None:\n haveOffCurvePoint = True\n else:\n lastOnCurvePoint = index\n # move can only occur as the first point\n if validate and pointType == "move" and index != 0:\n raise GlifLibError(\n "A move point occurs after the first point in the contour."\n )\n # smooth is optional\n smooth = point.get("smooth", "no")\n if validate and smooth is not None:\n if smooth not in pointSmoothOptions:\n raise GlifLibError("Unknown point smooth value: %s" % smooth)\n smooth = smooth == "yes"\n point["smooth"] = smooth\n # smooth can only be applied to curve and qcurve\n if validate and smooth and pointType is None:\n raise GlifLibError("smooth attribute set in an offcurve point.")\n # name is optional\n if "name" not in element.attrib:\n point["name"] = None\n if openContourOffCurveLeniency:\n # remove offcurves that precede a move. this is technically illegal,\n # but we let it slide because there are fonts out there in the wild like this.\n if massaged[0]["segmentType"] == "move":\n count = 0\n for point in reversed(massaged):\n if point["segmentType"] is None:\n count += 1\n else:\n break\n if count:\n massaged = massaged[:-count]\n # validate the off-curves in the segments\n if validate and haveOffCurvePoint and lastOnCurvePoint is not None:\n # we only care about how many offCurves there are before an onCurve\n # filter out the trailing offCurves\n offCurvesCount = len(massaged) - 1 - lastOnCurvePoint\n for point in massaged:\n segmentType = point["segmentType"]\n if segmentType is None:\n offCurvesCount += 1\n else:\n if offCurvesCount:\n # move and line can't be preceded by off-curves\n if segmentType == "move":\n # this will have been filtered out already\n raise GlifLibError("move can not have an offcurve.")\n elif segmentType == "line":\n raise GlifLibError("line can not have an offcurve.")\n elif segmentType == "curve":\n if offCurvesCount > 2:\n raise GlifLibError("Too many offcurves defined for curve.")\n elif segmentType == "qcurve":\n pass\n else:\n # unknown segment type. it'll be caught later.\n pass\n offCurvesCount = 0\n return massaged\n\n\n# ---------------------\n# Misc Helper Functions\n# ---------------------\n\n\ndef _relaxedSetattr(object, attr, value):\n try:\n setattr(object, attr, value)\n except AttributeError:\n pass\n\n\ndef _number(s):\n """\n Given a numeric string, return an integer or a float, whichever\n the string indicates. _number("1") will return the integer 1,\n _number("1.0") will return the float 1.0.\n\n >>> _number("1")\n 1\n >>> _number("1.0")\n 1.0\n >>> _number("a") # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n GlifLibError: Could not convert a to an int or float.\n """\n try:\n n = int(s)\n return n\n except ValueError:\n pass\n try:\n n = float(s)\n return n\n except ValueError:\n raise GlifLibError("Could not convert %s to an int or float." % s)\n\n\n# --------------------\n# Rapid Value Fetching\n# --------------------\n\n# base\n\n\nclass _DoneParsing(Exception):\n pass\n\n\nclass _BaseParser:\n def __init__(self):\n self._elementStack = []\n\n def parse(self, text):\n from xml.parsers.expat import ParserCreate\n\n parser = ParserCreate()\n parser.StartElementHandler = self.startElementHandler\n parser.EndElementHandler = self.endElementHandler\n parser.Parse(text, 1)\n\n def startElementHandler(self, name, attrs):\n self._elementStack.append(name)\n\n def endElementHandler(self, name):\n other = self._elementStack.pop(-1)\n assert other == name\n\n\n# unicodes\n\n\ndef _fetchUnicodes(glif):\n """\n Get a list of unicodes listed in glif.\n """\n parser = _FetchUnicodesParser()\n parser.parse(glif)\n return parser.unicodes\n\n\nclass _FetchUnicodesParser(_BaseParser):\n def __init__(self):\n self.unicodes = []\n super().__init__()\n\n def startElementHandler(self, name, attrs):\n if (\n name == "unicode"\n and self._elementStack\n and self._elementStack[-1] == "glyph"\n ):\n value = attrs.get("hex")\n if value is not None:\n try:\n value = int(value, 16)\n if value not in self.unicodes:\n self.unicodes.append(value)\n except ValueError:\n pass\n super().startElementHandler(name, attrs)\n\n\n# image\n\n\ndef _fetchImageFileName(glif):\n """\n The image file name (if any) from glif.\n """\n parser = _FetchImageFileNameParser()\n try:\n parser.parse(glif)\n except _DoneParsing:\n pass\n return parser.fileName\n\n\nclass _FetchImageFileNameParser(_BaseParser):\n def __init__(self):\n self.fileName = None\n super().__init__()\n\n def startElementHandler(self, name, attrs):\n if name == "image" and self._elementStack and self._elementStack[-1] == "glyph":\n self.fileName = attrs.get("fileName")\n raise _DoneParsing\n super().startElementHandler(name, attrs)\n\n\n# component references\n\n\ndef _fetchComponentBases(glif):\n """\n Get a list of component base glyphs listed in glif.\n """\n parser = _FetchComponentBasesParser()\n try:\n parser.parse(glif)\n except _DoneParsing:\n pass\n return list(parser.bases)\n\n\nclass _FetchComponentBasesParser(_BaseParser):\n def __init__(self):\n self.bases = []\n super().__init__()\n\n def startElementHandler(self, name, attrs):\n if (\n name == "component"\n and self._elementStack\n and self._elementStack[-1] == "outline"\n ):\n base = attrs.get("base")\n if base is not None:\n self.bases.append(base)\n super().startElementHandler(name, attrs)\n\n def endElementHandler(self, name):\n if name == "outline":\n raise _DoneParsing\n super().endElementHandler(name)\n\n\n# --------------\n# GLIF Point Pen\n# --------------\n\n_transformationInfo = [\n # field name, default value\n ("xScale", 1),\n ("xyScale", 0),\n ("yxScale", 0),\n ("yScale", 1),\n ("xOffset", 0),\n ("yOffset", 0),\n]\n\n\nclass GLIFPointPen(AbstractPointPen):\n """\n Helper class using the PointPen protocol to write the <outline>\n part of .glif files.\n """\n\n def __init__(self, element, formatVersion=None, identifiers=None, validate=True):\n if identifiers is None:\n identifiers = set()\n self.formatVersion = GLIFFormatVersion(formatVersion)\n self.identifiers = identifiers\n self.outline = element\n self.contour = None\n self.prevOffCurveCount = 0\n self.prevPointTypes = []\n self.validate = validate\n\n def beginPath(self, identifier=None, **kwargs):\n attrs = OrderedDict()\n if identifier is not None and self.formatVersion.major >= 2:\n if self.validate:\n if identifier in self.identifiers:\n raise GlifLibError(\n "identifier used more than once: %s" % identifier\n )\n if not identifierValidator(identifier):\n raise GlifLibError(\n "identifier not formatted properly: %s" % identifier\n )\n attrs["identifier"] = identifier\n self.identifiers.add(identifier)\n self.contour = etree.SubElement(self.outline, "contour", attrs)\n self.prevOffCurveCount = 0\n\n def endPath(self):\n if self.prevPointTypes and self.prevPointTypes[0] == "move":\n if self.validate and self.prevPointTypes[-1] == "offcurve":\n raise GlifLibError("open contour has loose offcurve point")\n # prevent lxml from writing self-closing tags\n if not len(self.contour):\n self.contour.text = "\n "\n self.contour = None\n self.prevPointType = None\n self.prevOffCurveCount = 0\n self.prevPointTypes = []\n\n def addPoint(\n self, pt, segmentType=None, smooth=None, name=None, identifier=None, **kwargs\n ):\n attrs = OrderedDict()\n # coordinates\n if pt is not None:\n if self.validate:\n for coord in pt:\n if not isinstance(coord, numberTypes):\n raise GlifLibError("coordinates must be int or float")\n attrs["x"] = repr(pt[0])\n attrs["y"] = repr(pt[1])\n # segment type\n if segmentType == "offcurve":\n segmentType = None\n if self.validate:\n if segmentType == "move" and self.prevPointTypes:\n raise GlifLibError(\n "move occurs after a point has already been added to the contour."\n )\n if (\n segmentType in ("move", "line")\n and self.prevPointTypes\n and self.prevPointTypes[-1] == "offcurve"\n ):\n raise GlifLibError("offcurve occurs before %s point." % segmentType)\n if segmentType == "curve" and self.prevOffCurveCount > 2:\n raise GlifLibError("too many offcurve points before curve point.")\n if segmentType is not None:\n attrs["type"] = segmentType\n else:\n segmentType = "offcurve"\n if segmentType == "offcurve":\n self.prevOffCurveCount += 1\n else:\n self.prevOffCurveCount = 0\n self.prevPointTypes.append(segmentType)\n # smooth\n if smooth:\n if self.validate and segmentType == "offcurve":\n raise GlifLibError("can't set smooth in an offcurve point.")\n attrs["smooth"] = "yes"\n # name\n if name is not None:\n attrs["name"] = name\n # identifier\n if identifier is not None and self.formatVersion.major >= 2:\n if self.validate:\n if identifier in self.identifiers:\n raise GlifLibError(\n "identifier used more than once: %s" % identifier\n )\n if not identifierValidator(identifier):\n raise GlifLibError(\n "identifier not formatted properly: %s" % identifier\n )\n attrs["identifier"] = identifier\n self.identifiers.add(identifier)\n etree.SubElement(self.contour, "point", attrs)\n\n def addComponent(self, glyphName, transformation, identifier=None, **kwargs):\n attrs = OrderedDict([("base", glyphName)])\n for (attr, default), value in zip(_transformationInfo, transformation):\n if self.validate and not isinstance(value, numberTypes):\n raise GlifLibError("transformation values must be int or float")\n if value != default:\n attrs[attr] = repr(value)\n if identifier is not None and self.formatVersion.major >= 2:\n if self.validate:\n if identifier in self.identifiers:\n raise GlifLibError(\n "identifier used more than once: %s" % identifier\n )\n if self.validate and not identifierValidator(identifier):\n raise GlifLibError(\n "identifier not formatted properly: %s" % identifier\n )\n attrs["identifier"] = identifier\n self.identifiers.add(identifier)\n etree.SubElement(self.outline, "component", attrs)\n\n\nif __name__ == "__main__":\n import doctest\n\n doctest.testmod()\n
.venv\Lib\site-packages\fontTools\ufoLib\glifLib.py
glifLib.py
Python
74,781
0.75
0.237555
0.063901
react-lib
213
2023-08-18T02:12:08.497313
BSD-3-Clause
false
96f135aeb6114f19af8cc097a16ef6ba
def lookupKerningValue(\n pair, kerning, groups, fallback=0, glyphToFirstGroup=None, glyphToSecondGroup=None\n):\n """Retrieve the kerning value (if any) between a pair of elements.\n\n The elments can be either individual glyphs (by name) or kerning\n groups (by name), or any combination of the two.\n\n Args:\n pair:\n A tuple, in logical order (first, second) with respect\n to the reading direction, to query the font for kerning\n information on. Each element in the tuple can be either\n a glyph name or a kerning group name.\n kerning:\n A dictionary of kerning pairs.\n groups:\n A set of kerning groups.\n fallback:\n The fallback value to return if no kern is found between\n the elements in ``pair``. Defaults to 0.\n glyphToFirstGroup:\n A dictionary mapping glyph names to the first-glyph kerning\n groups to which they belong. Defaults to ``None``.\n glyphToSecondGroup:\n A dictionary mapping glyph names to the second-glyph kerning\n groups to which they belong. Defaults to ``None``.\n\n Returns:\n The kerning value between the element pair. If no kerning for\n the pair is found, the fallback value is returned.\n\n Note: This function expects the ``kerning`` argument to be a flat\n dictionary of kerning pairs, not the nested structure used in a\n kerning.plist file.\n\n Examples::\n\n >>> groups = {\n ... "public.kern1.O" : ["O", "D", "Q"],\n ... "public.kern2.E" : ["E", "F"]\n ... }\n >>> kerning = {\n ... ("public.kern1.O", "public.kern2.E") : -100,\n ... ("public.kern1.O", "F") : -200,\n ... ("D", "F") : -300\n ... }\n >>> lookupKerningValue(("D", "F"), kerning, groups)\n -300\n >>> lookupKerningValue(("O", "F"), kerning, groups)\n -200\n >>> lookupKerningValue(("O", "E"), kerning, groups)\n -100\n >>> lookupKerningValue(("O", "O"), kerning, groups)\n 0\n >>> lookupKerningValue(("E", "E"), kerning, groups)\n 0\n >>> lookupKerningValue(("E", "O"), kerning, groups)\n 0\n >>> lookupKerningValue(("X", "X"), kerning, groups)\n 0\n >>> lookupKerningValue(("public.kern1.O", "public.kern2.E"),\n ... kerning, groups)\n -100\n >>> lookupKerningValue(("public.kern1.O", "F"), kerning, groups)\n -200\n >>> lookupKerningValue(("O", "public.kern2.E"), kerning, groups)\n -100\n >>> lookupKerningValue(("public.kern1.X", "public.kern2.X"), kerning, groups)\n 0\n """\n # quickly check to see if the pair is in the kerning dictionary\n if pair in kerning:\n return kerning[pair]\n # create glyph to group mapping\n if glyphToFirstGroup is not None:\n assert glyphToSecondGroup is not None\n if glyphToSecondGroup is not None:\n assert glyphToFirstGroup is not None\n if glyphToFirstGroup is None:\n glyphToFirstGroup = {}\n glyphToSecondGroup = {}\n for group, groupMembers in groups.items():\n if group.startswith("public.kern1."):\n for glyph in groupMembers:\n glyphToFirstGroup[glyph] = group\n elif group.startswith("public.kern2."):\n for glyph in groupMembers:\n glyphToSecondGroup[glyph] = group\n # get group names and make sure first and second are glyph names\n first, second = pair\n firstGroup = secondGroup = None\n if first.startswith("public.kern1."):\n firstGroup = first\n first = None\n else:\n firstGroup = glyphToFirstGroup.get(first)\n if second.startswith("public.kern2."):\n secondGroup = second\n second = None\n else:\n secondGroup = glyphToSecondGroup.get(second)\n # make an ordered list of pairs to look up\n pairs = [\n (first, second),\n (first, secondGroup),\n (firstGroup, second),\n (firstGroup, secondGroup),\n ]\n # look up the pairs and return any matches\n for pair in pairs:\n if pair in kerning:\n return kerning[pair]\n # use the fallback value\n return fallback\n\n\nif __name__ == "__main__":\n import doctest\n\n doctest.testmod()\n
.venv\Lib\site-packages\fontTools\ufoLib\kerning.py
kerning.py
Python
4,354
0.95
0.165289
0.053571
python-kit
650
2025-03-24T04:37:50.245527
Apache-2.0
false
959ac3fcbe15c81b316a61dc5c79cbe7
"""DEPRECATED - This module is kept here only as a backward compatibility shim\nfor the old `ufoLib.plistlib` module, which was moved to :class:`fontTools.misc.plistlib`.\nPlease use the latter instead.\n"""\n\nfrom fontTools.misc.plistlib import dump, dumps, load, loads\nfrom fontTools.misc.textTools import tobytes\n\n# The following functions were part of the old py2-like ufoLib.plistlib API.\n# They are kept only for backward compatiblity.\nfrom fontTools.ufoLib.utils import deprecated\n\n\n@deprecated("Use 'fontTools.misc.plistlib.load' instead")\ndef readPlist(path_or_file):\n did_open = False\n if isinstance(path_or_file, str):\n path_or_file = open(path_or_file, "rb")\n did_open = True\n try:\n return load(path_or_file, use_builtin_types=False)\n finally:\n if did_open:\n path_or_file.close()\n\n\n@deprecated("Use 'fontTools.misc.plistlib.dump' instead")\ndef writePlist(value, path_or_file):\n did_open = False\n if isinstance(path_or_file, str):\n path_or_file = open(path_or_file, "wb")\n did_open = True\n try:\n dump(value, path_or_file, use_builtin_types=False)\n finally:\n if did_open:\n path_or_file.close()\n\n\n@deprecated("Use 'fontTools.misc.plistlib.loads' instead")\ndef readPlistFromString(data):\n return loads(tobytes(data, encoding="utf-8"), use_builtin_types=False)\n\n\n@deprecated("Use 'fontTools.misc.plistlib.dumps' instead")\ndef writePlistToString(value):\n return dumps(value, use_builtin_types=False)\n
.venv\Lib\site-packages\fontTools\ufoLib\plistlib.py
plistlib.py
Python
1,557
0.95
0.276596
0.054054
react-lib
273
2024-09-14T08:35:55.617426
BSD-3-Clause
false
f08e6234f9cbc63c752b2d846a058e97
"""DEPRECATED - This module is kept here only as a backward compatibility shim\nfor the old `ufoLib.pointPen` module, which was moved to :class:`fontTools.pens.pointPen`.\nPlease use the latter instead.\n"""\n\nfrom fontTools.pens.pointPen import *\n
.venv\Lib\site-packages\fontTools\ufoLib\pointPen.py
pointPen.py
Python
250
0.85
0.333333
0
react-lib
662
2025-04-12T17:19:05.067584
Apache-2.0
false
834b1e70cd5b29226416c87acbb3d30e
"""This module contains miscellaneous helpers.\n\nIt is not considered part of the public ufoLib API. It does, however,\ndefine the :py:obj:`.deprecated` decorator that is used elsewhere in\nthe module.\n"""\n\nimport warnings\nimport functools\n\n\nnumberTypes = (int, float)\n\n\ndef deprecated(msg=""):\n """Decorator factory to mark functions as deprecated with given message.\n\n >>> @deprecated("Enough!")\n ... def some_function():\n ... "I just print 'hello world'."\n ... print("hello world")\n >>> some_function()\n hello world\n >>> some_function.__doc__ == "I just print 'hello world'."\n True\n """\n\n def deprecated_decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n warnings.warn(\n f"{func.__name__} function is a deprecated. {msg}",\n category=DeprecationWarning,\n stacklevel=2,\n )\n return func(*args, **kwargs)\n\n return wrapper\n\n return deprecated_decorator\n\n\n# To be mixed with enum.Enum in UFOFormatVersion and GLIFFormatVersion\nclass _VersionTupleEnumMixin:\n @property\n def major(self):\n return self.value[0]\n\n @property\n def minor(self):\n return self.value[1]\n\n @classmethod\n def _missing_(cls, value):\n # allow to initialize a version enum from a single (major) integer\n if isinstance(value, int):\n return cls((value, 0))\n # or from None to obtain the current default version\n if value is None:\n return cls.default()\n return super()._missing_(value)\n\n def __str__(self):\n return f"{self.major}.{self.minor}"\n\n @classmethod\n def default(cls):\n # get the latest defined version (i.e. the max of all versions)\n return max(cls.__members__.values())\n\n @classmethod\n def supported_versions(cls):\n return frozenset(cls.__members__.values())\n\n\nif __name__ == "__main__":\n import doctest\n\n doctest.testmod()\n
.venv\Lib\site-packages\fontTools\ufoLib\utils.py
utils.py
Python
2,074
0.95
0.189873
0.067797
node-utils
957
2025-03-31T14:34:01.790370
GPL-3.0
false
7b7918dcdf4bbbf4f6679b39a28ddb76
"""Various low level data validators."""\n\nimport calendar\nfrom io import open\nimport fs.base\nimport fs.osfs\n\nfrom collections.abc import Mapping\nfrom fontTools.ufoLib.utils import numberTypes\n\n\n# -------\n# Generic\n# -------\n\n\ndef isDictEnough(value):\n """\n Some objects will likely come in that aren't\n dicts but are dict-ish enough.\n """\n if isinstance(value, Mapping):\n return True\n for attr in ("keys", "values", "items"):\n if not hasattr(value, attr):\n return False\n return True\n\n\ndef genericTypeValidator(value, typ):\n """\n Generic. (Added at version 2.)\n """\n return isinstance(value, typ)\n\n\ndef genericIntListValidator(values, validValues):\n """\n Generic. (Added at version 2.)\n """\n if not isinstance(values, (list, tuple)):\n return False\n valuesSet = set(values)\n validValuesSet = set(validValues)\n if valuesSet - validValuesSet:\n return False\n for value in values:\n if not isinstance(value, int):\n return False\n return True\n\n\ndef genericNonNegativeIntValidator(value):\n """\n Generic. (Added at version 3.)\n """\n if not isinstance(value, int):\n return False\n if value < 0:\n return False\n return True\n\n\ndef genericNonNegativeNumberValidator(value):\n """\n Generic. (Added at version 3.)\n """\n if not isinstance(value, numberTypes):\n return False\n if value < 0:\n return False\n return True\n\n\ndef genericDictValidator(value, prototype):\n """\n Generic. (Added at version 3.)\n """\n # not a dict\n if not isinstance(value, Mapping):\n return False\n # missing required keys\n for key, (typ, required) in prototype.items():\n if not required:\n continue\n if key not in value:\n return False\n # unknown keys\n for key in value.keys():\n if key not in prototype:\n return False\n # incorrect types\n for key, v in value.items():\n prototypeType, required = prototype[key]\n if v is None and not required:\n continue\n if not isinstance(v, prototypeType):\n return False\n return True\n\n\n# --------------\n# fontinfo.plist\n# --------------\n\n# Data Validators\n\n\ndef fontInfoStyleMapStyleNameValidator(value):\n """\n Version 2+.\n """\n options = ["regular", "italic", "bold", "bold italic"]\n return value in options\n\n\ndef fontInfoOpenTypeGaspRangeRecordsValidator(value):\n """\n Version 3+.\n """\n if not isinstance(value, list):\n return False\n if len(value) == 0:\n return True\n validBehaviors = [0, 1, 2, 3]\n dictPrototype = dict(rangeMaxPPEM=(int, True), rangeGaspBehavior=(list, True))\n ppemOrder = []\n for rangeRecord in value:\n if not genericDictValidator(rangeRecord, dictPrototype):\n return False\n ppem = rangeRecord["rangeMaxPPEM"]\n behavior = rangeRecord["rangeGaspBehavior"]\n ppemValidity = genericNonNegativeIntValidator(ppem)\n if not ppemValidity:\n return False\n behaviorValidity = genericIntListValidator(behavior, validBehaviors)\n if not behaviorValidity:\n return False\n ppemOrder.append(ppem)\n if ppemOrder != sorted(ppemOrder):\n return False\n return True\n\n\ndef fontInfoOpenTypeHeadCreatedValidator(value):\n """\n Version 2+.\n """\n # format: 0000/00/00 00:00:00\n if not isinstance(value, str):\n return False\n # basic formatting\n if not len(value) == 19:\n return False\n if value.count(" ") != 1:\n return False\n date, time = value.split(" ")\n if date.count("/") != 2:\n return False\n if time.count(":") != 2:\n return False\n # date\n year, month, day = date.split("/")\n if len(year) != 4:\n return False\n if len(month) != 2:\n return False\n if len(day) != 2:\n return False\n try:\n year = int(year)\n month = int(month)\n day = int(day)\n except ValueError:\n return False\n if month < 1 or month > 12:\n return False\n monthMaxDay = calendar.monthrange(year, month)[1]\n if day < 1 or day > monthMaxDay:\n return False\n # time\n hour, minute, second = time.split(":")\n if len(hour) != 2:\n return False\n if len(minute) != 2:\n return False\n if len(second) != 2:\n return False\n try:\n hour = int(hour)\n minute = int(minute)\n second = int(second)\n except ValueError:\n return False\n if hour < 0 or hour > 23:\n return False\n if minute < 0 or minute > 59:\n return False\n if second < 0 or second > 59:\n return False\n # fallback\n return True\n\n\ndef fontInfoOpenTypeNameRecordsValidator(value):\n """\n Version 3+.\n """\n if not isinstance(value, list):\n return False\n dictPrototype = dict(\n nameID=(int, True),\n platformID=(int, True),\n encodingID=(int, True),\n languageID=(int, True),\n string=(str, True),\n )\n for nameRecord in value:\n if not genericDictValidator(nameRecord, dictPrototype):\n return False\n return True\n\n\ndef fontInfoOpenTypeOS2WeightClassValidator(value):\n """\n Version 2+.\n """\n if not isinstance(value, int):\n return False\n if value < 0:\n return False\n return True\n\n\ndef fontInfoOpenTypeOS2WidthClassValidator(value):\n """\n Version 2+.\n """\n if not isinstance(value, int):\n return False\n if value < 1:\n return False\n if value > 9:\n return False\n return True\n\n\ndef fontInfoVersion2OpenTypeOS2PanoseValidator(values):\n """\n Version 2.\n """\n if not isinstance(values, (list, tuple)):\n return False\n if len(values) != 10:\n return False\n for value in values:\n if not isinstance(value, int):\n return False\n # XXX further validation?\n return True\n\n\ndef fontInfoVersion3OpenTypeOS2PanoseValidator(values):\n """\n Version 3+.\n """\n if not isinstance(values, (list, tuple)):\n return False\n if len(values) != 10:\n return False\n for value in values:\n if not isinstance(value, int):\n return False\n if value < 0:\n return False\n # XXX further validation?\n return True\n\n\ndef fontInfoOpenTypeOS2FamilyClassValidator(values):\n """\n Version 2+.\n """\n if not isinstance(values, (list, tuple)):\n return False\n if len(values) != 2:\n return False\n for value in values:\n if not isinstance(value, int):\n return False\n classID, subclassID = values\n if classID < 0 or classID > 14:\n return False\n if subclassID < 0 or subclassID > 15:\n return False\n return True\n\n\ndef fontInfoPostscriptBluesValidator(values):\n """\n Version 2+.\n """\n if not isinstance(values, (list, tuple)):\n return False\n if len(values) > 14:\n return False\n if len(values) % 2:\n return False\n for value in values:\n if not isinstance(value, numberTypes):\n return False\n return True\n\n\ndef fontInfoPostscriptOtherBluesValidator(values):\n """\n Version 2+.\n """\n if not isinstance(values, (list, tuple)):\n return False\n if len(values) > 10:\n return False\n if len(values) % 2:\n return False\n for value in values:\n if not isinstance(value, numberTypes):\n return False\n return True\n\n\ndef fontInfoPostscriptStemsValidator(values):\n """\n Version 2+.\n """\n if not isinstance(values, (list, tuple)):\n return False\n if len(values) > 12:\n return False\n for value in values:\n if not isinstance(value, numberTypes):\n return False\n return True\n\n\ndef fontInfoPostscriptWindowsCharacterSetValidator(value):\n """\n Version 2+.\n """\n validValues = list(range(1, 21))\n if value not in validValues:\n return False\n return True\n\n\ndef fontInfoWOFFMetadataUniqueIDValidator(value):\n """\n Version 3+.\n """\n dictPrototype = dict(id=(str, True))\n if not genericDictValidator(value, dictPrototype):\n return False\n return True\n\n\ndef fontInfoWOFFMetadataVendorValidator(value):\n """\n Version 3+.\n """\n dictPrototype = {\n "name": (str, True),\n "url": (str, False),\n "dir": (str, False),\n "class": (str, False),\n }\n if not genericDictValidator(value, dictPrototype):\n return False\n if "dir" in value and value.get("dir") not in ("ltr", "rtl"):\n return False\n return True\n\n\ndef fontInfoWOFFMetadataCreditsValidator(value):\n """\n Version 3+.\n """\n dictPrototype = dict(credits=(list, True))\n if not genericDictValidator(value, dictPrototype):\n return False\n if not len(value["credits"]):\n return False\n dictPrototype = {\n "name": (str, True),\n "url": (str, False),\n "role": (str, False),\n "dir": (str, False),\n "class": (str, False),\n }\n for credit in value["credits"]:\n if not genericDictValidator(credit, dictPrototype):\n return False\n if "dir" in credit and credit.get("dir") not in ("ltr", "rtl"):\n return False\n return True\n\n\ndef fontInfoWOFFMetadataDescriptionValidator(value):\n """\n Version 3+.\n """\n dictPrototype = dict(url=(str, False), text=(list, True))\n if not genericDictValidator(value, dictPrototype):\n return False\n for text in value["text"]:\n if not fontInfoWOFFMetadataTextValue(text):\n return False\n return True\n\n\ndef fontInfoWOFFMetadataLicenseValidator(value):\n """\n Version 3+.\n """\n dictPrototype = dict(url=(str, False), text=(list, False), id=(str, False))\n if not genericDictValidator(value, dictPrototype):\n return False\n if "text" in value:\n for text in value["text"]:\n if not fontInfoWOFFMetadataTextValue(text):\n return False\n return True\n\n\ndef fontInfoWOFFMetadataTrademarkValidator(value):\n """\n Version 3+.\n """\n dictPrototype = dict(text=(list, True))\n if not genericDictValidator(value, dictPrototype):\n return False\n for text in value["text"]:\n if not fontInfoWOFFMetadataTextValue(text):\n return False\n return True\n\n\ndef fontInfoWOFFMetadataCopyrightValidator(value):\n """\n Version 3+.\n """\n dictPrototype = dict(text=(list, True))\n if not genericDictValidator(value, dictPrototype):\n return False\n for text in value["text"]:\n if not fontInfoWOFFMetadataTextValue(text):\n return False\n return True\n\n\ndef fontInfoWOFFMetadataLicenseeValidator(value):\n """\n Version 3+.\n """\n dictPrototype = {"name": (str, True), "dir": (str, False), "class": (str, False)}\n if not genericDictValidator(value, dictPrototype):\n return False\n if "dir" in value and value.get("dir") not in ("ltr", "rtl"):\n return False\n return True\n\n\ndef fontInfoWOFFMetadataTextValue(value):\n """\n Version 3+.\n """\n dictPrototype = {\n "text": (str, True),\n "language": (str, False),\n "dir": (str, False),\n "class": (str, False),\n }\n if not genericDictValidator(value, dictPrototype):\n return False\n if "dir" in value and value.get("dir") not in ("ltr", "rtl"):\n return False\n return True\n\n\ndef fontInfoWOFFMetadataExtensionsValidator(value):\n """\n Version 3+.\n """\n if not isinstance(value, list):\n return False\n if not value:\n return False\n for extension in value:\n if not fontInfoWOFFMetadataExtensionValidator(extension):\n return False\n return True\n\n\ndef fontInfoWOFFMetadataExtensionValidator(value):\n """\n Version 3+.\n """\n dictPrototype = dict(names=(list, False), items=(list, True), id=(str, False))\n if not genericDictValidator(value, dictPrototype):\n return False\n if "names" in value:\n for name in value["names"]:\n if not fontInfoWOFFMetadataExtensionNameValidator(name):\n return False\n for item in value["items"]:\n if not fontInfoWOFFMetadataExtensionItemValidator(item):\n return False\n return True\n\n\ndef fontInfoWOFFMetadataExtensionItemValidator(value):\n """\n Version 3+.\n """\n dictPrototype = dict(id=(str, False), names=(list, True), values=(list, True))\n if not genericDictValidator(value, dictPrototype):\n return False\n for name in value["names"]:\n if not fontInfoWOFFMetadataExtensionNameValidator(name):\n return False\n for val in value["values"]:\n if not fontInfoWOFFMetadataExtensionValueValidator(val):\n return False\n return True\n\n\ndef fontInfoWOFFMetadataExtensionNameValidator(value):\n """\n Version 3+.\n """\n dictPrototype = {\n "text": (str, True),\n "language": (str, False),\n "dir": (str, False),\n "class": (str, False),\n }\n if not genericDictValidator(value, dictPrototype):\n return False\n if "dir" in value and value.get("dir") not in ("ltr", "rtl"):\n return False\n return True\n\n\ndef fontInfoWOFFMetadataExtensionValueValidator(value):\n """\n Version 3+.\n """\n dictPrototype = {\n "text": (str, True),\n "language": (str, False),\n "dir": (str, False),\n "class": (str, False),\n }\n if not genericDictValidator(value, dictPrototype):\n return False\n if "dir" in value and value.get("dir") not in ("ltr", "rtl"):\n return False\n return True\n\n\n# ----------\n# Guidelines\n# ----------\n\n\ndef guidelinesValidator(value, identifiers=None):\n """\n Version 3+.\n """\n if not isinstance(value, list):\n return False\n if identifiers is None:\n identifiers = set()\n for guide in value:\n if not guidelineValidator(guide):\n return False\n identifier = guide.get("identifier")\n if identifier is not None:\n if identifier in identifiers:\n return False\n identifiers.add(identifier)\n return True\n\n\n_guidelineDictPrototype = dict(\n x=((int, float), False),\n y=((int, float), False),\n angle=((int, float), False),\n name=(str, False),\n color=(str, False),\n identifier=(str, False),\n)\n\n\ndef guidelineValidator(value):\n """\n Version 3+.\n """\n if not genericDictValidator(value, _guidelineDictPrototype):\n return False\n x = value.get("x")\n y = value.get("y")\n angle = value.get("angle")\n # x or y must be present\n if x is None and y is None:\n return False\n # if x or y are None, angle must not be present\n if x is None or y is None:\n if angle is not None:\n return False\n # if x and y are defined, angle must be defined\n if x is not None and y is not None and angle is None:\n return False\n # angle must be between 0 and 360\n if angle is not None:\n if angle < 0:\n return False\n if angle > 360:\n return False\n # identifier must be 1 or more characters\n identifier = value.get("identifier")\n if identifier is not None and not identifierValidator(identifier):\n return False\n # color must follow the proper format\n color = value.get("color")\n if color is not None and not colorValidator(color):\n return False\n return True\n\n\n# -------\n# Anchors\n# -------\n\n\ndef anchorsValidator(value, identifiers=None):\n """\n Version 3+.\n """\n if not isinstance(value, list):\n return False\n if identifiers is None:\n identifiers = set()\n for anchor in value:\n if not anchorValidator(anchor):\n return False\n identifier = anchor.get("identifier")\n if identifier is not None:\n if identifier in identifiers:\n return False\n identifiers.add(identifier)\n return True\n\n\n_anchorDictPrototype = dict(\n x=((int, float), False),\n y=((int, float), False),\n name=(str, False),\n color=(str, False),\n identifier=(str, False),\n)\n\n\ndef anchorValidator(value):\n """\n Version 3+.\n """\n if not genericDictValidator(value, _anchorDictPrototype):\n return False\n x = value.get("x")\n y = value.get("y")\n # x and y must be present\n if x is None or y is None:\n return False\n # identifier must be 1 or more characters\n identifier = value.get("identifier")\n if identifier is not None and not identifierValidator(identifier):\n return False\n # color must follow the proper format\n color = value.get("color")\n if color is not None and not colorValidator(color):\n return False\n return True\n\n\n# ----------\n# Identifier\n# ----------\n\n\ndef identifierValidator(value):\n """\n Version 3+.\n\n >>> identifierValidator("a")\n True\n >>> identifierValidator("")\n False\n >>> identifierValidator("a" * 101)\n False\n """\n validCharactersMin = 0x20\n validCharactersMax = 0x7E\n if not isinstance(value, str):\n return False\n if not value:\n return False\n if len(value) > 100:\n return False\n for c in value:\n c = ord(c)\n if c < validCharactersMin or c > validCharactersMax:\n return False\n return True\n\n\n# -----\n# Color\n# -----\n\n\ndef colorValidator(value):\n """\n Version 3+.\n\n >>> colorValidator("0,0,0,0")\n True\n >>> colorValidator(".5,.5,.5,.5")\n True\n >>> colorValidator("0.5,0.5,0.5,0.5")\n True\n >>> colorValidator("1,1,1,1")\n True\n\n >>> colorValidator("2,0,0,0")\n False\n >>> colorValidator("0,2,0,0")\n False\n >>> colorValidator("0,0,2,0")\n False\n >>> colorValidator("0,0,0,2")\n False\n\n >>> colorValidator("1r,1,1,1")\n False\n >>> colorValidator("1,1g,1,1")\n False\n >>> colorValidator("1,1,1b,1")\n False\n >>> colorValidator("1,1,1,1a")\n False\n\n >>> colorValidator("1 1 1 1")\n False\n >>> colorValidator("1 1,1,1")\n False\n >>> colorValidator("1,1 1,1")\n False\n >>> colorValidator("1,1,1 1")\n False\n\n >>> colorValidator("1, 1, 1, 1")\n True\n """\n if not isinstance(value, str):\n return False\n parts = value.split(",")\n if len(parts) != 4:\n return False\n for part in parts:\n part = part.strip()\n converted = False\n try:\n part = int(part)\n converted = True\n except ValueError:\n pass\n if not converted:\n try:\n part = float(part)\n converted = True\n except ValueError:\n pass\n if not converted:\n return False\n if part < 0:\n return False\n if part > 1:\n return False\n return True\n\n\n# -----\n# image\n# -----\n\npngSignature = b"\x89PNG\r\n\x1a\n"\n\n_imageDictPrototype = dict(\n fileName=(str, True),\n xScale=((int, float), False),\n xyScale=((int, float), False),\n yxScale=((int, float), False),\n yScale=((int, float), False),\n xOffset=((int, float), False),\n yOffset=((int, float), False),\n color=(str, False),\n)\n\n\ndef imageValidator(value):\n """\n Version 3+.\n """\n if not genericDictValidator(value, _imageDictPrototype):\n return False\n # fileName must be one or more characters\n if not value["fileName"]:\n return False\n # color must follow the proper format\n color = value.get("color")\n if color is not None and not colorValidator(color):\n return False\n return True\n\n\ndef pngValidator(path=None, data=None, fileObj=None):\n """\n Version 3+.\n\n This checks the signature of the image data.\n """\n assert path is not None or data is not None or fileObj is not None\n if path is not None:\n with open(path, "rb") as f:\n signature = f.read(8)\n elif data is not None:\n signature = data[:8]\n elif fileObj is not None:\n pos = fileObj.tell()\n signature = fileObj.read(8)\n fileObj.seek(pos)\n if signature != pngSignature:\n return False, "Image does not begin with the PNG signature."\n return True, None\n\n\n# -------------------\n# layercontents.plist\n# -------------------\n\n\ndef layerContentsValidator(value, ufoPathOrFileSystem):\n """\n Check the validity of layercontents.plist.\n Version 3+.\n """\n if isinstance(ufoPathOrFileSystem, fs.base.FS):\n fileSystem = ufoPathOrFileSystem\n else:\n fileSystem = fs.osfs.OSFS(ufoPathOrFileSystem)\n\n bogusFileMessage = "layercontents.plist in not in the correct format."\n # file isn't in the right format\n if not isinstance(value, list):\n return False, bogusFileMessage\n # work through each entry\n usedLayerNames = set()\n usedDirectories = set()\n contents = {}\n for entry in value:\n # layer entry in the incorrect format\n if not isinstance(entry, list):\n return False, bogusFileMessage\n if not len(entry) == 2:\n return False, bogusFileMessage\n for i in entry:\n if not isinstance(i, str):\n return False, bogusFileMessage\n layerName, directoryName = entry\n # check directory naming\n if directoryName != "glyphs":\n if not directoryName.startswith("glyphs."):\n return (\n False,\n "Invalid directory name (%s) in layercontents.plist."\n % directoryName,\n )\n if len(layerName) == 0:\n return False, "Empty layer name in layercontents.plist."\n # directory doesn't exist\n if not fileSystem.exists(directoryName):\n return False, "A glyphset does not exist at %s." % directoryName\n # default layer name\n if layerName == "public.default" and directoryName != "glyphs":\n return (\n False,\n "The name public.default is being used by a layer that is not the default.",\n )\n # check usage\n if layerName in usedLayerNames:\n return (\n False,\n "The layer name %s is used by more than one layer." % layerName,\n )\n usedLayerNames.add(layerName)\n if directoryName in usedDirectories:\n return (\n False,\n "The directory %s is used by more than one layer." % directoryName,\n )\n usedDirectories.add(directoryName)\n # store\n contents[layerName] = directoryName\n # missing default layer\n foundDefault = "glyphs" in contents.values()\n if not foundDefault:\n return False, "The required default glyph set is not in the UFO."\n return True, None\n\n\n# ------------\n# groups.plist\n# ------------\n\n\ndef groupsValidator(value):\n """\n Check the validity of the groups.\n Version 3+ (though it's backwards compatible with UFO 1 and UFO 2).\n\n >>> groups = {"A" : ["A", "A"], "A2" : ["A"]}\n >>> groupsValidator(groups)\n (True, None)\n\n >>> groups = {"" : ["A"]}\n >>> valid, msg = groupsValidator(groups)\n >>> valid\n False\n >>> print(msg)\n A group has an empty name.\n\n >>> groups = {"public.awesome" : ["A"]}\n >>> groupsValidator(groups)\n (True, None)\n\n >>> groups = {"public.kern1." : ["A"]}\n >>> valid, msg = groupsValidator(groups)\n >>> valid\n False\n >>> print(msg)\n The group data contains a kerning group with an incomplete name.\n >>> groups = {"public.kern2." : ["A"]}\n >>> valid, msg = groupsValidator(groups)\n >>> valid\n False\n >>> print(msg)\n The group data contains a kerning group with an incomplete name.\n\n >>> groups = {"public.kern1.A" : ["A"], "public.kern2.A" : ["A"]}\n >>> groupsValidator(groups)\n (True, None)\n\n >>> groups = {"public.kern1.A1" : ["A"], "public.kern1.A2" : ["A"]}\n >>> valid, msg = groupsValidator(groups)\n >>> valid\n False\n >>> print(msg)\n The glyph "A" occurs in too many kerning groups.\n """\n bogusFormatMessage = "The group data is not in the correct format."\n if not isDictEnough(value):\n return False, bogusFormatMessage\n firstSideMapping = {}\n secondSideMapping = {}\n for groupName, glyphList in value.items():\n if not isinstance(groupName, (str)):\n return False, bogusFormatMessage\n if not isinstance(glyphList, (list, tuple)):\n return False, bogusFormatMessage\n if not groupName:\n return False, "A group has an empty name."\n if groupName.startswith("public."):\n if not groupName.startswith("public.kern1.") and not groupName.startswith(\n "public.kern2."\n ):\n # unknown public.* name. silently skip.\n continue\n else:\n if len("public.kernN.") == len(groupName):\n return (\n False,\n "The group data contains a kerning group with an incomplete name.",\n )\n if groupName.startswith("public.kern1."):\n d = firstSideMapping\n else:\n d = secondSideMapping\n for glyphName in glyphList:\n if not isinstance(glyphName, str):\n return (\n False,\n "The group data %s contains an invalid member." % groupName,\n )\n if glyphName in d:\n return (\n False,\n 'The glyph "%s" occurs in too many kerning groups.' % glyphName,\n )\n d[glyphName] = groupName\n return True, None\n\n\n# -------------\n# kerning.plist\n# -------------\n\n\ndef kerningValidator(data):\n """\n Check the validity of the kerning data structure.\n Version 3+ (though it's backwards compatible with UFO 1 and UFO 2).\n\n >>> kerning = {"A" : {"B" : 100}}\n >>> kerningValidator(kerning)\n (True, None)\n\n >>> kerning = {"A" : ["B"]}\n >>> valid, msg = kerningValidator(kerning)\n >>> valid\n False\n >>> print(msg)\n The kerning data is not in the correct format.\n\n >>> kerning = {"A" : {"B" : "100"}}\n >>> valid, msg = kerningValidator(kerning)\n >>> valid\n False\n >>> print(msg)\n The kerning data is not in the correct format.\n """\n bogusFormatMessage = "The kerning data is not in the correct format."\n if not isinstance(data, Mapping):\n return False, bogusFormatMessage\n for first, secondDict in data.items():\n if not isinstance(first, str):\n return False, bogusFormatMessage\n elif not isinstance(secondDict, Mapping):\n return False, bogusFormatMessage\n for second, value in secondDict.items():\n if not isinstance(second, str):\n return False, bogusFormatMessage\n elif not isinstance(value, numberTypes):\n return False, bogusFormatMessage\n return True, None\n\n\n# -------------\n# lib.plist/lib\n# -------------\n\n_bogusLibFormatMessage = "The lib data is not in the correct format: %s"\n\n\ndef fontLibValidator(value):\n """\n Check the validity of the lib.\n Version 3+ (though it's backwards compatible with UFO 1 and UFO 2).\n\n >>> lib = {"foo" : "bar"}\n >>> fontLibValidator(lib)\n (True, None)\n\n >>> lib = {"public.awesome" : "hello"}\n >>> fontLibValidator(lib)\n (True, None)\n\n >>> lib = {"public.glyphOrder" : ["A", "C", "B"]}\n >>> fontLibValidator(lib)\n (True, None)\n\n >>> lib = "hello"\n >>> valid, msg = fontLibValidator(lib)\n >>> valid\n False\n >>> print(msg) # doctest: +ELLIPSIS\n The lib data is not in the correct format: expected a dictionary, ...\n\n >>> lib = {1: "hello"}\n >>> valid, msg = fontLibValidator(lib)\n >>> valid\n False\n >>> print(msg)\n The lib key is not properly formatted: expected str, found int: 1\n\n >>> lib = {"public.glyphOrder" : "hello"}\n >>> valid, msg = fontLibValidator(lib)\n >>> valid\n False\n >>> print(msg) # doctest: +ELLIPSIS\n public.glyphOrder is not properly formatted: expected list or tuple,...\n\n >>> lib = {"public.glyphOrder" : ["A", 1, "B"]}\n >>> valid, msg = fontLibValidator(lib)\n >>> valid\n False\n >>> print(msg) # doctest: +ELLIPSIS\n public.glyphOrder is not properly formatted: expected str,...\n """\n if not isDictEnough(value):\n reason = "expected a dictionary, found %s" % type(value).__name__\n return False, _bogusLibFormatMessage % reason\n for key, value in value.items():\n if not isinstance(key, str):\n return False, (\n "The lib key is not properly formatted: expected str, found %s: %r"\n % (type(key).__name__, key)\n )\n # public.glyphOrder\n if key == "public.glyphOrder":\n bogusGlyphOrderMessage = "public.glyphOrder is not properly formatted: %s"\n if not isinstance(value, (list, tuple)):\n reason = "expected list or tuple, found %s" % type(value).__name__\n return False, bogusGlyphOrderMessage % reason\n for glyphName in value:\n if not isinstance(glyphName, str):\n reason = "expected str, found %s" % type(glyphName).__name__\n return False, bogusGlyphOrderMessage % reason\n return True, None\n\n\n# --------\n# GLIF lib\n# --------\n\n\ndef glyphLibValidator(value):\n """\n Check the validity of the lib.\n Version 3+ (though it's backwards compatible with UFO 1 and UFO 2).\n\n >>> lib = {"foo" : "bar"}\n >>> glyphLibValidator(lib)\n (True, None)\n\n >>> lib = {"public.awesome" : "hello"}\n >>> glyphLibValidator(lib)\n (True, None)\n\n >>> lib = {"public.markColor" : "1,0,0,0.5"}\n >>> glyphLibValidator(lib)\n (True, None)\n\n >>> lib = {"public.markColor" : 1}\n >>> valid, msg = glyphLibValidator(lib)\n >>> valid\n False\n >>> print(msg)\n public.markColor is not properly formatted.\n """\n if not isDictEnough(value):\n reason = "expected a dictionary, found %s" % type(value).__name__\n return False, _bogusLibFormatMessage % reason\n for key, value in value.items():\n if not isinstance(key, str):\n reason = "key (%s) should be a string" % key\n return False, _bogusLibFormatMessage % reason\n # public.markColor\n if key == "public.markColor":\n if not colorValidator(value):\n return False, "public.markColor is not properly formatted."\n return True, None\n\n\nif __name__ == "__main__":\n import doctest\n\n doctest.testmod()\n
.venv\Lib\site-packages\fontTools\ufoLib\validators.py
validators.py
Python
31,991
0.95
0.228499
0.068999
react-lib
356
2024-01-09T05:28:44.838842
Apache-2.0
false
7a64be667215e8ab50e0141719752cb4
"""\nA library for importing .ufo files and their descendants.\nRefer to http://unifiedfontobject.org for the UFO specification.\n\nThe main interfaces are the :class:`.UFOReader` and :class:`.UFOWriter`\nclasses, which support versions 1, 2, and 3 of the UFO specification.\n\nSet variables are available for external use that list the font\ninfo attribute names for the `fontinfo.plist` formats. These are:\n\n- :obj:`.fontInfoAttributesVersion1`\n- :obj:`.fontInfoAttributesVersion2`\n- :obj:`.fontInfoAttributesVersion3`\n\nA set listing the `fontinfo.plist` attributes that were deprecated\nin version 2 is available for external use:\n\n- :obj:`.deprecatedFontInfoAttributesVersion2`\n\nFunctions that do basic validation on values for `fontinfo.plist`\nare available for external use. These are\n\n- :func:`.validateFontInfoVersion2ValueForAttribute`\n- :func:`.validateFontInfoVersion3ValueForAttribute`\n\nValue conversion functions are available for converting\n`fontinfo.plist` values between the possible format versions.\n\n- :func:`.convertFontInfoValueForAttributeFromVersion1ToVersion2`\n- :func:`.convertFontInfoValueForAttributeFromVersion2ToVersion1`\n- :func:`.convertFontInfoValueForAttributeFromVersion2ToVersion3`\n- :func:`.convertFontInfoValueForAttributeFromVersion3ToVersion2`\n"""\n\nimport os\nfrom copy import deepcopy\nfrom os import fsdecode\nimport logging\nimport zipfile\nimport enum\nfrom collections import OrderedDict\nimport fs\nimport fs.base\nimport fs.subfs\nimport fs.errors\nimport fs.copy\nimport fs.osfs\nimport fs.zipfs\nimport fs.tempfs\nimport fs.tools\nfrom fontTools.misc import plistlib\nfrom fontTools.ufoLib.validators import *\nfrom fontTools.ufoLib.filenames import userNameToFileName\nfrom fontTools.ufoLib.converters import convertUFO1OrUFO2KerningToUFO3Kerning\nfrom fontTools.ufoLib.errors import UFOLibError\nfrom fontTools.ufoLib.utils import numberTypes, _VersionTupleEnumMixin\n\n__all__ = [\n "makeUFOPath",\n "UFOLibError",\n "UFOReader",\n "UFOWriter",\n "UFOReaderWriter",\n "UFOFileStructure",\n "fontInfoAttributesVersion1",\n "fontInfoAttributesVersion2",\n "fontInfoAttributesVersion3",\n "deprecatedFontInfoAttributesVersion2",\n "validateFontInfoVersion2ValueForAttribute",\n "validateFontInfoVersion3ValueForAttribute",\n "convertFontInfoValueForAttributeFromVersion1ToVersion2",\n "convertFontInfoValueForAttributeFromVersion2ToVersion1",\n]\n\n__version__ = "3.0.0"\n\n\nlogger = logging.getLogger(__name__)\n\n\n# ---------\n# Constants\n# ---------\n\nDEFAULT_GLYPHS_DIRNAME = "glyphs"\nDATA_DIRNAME = "data"\nIMAGES_DIRNAME = "images"\nMETAINFO_FILENAME = "metainfo.plist"\nFONTINFO_FILENAME = "fontinfo.plist"\nLIB_FILENAME = "lib.plist"\nGROUPS_FILENAME = "groups.plist"\nKERNING_FILENAME = "kerning.plist"\nFEATURES_FILENAME = "features.fea"\nLAYERCONTENTS_FILENAME = "layercontents.plist"\nLAYERINFO_FILENAME = "layerinfo.plist"\n\nDEFAULT_LAYER_NAME = "public.default"\n\n\nclass UFOFormatVersion(tuple, _VersionTupleEnumMixin, enum.Enum):\n FORMAT_1_0 = (1, 0)\n FORMAT_2_0 = (2, 0)\n FORMAT_3_0 = (3, 0)\n\n\n# python 3.11 doesn't like when a mixin overrides a dunder method like __str__\n# for some reasons it keep using Enum.__str__, see\n# https://github.com/fonttools/fonttools/pull/2655\nUFOFormatVersion.__str__ = _VersionTupleEnumMixin.__str__\n\n\nclass UFOFileStructure(enum.Enum):\n ZIP = "zip"\n PACKAGE = "package"\n\n\n# --------------\n# Shared Methods\n# --------------\n\n\nclass _UFOBaseIO:\n def getFileModificationTime(self, path):\n """\n Returns the modification time for the file at the given path, as a\n floating point number giving the number of seconds since the epoch.\n The path must be relative to the UFO path.\n Returns None if the file does not exist.\n """\n try:\n dt = self.fs.getinfo(fsdecode(path), namespaces=["details"]).modified\n except (fs.errors.MissingInfoNamespace, fs.errors.ResourceNotFound):\n return None\n else:\n return dt.timestamp()\n\n def _getPlist(self, fileName, default=None):\n """\n Read a property list relative to the UFO filesystem's root.\n Raises UFOLibError if the file is missing and default is None,\n otherwise default is returned.\n\n The errors that could be raised during the reading of a plist are\n unpredictable and/or too large to list, so, a blind try: except:\n is done. If an exception occurs, a UFOLibError will be raised.\n """\n try:\n with self.fs.open(fileName, "rb") as f:\n return plistlib.load(f)\n except fs.errors.ResourceNotFound:\n if default is None:\n raise UFOLibError(\n "'%s' is missing on %s. This file is required" % (fileName, self.fs)\n )\n else:\n return default\n except Exception as e:\n # TODO(anthrotype): try to narrow this down a little\n raise UFOLibError(f"'{fileName}' could not be read on {self.fs}: {e}")\n\n def _writePlist(self, fileName, obj):\n """\n Write a property list to a file relative to the UFO filesystem's root.\n\n Do this sort of atomically, making it harder to corrupt existing files,\n for example when plistlib encounters an error halfway during write.\n This also checks to see if text matches the text that is already in the\n file at path. If so, the file is not rewritten so that the modification\n date is preserved.\n\n The errors that could be raised during the writing of a plist are\n unpredictable and/or too large to list, so, a blind try: except: is done.\n If an exception occurs, a UFOLibError will be raised.\n """\n if self._havePreviousFile:\n try:\n data = plistlib.dumps(obj)\n except Exception as e:\n raise UFOLibError(\n "'%s' could not be written on %s because "\n "the data is not properly formatted: %s" % (fileName, self.fs, e)\n )\n if self.fs.exists(fileName) and data == self.fs.readbytes(fileName):\n return\n self.fs.writebytes(fileName, data)\n else:\n with self.fs.openbin(fileName, mode="w") as fp:\n try:\n plistlib.dump(obj, fp)\n except Exception as e:\n raise UFOLibError(\n "'%s' could not be written on %s because "\n "the data is not properly formatted: %s"\n % (fileName, self.fs, e)\n )\n\n\n# ----------\n# UFO Reader\n# ----------\n\n\nclass UFOReader(_UFOBaseIO):\n """Read the various components of a .ufo.\n\n Attributes:\n path: An :class:`os.PathLike` object pointing to the .ufo.\n validate: A boolean indicating if the data read should be\n validated. Defaults to `True`.\n\n By default read data is validated. Set ``validate`` to\n ``False`` to not validate the data.\n """\n\n def __init__(self, path, validate=True):\n if hasattr(path, "__fspath__"): # support os.PathLike objects\n path = path.__fspath__()\n\n if isinstance(path, str):\n structure = _sniffFileStructure(path)\n try:\n if structure is UFOFileStructure.ZIP:\n parentFS = fs.zipfs.ZipFS(path, write=False, encoding="utf-8")\n else:\n parentFS = fs.osfs.OSFS(path)\n except fs.errors.CreateFailed as e:\n raise UFOLibError(f"unable to open '{path}': {e}")\n\n if structure is UFOFileStructure.ZIP:\n # .ufoz zip files must contain a single root directory, with arbitrary\n # name, containing all the UFO files\n rootDirs = [\n p.name\n for p in parentFS.scandir("/")\n # exclude macOS metadata contained in zip file\n if p.is_dir and p.name != "__MACOSX"\n ]\n if len(rootDirs) == 1:\n # 'ClosingSubFS' ensures that the parent zip file is closed when\n # its root subdirectory is closed\n self.fs = parentFS.opendir(\n rootDirs[0], factory=fs.subfs.ClosingSubFS\n )\n else:\n raise UFOLibError(\n "Expected exactly 1 root directory, found %d" % len(rootDirs)\n )\n else:\n # normal UFO 'packages' are just a single folder\n self.fs = parentFS\n # when passed a path string, we make sure we close the newly opened fs\n # upon calling UFOReader.close method or context manager's __exit__\n self._shouldClose = True\n self._fileStructure = structure\n elif isinstance(path, fs.base.FS):\n filesystem = path\n try:\n filesystem.check()\n except fs.errors.FilesystemClosed:\n raise UFOLibError("the filesystem '%s' is closed" % path)\n else:\n self.fs = filesystem\n try:\n path = filesystem.getsyspath("/")\n except fs.errors.NoSysPath:\n # network or in-memory FS may not map to the local one\n path = str(filesystem)\n # when user passed an already initialized fs instance, it is her\n # responsibility to close it, thus UFOReader.close/__exit__ are no-op\n self._shouldClose = False\n # default to a 'package' structure\n self._fileStructure = UFOFileStructure.PACKAGE\n else:\n raise TypeError(\n "Expected a path string or fs.base.FS object, found '%s'"\n % type(path).__name__\n )\n self._path = fsdecode(path)\n self._validate = validate\n self._upConvertedKerningData = None\n\n try:\n self.readMetaInfo(validate=validate)\n except UFOLibError:\n self.close()\n raise\n\n # properties\n\n def _get_path(self):\n import warnings\n\n warnings.warn(\n "The 'path' attribute is deprecated; use the 'fs' attribute instead",\n DeprecationWarning,\n stacklevel=2,\n )\n return self._path\n\n path = property(_get_path, doc="The path of the UFO (DEPRECATED).")\n\n def _get_formatVersion(self):\n import warnings\n\n warnings.warn(\n "The 'formatVersion' attribute is deprecated; use the 'formatVersionTuple'",\n DeprecationWarning,\n stacklevel=2,\n )\n return self._formatVersion.major\n\n formatVersion = property(\n _get_formatVersion,\n doc="The (major) format version of the UFO. DEPRECATED: Use formatVersionTuple",\n )\n\n @property\n def formatVersionTuple(self):\n """The (major, minor) format version of the UFO.\n This is determined by reading metainfo.plist during __init__.\n """\n return self._formatVersion\n\n def _get_fileStructure(self):\n return self._fileStructure\n\n fileStructure = property(\n _get_fileStructure,\n doc=(\n "The file structure of the UFO: "\n "either UFOFileStructure.ZIP or UFOFileStructure.PACKAGE"\n ),\n )\n\n # up conversion\n\n def _upConvertKerning(self, validate):\n """\n Up convert kerning and groups in UFO 1 and 2.\n The data will be held internally until each bit of data\n has been retrieved. The conversion of both must be done\n at once, so the raw data is cached and an error is raised\n if one bit of data becomes obsolete before it is called.\n\n ``validate`` will validate the data.\n """\n if self._upConvertedKerningData:\n testKerning = self._readKerning()\n if testKerning != self._upConvertedKerningData["originalKerning"]:\n raise UFOLibError(\n "The data in kerning.plist has been modified since it was converted to UFO 3 format."\n )\n testGroups = self._readGroups()\n if testGroups != self._upConvertedKerningData["originalGroups"]:\n raise UFOLibError(\n "The data in groups.plist has been modified since it was converted to UFO 3 format."\n )\n else:\n groups = self._readGroups()\n if validate:\n invalidFormatMessage = "groups.plist is not properly formatted."\n if not isinstance(groups, dict):\n raise UFOLibError(invalidFormatMessage)\n for groupName, glyphList in groups.items():\n if not isinstance(groupName, str):\n raise UFOLibError(invalidFormatMessage)\n elif not isinstance(glyphList, list):\n raise UFOLibError(invalidFormatMessage)\n for glyphName in glyphList:\n if not isinstance(glyphName, str):\n raise UFOLibError(invalidFormatMessage)\n self._upConvertedKerningData = dict(\n kerning={},\n originalKerning=self._readKerning(),\n groups={},\n originalGroups=groups,\n )\n # convert kerning and groups\n kerning, groups, conversionMaps = convertUFO1OrUFO2KerningToUFO3Kerning(\n self._upConvertedKerningData["originalKerning"],\n deepcopy(self._upConvertedKerningData["originalGroups"]),\n self.getGlyphSet(),\n )\n # store\n self._upConvertedKerningData["kerning"] = kerning\n self._upConvertedKerningData["groups"] = groups\n self._upConvertedKerningData["groupRenameMaps"] = conversionMaps\n\n # support methods\n\n def readBytesFromPath(self, path):\n """\n Returns the bytes in the file at the given path.\n The path must be relative to the UFO's filesystem root.\n Returns None if the file does not exist.\n """\n try:\n return self.fs.readbytes(fsdecode(path))\n except fs.errors.ResourceNotFound:\n return None\n\n def getReadFileForPath(self, path, encoding=None):\n """\n Returns a file (or file-like) object for the file at the given path.\n The path must be relative to the UFO path.\n Returns None if the file does not exist.\n By default the file is opened in binary mode (reads bytes).\n If encoding is passed, the file is opened in text mode (reads str).\n\n Note: The caller is responsible for closing the open file.\n """\n path = fsdecode(path)\n try:\n if encoding is None:\n return self.fs.openbin(path)\n else:\n return self.fs.open(path, mode="r", encoding=encoding)\n except fs.errors.ResourceNotFound:\n return None\n\n # metainfo.plist\n\n def _readMetaInfo(self, validate=None):\n """\n Read metainfo.plist and return raw data. Only used for internal operations.\n\n ``validate`` will validate the read data, by default it is set\n to the class's validate value, can be overridden.\n """\n if validate is None:\n validate = self._validate\n data = self._getPlist(METAINFO_FILENAME)\n if validate and not isinstance(data, dict):\n raise UFOLibError("metainfo.plist is not properly formatted.")\n try:\n formatVersionMajor = data["formatVersion"]\n except KeyError:\n raise UFOLibError(\n f"Missing required formatVersion in '{METAINFO_FILENAME}' on {self.fs}"\n )\n formatVersionMinor = data.setdefault("formatVersionMinor", 0)\n\n try:\n formatVersion = UFOFormatVersion((formatVersionMajor, formatVersionMinor))\n except ValueError as e:\n unsupportedMsg = (\n f"Unsupported UFO format ({formatVersionMajor}.{formatVersionMinor}) "\n f"in '{METAINFO_FILENAME}' on {self.fs}"\n )\n if validate:\n from fontTools.ufoLib.errors import UnsupportedUFOFormat\n\n raise UnsupportedUFOFormat(unsupportedMsg) from e\n\n formatVersion = UFOFormatVersion.default()\n logger.warning(\n "%s. Assuming the latest supported version (%s). "\n "Some data may be skipped or parsed incorrectly",\n unsupportedMsg,\n formatVersion,\n )\n data["formatVersionTuple"] = formatVersion\n return data\n\n def readMetaInfo(self, validate=None):\n """\n Read metainfo.plist and set formatVersion. Only used for internal operations.\n\n ``validate`` will validate the read data, by default it is set\n to the class's validate value, can be overridden.\n """\n data = self._readMetaInfo(validate=validate)\n self._formatVersion = data["formatVersionTuple"]\n\n # groups.plist\n\n def _readGroups(self):\n groups = self._getPlist(GROUPS_FILENAME, {})\n # remove any duplicate glyphs in a kerning group\n for groupName, glyphList in groups.items():\n if groupName.startswith(("public.kern1.", "public.kern2.")):\n groups[groupName] = list(OrderedDict.fromkeys(glyphList))\n return groups\n\n def readGroups(self, validate=None):\n """\n Read groups.plist. Returns a dict.\n ``validate`` will validate the read data, by default it is set to the\n class's validate value, can be overridden.\n """\n if validate is None:\n validate = self._validate\n # handle up conversion\n if self._formatVersion < UFOFormatVersion.FORMAT_3_0:\n self._upConvertKerning(validate)\n groups = self._upConvertedKerningData["groups"]\n # normal\n else:\n groups = self._readGroups()\n if validate:\n valid, message = groupsValidator(groups)\n if not valid:\n raise UFOLibError(message)\n return groups\n\n def getKerningGroupConversionRenameMaps(self, validate=None):\n """\n Get maps defining the renaming that was done during any\n needed kerning group conversion. This method returns a\n dictionary of this form::\n\n {\n "side1" : {"old group name" : "new group name"},\n "side2" : {"old group name" : "new group name"}\n }\n\n When no conversion has been performed, the side1 and side2\n dictionaries will be empty.\n\n ``validate`` will validate the groups, by default it is set to the\n class's validate value, can be overridden.\n """\n if validate is None:\n validate = self._validate\n if self._formatVersion >= UFOFormatVersion.FORMAT_3_0:\n return dict(side1={}, side2={})\n # use the public group reader to force the load and\n # conversion of the data if it hasn't happened yet.\n self.readGroups(validate=validate)\n return self._upConvertedKerningData["groupRenameMaps"]\n\n # fontinfo.plist\n\n def _readInfo(self, validate):\n data = self._getPlist(FONTINFO_FILENAME, {})\n if validate and not isinstance(data, dict):\n raise UFOLibError("fontinfo.plist is not properly formatted.")\n return data\n\n def readInfo(self, info, validate=None):\n """\n Read fontinfo.plist. It requires an object that allows\n setting attributes with names that follow the fontinfo.plist\n version 3 specification. This will write the attributes\n defined in the file into the object.\n\n ``validate`` will validate the read data, by default it is set to the\n class's validate value, can be overridden.\n """\n if validate is None:\n validate = self._validate\n infoDict = self._readInfo(validate)\n infoDataToSet = {}\n # version 1\n if self._formatVersion == UFOFormatVersion.FORMAT_1_0:\n for attr in fontInfoAttributesVersion1:\n value = infoDict.get(attr)\n if value is not None:\n infoDataToSet[attr] = value\n infoDataToSet = _convertFontInfoDataVersion1ToVersion2(infoDataToSet)\n infoDataToSet = _convertFontInfoDataVersion2ToVersion3(infoDataToSet)\n # version 2\n elif self._formatVersion == UFOFormatVersion.FORMAT_2_0:\n for attr, dataValidationDict in list(\n fontInfoAttributesVersion2ValueData.items()\n ):\n value = infoDict.get(attr)\n if value is None:\n continue\n infoDataToSet[attr] = value\n infoDataToSet = _convertFontInfoDataVersion2ToVersion3(infoDataToSet)\n # version 3.x\n elif self._formatVersion.major == UFOFormatVersion.FORMAT_3_0.major:\n for attr, dataValidationDict in list(\n fontInfoAttributesVersion3ValueData.items()\n ):\n value = infoDict.get(attr)\n if value is None:\n continue\n infoDataToSet[attr] = value\n # unsupported version\n else:\n raise NotImplementedError(self._formatVersion)\n # validate data\n if validate:\n infoDataToSet = validateInfoVersion3Data(infoDataToSet)\n # populate the object\n for attr, value in list(infoDataToSet.items()):\n try:\n setattr(info, attr, value)\n except AttributeError:\n raise UFOLibError(\n "The supplied info object does not support setting a necessary attribute (%s)."\n % attr\n )\n\n # kerning.plist\n\n def _readKerning(self):\n data = self._getPlist(KERNING_FILENAME, {})\n return data\n\n def readKerning(self, validate=None):\n """\n Read kerning.plist. Returns a dict.\n\n ``validate`` will validate the kerning data, by default it is set to the\n class's validate value, can be overridden.\n """\n if validate is None:\n validate = self._validate\n # handle up conversion\n if self._formatVersion < UFOFormatVersion.FORMAT_3_0:\n self._upConvertKerning(validate)\n kerningNested = self._upConvertedKerningData["kerning"]\n # normal\n else:\n kerningNested = self._readKerning()\n if validate:\n valid, message = kerningValidator(kerningNested)\n if not valid:\n raise UFOLibError(message)\n # flatten\n kerning = {}\n for left in kerningNested:\n for right in kerningNested[left]:\n value = kerningNested[left][right]\n kerning[left, right] = value\n return kerning\n\n # lib.plist\n\n def readLib(self, validate=None):\n """\n Read lib.plist. Returns a dict.\n\n ``validate`` will validate the data, by default it is set to the\n class's validate value, can be overridden.\n """\n if validate is None:\n validate = self._validate\n data = self._getPlist(LIB_FILENAME, {})\n if validate:\n valid, message = fontLibValidator(data)\n if not valid:\n raise UFOLibError(message)\n return data\n\n # features.fea\n\n def readFeatures(self):\n """\n Read features.fea. Return a string.\n The returned string is empty if the file is missing.\n """\n try:\n with self.fs.open(FEATURES_FILENAME, "r", encoding="utf-8-sig") as f:\n return f.read()\n except fs.errors.ResourceNotFound:\n return ""\n\n # glyph sets & layers\n\n def _readLayerContents(self, validate):\n """\n Rebuild the layer contents list by checking what glyphsets\n are available on disk.\n\n ``validate`` will validate the layer contents.\n """\n if self._formatVersion < UFOFormatVersion.FORMAT_3_0:\n return [(DEFAULT_LAYER_NAME, DEFAULT_GLYPHS_DIRNAME)]\n contents = self._getPlist(LAYERCONTENTS_FILENAME)\n if validate:\n valid, error = layerContentsValidator(contents, self.fs)\n if not valid:\n raise UFOLibError(error)\n return contents\n\n def getLayerNames(self, validate=None):\n """\n Get the ordered layer names from layercontents.plist.\n\n ``validate`` will validate the data, by default it is set to the\n class's validate value, can be overridden.\n """\n if validate is None:\n validate = self._validate\n layerContents = self._readLayerContents(validate)\n layerNames = [layerName for layerName, directoryName in layerContents]\n return layerNames\n\n def getDefaultLayerName(self, validate=None):\n """\n Get the default layer name from layercontents.plist.\n\n ``validate`` will validate the data, by default it is set to the\n class's validate value, can be overridden.\n """\n if validate is None:\n validate = self._validate\n layerContents = self._readLayerContents(validate)\n for layerName, layerDirectory in layerContents:\n if layerDirectory == DEFAULT_GLYPHS_DIRNAME:\n return layerName\n # this will already have been raised during __init__\n raise UFOLibError("The default layer is not defined in layercontents.plist.")\n\n def getGlyphSet(self, layerName=None, validateRead=None, validateWrite=None):\n """\n Return the GlyphSet associated with the\n glyphs directory mapped to layerName\n in the UFO. If layerName is not provided,\n the name retrieved with getDefaultLayerName\n will be used.\n\n ``validateRead`` will validate the read data, by default it is set to the\n class's validate value, can be overridden.\n ``validateWrite`` will validate the written data, by default it is set to the\n class's validate value, can be overridden.\n """\n from fontTools.ufoLib.glifLib import GlyphSet\n\n if validateRead is None:\n validateRead = self._validate\n if validateWrite is None:\n validateWrite = self._validate\n if layerName is None:\n layerName = self.getDefaultLayerName(validate=validateRead)\n directory = None\n layerContents = self._readLayerContents(validateRead)\n for storedLayerName, storedLayerDirectory in layerContents:\n if layerName == storedLayerName:\n directory = storedLayerDirectory\n break\n if directory is None:\n raise UFOLibError('No glyphs directory is mapped to "%s".' % layerName)\n try:\n glyphSubFS = self.fs.opendir(directory)\n except fs.errors.ResourceNotFound:\n raise UFOLibError(f"No '{directory}' directory for layer '{layerName}'")\n return GlyphSet(\n glyphSubFS,\n ufoFormatVersion=self._formatVersion,\n validateRead=validateRead,\n validateWrite=validateWrite,\n expectContentsFile=True,\n )\n\n def getCharacterMapping(self, layerName=None, validate=None):\n """\n Return a dictionary that maps unicode values (ints) to\n lists of glyph names.\n """\n if validate is None:\n validate = self._validate\n glyphSet = self.getGlyphSet(\n layerName, validateRead=validate, validateWrite=True\n )\n allUnicodes = glyphSet.getUnicodes()\n cmap = {}\n for glyphName, unicodes in allUnicodes.items():\n for code in unicodes:\n if code in cmap:\n cmap[code].append(glyphName)\n else:\n cmap[code] = [glyphName]\n return cmap\n\n # /data\n\n def getDataDirectoryListing(self):\n """\n Returns a list of all files in the data directory.\n The returned paths will be relative to the UFO.\n This will not list directory names, only file names.\n Thus, empty directories will be skipped.\n """\n try:\n self._dataFS = self.fs.opendir(DATA_DIRNAME)\n except fs.errors.ResourceNotFound:\n return []\n except fs.errors.DirectoryExpected:\n raise UFOLibError('The UFO contains a "data" file instead of a directory.')\n try:\n # fs Walker.files method returns "absolute" paths (in terms of the\n # root of the 'data' SubFS), so we strip the leading '/' to make\n # them relative\n return [p.lstrip("/") for p in self._dataFS.walk.files()]\n except fs.errors.ResourceError:\n return []\n\n def getImageDirectoryListing(self, validate=None):\n """\n Returns a list of all image file names in\n the images directory. Each of the images will\n have been verified to have the PNG signature.\n\n ``validate`` will validate the data, by default it is set to the\n class's validate value, can be overridden.\n """\n if self._formatVersion < UFOFormatVersion.FORMAT_3_0:\n return []\n if validate is None:\n validate = self._validate\n try:\n self._imagesFS = imagesFS = self.fs.opendir(IMAGES_DIRNAME)\n except fs.errors.ResourceNotFound:\n return []\n except fs.errors.DirectoryExpected:\n raise UFOLibError(\n 'The UFO contains an "images" file instead of a directory.'\n )\n result = []\n for path in imagesFS.scandir("/"):\n if path.is_dir:\n # silently skip this as version control\n # systems often have hidden directories\n continue\n if validate:\n with imagesFS.openbin(path.name) as fp:\n valid, error = pngValidator(fileObj=fp)\n if valid:\n result.append(path.name)\n else:\n result.append(path.name)\n return result\n\n def readData(self, fileName):\n """\n Return bytes for the file named 'fileName' inside the 'data/' directory.\n """\n fileName = fsdecode(fileName)\n try:\n try:\n dataFS = self._dataFS\n except AttributeError:\n # in case readData is called before getDataDirectoryListing\n dataFS = self.fs.opendir(DATA_DIRNAME)\n data = dataFS.readbytes(fileName)\n except fs.errors.ResourceNotFound:\n raise UFOLibError(f"No data file named '{fileName}' on {self.fs}")\n return data\n\n def readImage(self, fileName, validate=None):\n """\n Return image data for the file named fileName.\n\n ``validate`` will validate the data, by default it is set to the\n class's validate value, can be overridden.\n """\n if validate is None:\n validate = self._validate\n if self._formatVersion < UFOFormatVersion.FORMAT_3_0:\n raise UFOLibError(\n f"Reading images is not allowed in UFO {self._formatVersion.major}."\n )\n fileName = fsdecode(fileName)\n try:\n try:\n imagesFS = self._imagesFS\n except AttributeError:\n # in case readImage is called before getImageDirectoryListing\n imagesFS = self.fs.opendir(IMAGES_DIRNAME)\n data = imagesFS.readbytes(fileName)\n except fs.errors.ResourceNotFound:\n raise UFOLibError(f"No image file named '{fileName}' on {self.fs}")\n if validate:\n valid, error = pngValidator(data=data)\n if not valid:\n raise UFOLibError(error)\n return data\n\n def close(self):\n if self._shouldClose:\n self.fs.close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self.close()\n\n\n# ----------\n# UFO Writer\n# ----------\n\n\nclass UFOWriter(UFOReader):\n """Write the various components of a .ufo.\n\n Attributes:\n path: An :class:`os.PathLike` object pointing to the .ufo.\n formatVersion: the UFO format version as a tuple of integers (major, minor),\n or as a single integer for the major digit only (minor is implied to be 0).\n By default, the latest formatVersion will be used; currently it is 3.0,\n which is equivalent to formatVersion=(3, 0).\n fileCreator: The creator of the .ufo file. Defaults to\n `com.github.fonttools.ufoLib`.\n structure: The internal structure of the .ufo file: either `ZIP` or `PACKAGE`.\n validate: A boolean indicating if the data read should be validated. Defaults\n to `True`.\n\n By default, the written data will be validated before writing. Set ``validate`` to\n ``False`` if you do not want to validate the data. Validation can also be overriden\n on a per-method level if desired.\n\n Raises:\n UnsupportedUFOFormat: An exception indicating that the requested UFO\n formatVersion is not supported.\n """\n\n def __init__(\n self,\n path,\n formatVersion=None,\n fileCreator="com.github.fonttools.ufoLib",\n structure=None,\n validate=True,\n ):\n try:\n formatVersion = UFOFormatVersion(formatVersion)\n except ValueError as e:\n from fontTools.ufoLib.errors import UnsupportedUFOFormat\n\n raise UnsupportedUFOFormat(\n f"Unsupported UFO format: {formatVersion!r}"\n ) from e\n\n if hasattr(path, "__fspath__"): # support os.PathLike objects\n path = path.__fspath__()\n\n if isinstance(path, str):\n # normalize path by removing trailing or double slashes\n path = os.path.normpath(path)\n havePreviousFile = os.path.exists(path)\n if havePreviousFile:\n # ensure we use the same structure as the destination\n existingStructure = _sniffFileStructure(path)\n if structure is not None:\n try:\n structure = UFOFileStructure(structure)\n except ValueError:\n raise UFOLibError(\n "Invalid or unsupported structure: '%s'" % structure\n )\n if structure is not existingStructure:\n raise UFOLibError(\n "A UFO with a different structure (%s) already exists "\n "at the given path: '%s'" % (existingStructure, path)\n )\n else:\n structure = existingStructure\n else:\n # if not exists, default to 'package' structure\n if structure is None:\n structure = UFOFileStructure.PACKAGE\n dirName = os.path.dirname(path)\n if dirName and not os.path.isdir(dirName):\n raise UFOLibError(\n "Cannot write to '%s': directory does not exist" % path\n )\n if structure is UFOFileStructure.ZIP:\n if havePreviousFile:\n # we can't write a zip in-place, so we have to copy its\n # contents to a temporary location and work from there, then\n # upon closing UFOWriter we create the final zip file\n parentFS = fs.tempfs.TempFS()\n with fs.zipfs.ZipFS(path, encoding="utf-8") as origFS:\n fs.copy.copy_fs(origFS, parentFS)\n # if output path is an existing zip, we require that it contains\n # one, and only one, root directory (with arbitrary name), in turn\n # containing all the existing UFO contents\n rootDirs = [\n p.name\n for p in parentFS.scandir("/")\n # exclude macOS metadata contained in zip file\n if p.is_dir and p.name != "__MACOSX"\n ]\n if len(rootDirs) != 1:\n raise UFOLibError(\n "Expected exactly 1 root directory, found %d"\n % len(rootDirs)\n )\n else:\n # 'ClosingSubFS' ensures that the parent filesystem is closed\n # when its root subdirectory is closed\n self.fs = parentFS.opendir(\n rootDirs[0], factory=fs.subfs.ClosingSubFS\n )\n else:\n # if the output zip file didn't exist, we create the root folder;\n # we name it the same as input 'path', but with '.ufo' extension\n rootDir = os.path.splitext(os.path.basename(path))[0] + ".ufo"\n parentFS = fs.zipfs.ZipFS(path, write=True, encoding="utf-8")\n parentFS.makedir(rootDir)\n self.fs = parentFS.opendir(rootDir, factory=fs.subfs.ClosingSubFS)\n else:\n self.fs = fs.osfs.OSFS(path, create=True)\n self._fileStructure = structure\n self._havePreviousFile = havePreviousFile\n self._shouldClose = True\n elif isinstance(path, fs.base.FS):\n filesystem = path\n try:\n filesystem.check()\n except fs.errors.FilesystemClosed:\n raise UFOLibError("the filesystem '%s' is closed" % path)\n else:\n self.fs = filesystem\n try:\n path = filesystem.getsyspath("/")\n except fs.errors.NoSysPath:\n # network or in-memory FS may not map to the local one\n path = str(filesystem)\n # if passed an FS object, always use 'package' structure\n if structure and structure is not UFOFileStructure.PACKAGE:\n import warnings\n\n warnings.warn(\n "The 'structure' argument is not used when input is an FS object",\n UserWarning,\n stacklevel=2,\n )\n self._fileStructure = UFOFileStructure.PACKAGE\n # if FS contains a "metainfo.plist", we consider it non-empty\n self._havePreviousFile = filesystem.exists(METAINFO_FILENAME)\n # the user is responsible for closing the FS object\n self._shouldClose = False\n else:\n raise TypeError(\n "Expected a path string or fs object, found %s" % type(path).__name__\n )\n\n # establish some basic stuff\n self._path = fsdecode(path)\n self._formatVersion = formatVersion\n self._fileCreator = fileCreator\n self._downConversionKerningData = None\n self._validate = validate\n # if the file already exists, get the format version.\n # this will be needed for up and down conversion.\n previousFormatVersion = None\n if self._havePreviousFile:\n metaInfo = self._readMetaInfo(validate=validate)\n previousFormatVersion = metaInfo["formatVersionTuple"]\n # catch down conversion\n if previousFormatVersion > formatVersion:\n from fontTools.ufoLib.errors import UnsupportedUFOFormat\n\n raise UnsupportedUFOFormat(\n "The UFO located at this path is a higher version "\n f"({previousFormatVersion}) than the version ({formatVersion}) "\n "that is trying to be written. This is not supported."\n )\n # handle the layer contents\n self.layerContents = {}\n if previousFormatVersion is not None and previousFormatVersion.major >= 3:\n # already exists\n self.layerContents = OrderedDict(self._readLayerContents(validate))\n else:\n # previous < 3\n # imply the layer contents\n if self.fs.exists(DEFAULT_GLYPHS_DIRNAME):\n self.layerContents = {DEFAULT_LAYER_NAME: DEFAULT_GLYPHS_DIRNAME}\n # write the new metainfo\n self._writeMetaInfo()\n\n # properties\n\n def _get_fileCreator(self):\n return self._fileCreator\n\n fileCreator = property(\n _get_fileCreator,\n doc="The file creator of the UFO. This is set into metainfo.plist during __init__.",\n )\n\n # support methods for file system interaction\n\n def copyFromReader(self, reader, sourcePath, destPath):\n """\n Copy the sourcePath in the provided UFOReader to destPath\n in this writer. The paths must be relative. This works with\n both individual files and directories.\n """\n if not isinstance(reader, UFOReader):\n raise UFOLibError("The reader must be an instance of UFOReader.")\n sourcePath = fsdecode(sourcePath)\n destPath = fsdecode(destPath)\n if not reader.fs.exists(sourcePath):\n raise UFOLibError(\n 'The reader does not have data located at "%s".' % sourcePath\n )\n if self.fs.exists(destPath):\n raise UFOLibError('A file named "%s" already exists.' % destPath)\n # create the destination directory if it doesn't exist\n self.fs.makedirs(fs.path.dirname(destPath), recreate=True)\n if reader.fs.isdir(sourcePath):\n fs.copy.copy_dir(reader.fs, sourcePath, self.fs, destPath)\n else:\n fs.copy.copy_file(reader.fs, sourcePath, self.fs, destPath)\n\n def writeBytesToPath(self, path, data):\n """\n Write bytes to a path relative to the UFO filesystem's root.\n If writing to an existing UFO, check to see if data matches the data\n that is already in the file at path; if so, the file is not rewritten\n so that the modification date is preserved.\n If needed, the directory tree for the given path will be built.\n """\n path = fsdecode(path)\n if self._havePreviousFile:\n if self.fs.isfile(path) and data == self.fs.readbytes(path):\n return\n try:\n self.fs.writebytes(path, data)\n except fs.errors.FileExpected:\n raise UFOLibError("A directory exists at '%s'" % path)\n except fs.errors.ResourceNotFound:\n self.fs.makedirs(fs.path.dirname(path), recreate=True)\n self.fs.writebytes(path, data)\n\n def getFileObjectForPath(self, path, mode="w", encoding=None):\n """\n Returns a file (or file-like) object for the\n file at the given path. The path must be relative\n to the UFO path. Returns None if the file does\n not exist and the mode is "r" or "rb.\n An encoding may be passed if the file is opened in text mode.\n\n Note: The caller is responsible for closing the open file.\n """\n path = fsdecode(path)\n try:\n return self.fs.open(path, mode=mode, encoding=encoding)\n except fs.errors.ResourceNotFound as e:\n m = mode[0]\n if m == "r":\n # XXX I think we should just let it raise. The docstring,\n # however, says that this returns None if mode is 'r'\n return None\n elif m == "w" or m == "a" or m == "x":\n self.fs.makedirs(fs.path.dirname(path), recreate=True)\n return self.fs.open(path, mode=mode, encoding=encoding)\n except fs.errors.ResourceError as e:\n return UFOLibError(f"unable to open '{path}' on {self.fs}: {e}")\n\n def removePath(self, path, force=False, removeEmptyParents=True):\n """\n Remove the file (or directory) at path. The path\n must be relative to the UFO.\n Raises UFOLibError if the path doesn't exist.\n If force=True, ignore non-existent paths.\n If the directory where 'path' is located becomes empty, it will\n be automatically removed, unless 'removeEmptyParents' is False.\n """\n path = fsdecode(path)\n try:\n self.fs.remove(path)\n except fs.errors.FileExpected:\n self.fs.removetree(path)\n except fs.errors.ResourceNotFound:\n if not force:\n raise UFOLibError(f"'{path}' does not exist on {self.fs}")\n if removeEmptyParents:\n parent = fs.path.dirname(path)\n if parent:\n fs.tools.remove_empty(self.fs, parent)\n\n # alias kept for backward compatibility with old API\n removeFileForPath = removePath\n\n # UFO mod time\n\n def setModificationTime(self):\n """\n Set the UFO modification time to the current time.\n This is never called automatically. It is up to the\n caller to call this when finished working on the UFO.\n """\n path = self._path\n if path is not None and os.path.exists(path):\n try:\n # this may fail on some filesystems (e.g. SMB servers)\n os.utime(path, None)\n except OSError as e:\n logger.warning("Failed to set modified time: %s", e)\n\n # metainfo.plist\n\n def _writeMetaInfo(self):\n metaInfo = dict(\n creator=self._fileCreator,\n formatVersion=self._formatVersion.major,\n )\n if self._formatVersion.minor != 0:\n metaInfo["formatVersionMinor"] = self._formatVersion.minor\n self._writePlist(METAINFO_FILENAME, metaInfo)\n\n # groups.plist\n\n def setKerningGroupConversionRenameMaps(self, maps):\n """\n Set maps defining the renaming that should be done\n when writing groups and kerning in UFO 1 and UFO 2.\n This will effectively undo the conversion done when\n UFOReader reads this data. The dictionary should have\n this form::\n\n {\n "side1" : {"group name to use when writing" : "group name in data"},\n "side2" : {"group name to use when writing" : "group name in data"}\n }\n\n This is the same form returned by UFOReader's\n getKerningGroupConversionRenameMaps method.\n """\n if self._formatVersion >= UFOFormatVersion.FORMAT_3_0:\n return # XXX raise an error here\n # flip the dictionaries\n remap = {}\n for side in ("side1", "side2"):\n for writeName, dataName in list(maps[side].items()):\n remap[dataName] = writeName\n self._downConversionKerningData = dict(groupRenameMap=remap)\n\n def writeGroups(self, groups, validate=None):\n """\n Write groups.plist. This method requires a\n dict of glyph groups as an argument.\n\n ``validate`` will validate the data, by default it is set to the\n class's validate value, can be overridden.\n """\n if validate is None:\n validate = self._validate\n # validate the data structure\n if validate:\n valid, message = groupsValidator(groups)\n if not valid:\n raise UFOLibError(message)\n # down convert\n if (\n self._formatVersion < UFOFormatVersion.FORMAT_3_0\n and self._downConversionKerningData is not None\n ):\n remap = self._downConversionKerningData["groupRenameMap"]\n remappedGroups = {}\n # there are some edge cases here that are ignored:\n # 1. if a group is being renamed to a name that\n # already exists, the existing group is always\n # overwritten. (this is why there are two loops\n # below.) there doesn't seem to be a logical\n # solution to groups mismatching and overwriting\n # with the specifiecd group seems like a better\n # solution than throwing an error.\n # 2. if side 1 and side 2 groups are being renamed\n # to the same group name there is no check to\n # ensure that the contents are identical. that\n # is left up to the caller.\n for name, contents in list(groups.items()):\n if name in remap:\n continue\n remappedGroups[name] = contents\n for name, contents in list(groups.items()):\n if name not in remap:\n continue\n name = remap[name]\n remappedGroups[name] = contents\n groups = remappedGroups\n # pack and write\n groupsNew = {}\n for key, value in groups.items():\n groupsNew[key] = list(value)\n if groupsNew:\n self._writePlist(GROUPS_FILENAME, groupsNew)\n elif self._havePreviousFile:\n self.removePath(GROUPS_FILENAME, force=True, removeEmptyParents=False)\n\n # fontinfo.plist\n\n def writeInfo(self, info, validate=None):\n """\n Write info.plist. This method requires an object\n that supports getting attributes that follow the\n fontinfo.plist version 2 specification. Attributes\n will be taken from the given object and written\n into the file.\n\n ``validate`` will validate the data, by default it is set to the\n class's validate value, can be overridden.\n """\n if validate is None:\n validate = self._validate\n # gather version 3 data\n infoData = {}\n for attr in list(fontInfoAttributesVersion3ValueData.keys()):\n if hasattr(info, attr):\n try:\n value = getattr(info, attr)\n except AttributeError:\n raise UFOLibError(\n "The supplied info object does not support getting a necessary attribute (%s)."\n % attr\n )\n if value is None:\n continue\n infoData[attr] = value\n # down convert data if necessary and validate\n if self._formatVersion == UFOFormatVersion.FORMAT_3_0:\n if validate:\n infoData = validateInfoVersion3Data(infoData)\n elif self._formatVersion == UFOFormatVersion.FORMAT_2_0:\n infoData = _convertFontInfoDataVersion3ToVersion2(infoData)\n if validate:\n infoData = validateInfoVersion2Data(infoData)\n elif self._formatVersion == UFOFormatVersion.FORMAT_1_0:\n infoData = _convertFontInfoDataVersion3ToVersion2(infoData)\n if validate:\n infoData = validateInfoVersion2Data(infoData)\n infoData = _convertFontInfoDataVersion2ToVersion1(infoData)\n # write file if there is anything to write\n if infoData:\n self._writePlist(FONTINFO_FILENAME, infoData)\n\n # kerning.plist\n\n def writeKerning(self, kerning, validate=None):\n """\n Write kerning.plist. This method requires a\n dict of kerning pairs as an argument.\n\n This performs basic structural validation of the kerning,\n but it does not check for compliance with the spec in\n regards to conflicting pairs. The assumption is that the\n kerning data being passed is standards compliant.\n\n ``validate`` will validate the data, by default it is set to the\n class's validate value, can be overridden.\n """\n if validate is None:\n validate = self._validate\n # validate the data structure\n if validate:\n invalidFormatMessage = "The kerning is not properly formatted."\n if not isDictEnough(kerning):\n raise UFOLibError(invalidFormatMessage)\n for pair, value in list(kerning.items()):\n if not isinstance(pair, (list, tuple)):\n raise UFOLibError(invalidFormatMessage)\n if not len(pair) == 2:\n raise UFOLibError(invalidFormatMessage)\n if not isinstance(pair[0], str):\n raise UFOLibError(invalidFormatMessage)\n if not isinstance(pair[1], str):\n raise UFOLibError(invalidFormatMessage)\n if not isinstance(value, numberTypes):\n raise UFOLibError(invalidFormatMessage)\n # down convert\n if (\n self._formatVersion < UFOFormatVersion.FORMAT_3_0\n and self._downConversionKerningData is not None\n ):\n remap = self._downConversionKerningData["groupRenameMap"]\n remappedKerning = {}\n for (side1, side2), value in list(kerning.items()):\n side1 = remap.get(side1, side1)\n side2 = remap.get(side2, side2)\n remappedKerning[side1, side2] = value\n kerning = remappedKerning\n # pack and write\n kerningDict = {}\n for left, right in kerning.keys():\n value = kerning[left, right]\n if left not in kerningDict:\n kerningDict[left] = {}\n kerningDict[left][right] = value\n if kerningDict:\n self._writePlist(KERNING_FILENAME, kerningDict)\n elif self._havePreviousFile:\n self.removePath(KERNING_FILENAME, force=True, removeEmptyParents=False)\n\n # lib.plist\n\n def writeLib(self, libDict, validate=None):\n """\n Write lib.plist. This method requires a\n lib dict as an argument.\n\n ``validate`` will validate the data, by default it is set to the\n class's validate value, can be overridden.\n """\n if validate is None:\n validate = self._validate\n if validate:\n valid, message = fontLibValidator(libDict)\n if not valid:\n raise UFOLibError(message)\n if libDict:\n self._writePlist(LIB_FILENAME, libDict)\n elif self._havePreviousFile:\n self.removePath(LIB_FILENAME, force=True, removeEmptyParents=False)\n\n # features.fea\n\n def writeFeatures(self, features, validate=None):\n """\n Write features.fea. This method requires a\n features string as an argument.\n """\n if validate is None:\n validate = self._validate\n if self._formatVersion == UFOFormatVersion.FORMAT_1_0:\n raise UFOLibError("features.fea is not allowed in UFO Format Version 1.")\n if validate:\n if not isinstance(features, str):\n raise UFOLibError("The features are not text.")\n if features:\n self.writeBytesToPath(FEATURES_FILENAME, features.encode("utf8"))\n elif self._havePreviousFile:\n self.removePath(FEATURES_FILENAME, force=True, removeEmptyParents=False)\n\n # glyph sets & layers\n\n def writeLayerContents(self, layerOrder=None, validate=None):\n """\n Write the layercontents.plist file. This method *must* be called\n after all glyph sets have been written.\n """\n if validate is None:\n validate = self._validate\n if self._formatVersion < UFOFormatVersion.FORMAT_3_0:\n return\n if layerOrder is not None:\n newOrder = []\n for layerName in layerOrder:\n if layerName is None:\n layerName = DEFAULT_LAYER_NAME\n newOrder.append(layerName)\n layerOrder = newOrder\n else:\n layerOrder = list(self.layerContents.keys())\n if validate and set(layerOrder) != set(self.layerContents.keys()):\n raise UFOLibError(\n "The layer order content does not match the glyph sets that have been created."\n )\n layerContents = [\n (layerName, self.layerContents[layerName]) for layerName in layerOrder\n ]\n self._writePlist(LAYERCONTENTS_FILENAME, layerContents)\n\n def _findDirectoryForLayerName(self, layerName):\n foundDirectory = None\n for existingLayerName, directoryName in list(self.layerContents.items()):\n if layerName is None and directoryName == DEFAULT_GLYPHS_DIRNAME:\n foundDirectory = directoryName\n break\n elif existingLayerName == layerName:\n foundDirectory = directoryName\n break\n if not foundDirectory:\n raise UFOLibError(\n "Could not locate a glyph set directory for the layer named %s."\n % layerName\n )\n return foundDirectory\n\n def getGlyphSet(\n self,\n layerName=None,\n defaultLayer=True,\n glyphNameToFileNameFunc=None,\n validateRead=None,\n validateWrite=None,\n expectContentsFile=False,\n ):\n """\n Return the GlyphSet object associated with the\n appropriate glyph directory in the .ufo.\n If layerName is None, the default glyph set\n will be used. The defaultLayer flag indictes\n that the layer should be saved into the default\n glyphs directory.\n\n ``validateRead`` will validate the read data, by default it is set to the\n class's validate value, can be overridden.\n ``validateWrte`` will validate the written data, by default it is set to the\n class's validate value, can be overridden.\n ``expectContentsFile`` will raise a GlifLibError if a contents.plist file is\n not found on the glyph set file system. This should be set to ``True`` if you\n are reading an existing UFO and ``False`` if you use ``getGlyphSet`` to create\n a fresh glyph set.\n """\n if validateRead is None:\n validateRead = self._validate\n if validateWrite is None:\n validateWrite = self._validate\n # only default can be written in < 3\n if self._formatVersion < UFOFormatVersion.FORMAT_3_0 and (\n not defaultLayer or layerName is not None\n ):\n raise UFOLibError(\n f"Only the default layer can be writen in UFO {self._formatVersion.major}."\n )\n # locate a layer name when None has been given\n if layerName is None and defaultLayer:\n for existingLayerName, directory in self.layerContents.items():\n if directory == DEFAULT_GLYPHS_DIRNAME:\n layerName = existingLayerName\n if layerName is None:\n layerName = DEFAULT_LAYER_NAME\n elif layerName is None and not defaultLayer:\n raise UFOLibError("A layer name must be provided for non-default layers.")\n # move along to format specific writing\n if self._formatVersion < UFOFormatVersion.FORMAT_3_0:\n return self._getDefaultGlyphSet(\n validateRead,\n validateWrite,\n glyphNameToFileNameFunc=glyphNameToFileNameFunc,\n expectContentsFile=expectContentsFile,\n )\n elif self._formatVersion.major == UFOFormatVersion.FORMAT_3_0.major:\n return self._getGlyphSetFormatVersion3(\n validateRead,\n validateWrite,\n layerName=layerName,\n defaultLayer=defaultLayer,\n glyphNameToFileNameFunc=glyphNameToFileNameFunc,\n expectContentsFile=expectContentsFile,\n )\n else:\n raise NotImplementedError(self._formatVersion)\n\n def _getDefaultGlyphSet(\n self,\n validateRead,\n validateWrite,\n glyphNameToFileNameFunc=None,\n expectContentsFile=False,\n ):\n from fontTools.ufoLib.glifLib import GlyphSet\n\n glyphSubFS = self.fs.makedir(DEFAULT_GLYPHS_DIRNAME, recreate=True)\n return GlyphSet(\n glyphSubFS,\n glyphNameToFileNameFunc=glyphNameToFileNameFunc,\n ufoFormatVersion=self._formatVersion,\n validateRead=validateRead,\n validateWrite=validateWrite,\n expectContentsFile=expectContentsFile,\n )\n\n def _getGlyphSetFormatVersion3(\n self,\n validateRead,\n validateWrite,\n layerName=None,\n defaultLayer=True,\n glyphNameToFileNameFunc=None,\n expectContentsFile=False,\n ):\n from fontTools.ufoLib.glifLib import GlyphSet\n\n # if the default flag is on, make sure that the default in the file\n # matches the default being written. also make sure that this layer\n # name is not already linked to a non-default layer.\n if defaultLayer:\n for existingLayerName, directory in self.layerContents.items():\n if directory == DEFAULT_GLYPHS_DIRNAME:\n if existingLayerName != layerName:\n raise UFOLibError(\n "Another layer ('%s') is already mapped to the default directory."\n % existingLayerName\n )\n elif existingLayerName == layerName:\n raise UFOLibError(\n "The layer name is already mapped to a non-default layer."\n )\n # get an existing directory name\n if layerName in self.layerContents:\n directory = self.layerContents[layerName]\n # get a new directory name\n else:\n if defaultLayer:\n directory = DEFAULT_GLYPHS_DIRNAME\n else:\n # not caching this could be slightly expensive,\n # but caching it will be cumbersome\n existing = {d.lower() for d in self.layerContents.values()}\n directory = userNameToFileName(\n layerName, existing=existing, prefix="glyphs."\n )\n # make the directory\n glyphSubFS = self.fs.makedir(directory, recreate=True)\n # store the mapping\n self.layerContents[layerName] = directory\n # load the glyph set\n return GlyphSet(\n glyphSubFS,\n glyphNameToFileNameFunc=glyphNameToFileNameFunc,\n ufoFormatVersion=self._formatVersion,\n validateRead=validateRead,\n validateWrite=validateWrite,\n expectContentsFile=expectContentsFile,\n )\n\n def renameGlyphSet(self, layerName, newLayerName, defaultLayer=False):\n """\n Rename a glyph set.\n\n Note: if a GlyphSet object has already been retrieved for\n layerName, it is up to the caller to inform that object that\n the directory it represents has changed.\n """\n if self._formatVersion < UFOFormatVersion.FORMAT_3_0:\n # ignore renaming glyph sets for UFO1 UFO2\n # just write the data from the default layer\n return\n # the new and old names can be the same\n # as long as the default is being switched\n if layerName == newLayerName:\n # if the default is off and the layer is already not the default, skip\n if (\n self.layerContents[layerName] != DEFAULT_GLYPHS_DIRNAME\n and not defaultLayer\n ):\n return\n # if the default is on and the layer is already the default, skip\n if self.layerContents[layerName] == DEFAULT_GLYPHS_DIRNAME and defaultLayer:\n return\n else:\n # make sure the new layer name doesn't already exist\n if newLayerName is None:\n newLayerName = DEFAULT_LAYER_NAME\n if newLayerName in self.layerContents:\n raise UFOLibError("A layer named %s already exists." % newLayerName)\n # make sure the default layer doesn't already exist\n if defaultLayer and DEFAULT_GLYPHS_DIRNAME in self.layerContents.values():\n raise UFOLibError("A default layer already exists.")\n # get the paths\n oldDirectory = self._findDirectoryForLayerName(layerName)\n if defaultLayer:\n newDirectory = DEFAULT_GLYPHS_DIRNAME\n else:\n existing = {name.lower() for name in self.layerContents.values()}\n newDirectory = userNameToFileName(\n newLayerName, existing=existing, prefix="glyphs."\n )\n # update the internal mapping\n del self.layerContents[layerName]\n self.layerContents[newLayerName] = newDirectory\n # do the file system copy\n self.fs.movedir(oldDirectory, newDirectory, create=True)\n\n def deleteGlyphSet(self, layerName):\n """\n Remove the glyph set matching layerName.\n """\n if self._formatVersion < UFOFormatVersion.FORMAT_3_0:\n # ignore deleting glyph sets for UFO1 UFO2 as there are no layers\n # just write the data from the default layer\n return\n foundDirectory = self._findDirectoryForLayerName(layerName)\n self.removePath(foundDirectory, removeEmptyParents=False)\n del self.layerContents[layerName]\n\n def writeData(self, fileName, data):\n """\n Write data to fileName in the 'data' directory.\n The data must be a bytes string.\n """\n self.writeBytesToPath(f"{DATA_DIRNAME}/{fsdecode(fileName)}", data)\n\n def removeData(self, fileName):\n """\n Remove the file named fileName from the data directory.\n """\n self.removePath(f"{DATA_DIRNAME}/{fsdecode(fileName)}")\n\n # /images\n\n def writeImage(self, fileName, data, validate=None):\n """\n Write data to fileName in the images directory.\n The data must be a valid PNG.\n """\n if validate is None:\n validate = self._validate\n if self._formatVersion < UFOFormatVersion.FORMAT_3_0:\n raise UFOLibError(\n f"Images are not allowed in UFO {self._formatVersion.major}."\n )\n fileName = fsdecode(fileName)\n if validate:\n valid, error = pngValidator(data=data)\n if not valid:\n raise UFOLibError(error)\n self.writeBytesToPath(f"{IMAGES_DIRNAME}/{fileName}", data)\n\n def removeImage(self, fileName, validate=None): # XXX remove unused 'validate'?\n """\n Remove the file named fileName from the\n images directory.\n """\n if self._formatVersion < UFOFormatVersion.FORMAT_3_0:\n raise UFOLibError(\n f"Images are not allowed in UFO {self._formatVersion.major}."\n )\n self.removePath(f"{IMAGES_DIRNAME}/{fsdecode(fileName)}")\n\n def copyImageFromReader(self, reader, sourceFileName, destFileName, validate=None):\n """\n Copy the sourceFileName in the provided UFOReader to destFileName\n in this writer. This uses the most memory efficient method possible\n for copying the data possible.\n """\n if validate is None:\n validate = self._validate\n if self._formatVersion < UFOFormatVersion.FORMAT_3_0:\n raise UFOLibError(\n f"Images are not allowed in UFO {self._formatVersion.major}."\n )\n sourcePath = f"{IMAGES_DIRNAME}/{fsdecode(sourceFileName)}"\n destPath = f"{IMAGES_DIRNAME}/{fsdecode(destFileName)}"\n self.copyFromReader(reader, sourcePath, destPath)\n\n def close(self):\n if self._havePreviousFile and self._fileStructure is UFOFileStructure.ZIP:\n # if we are updating an existing zip file, we can now compress the\n # contents of the temporary filesystem in the destination path\n rootDir = os.path.splitext(os.path.basename(self._path))[0] + ".ufo"\n with fs.zipfs.ZipFS(self._path, write=True, encoding="utf-8") as destFS:\n fs.copy.copy_fs(self.fs, destFS.makedir(rootDir))\n super().close()\n\n\n# just an alias, makes it more explicit\nUFOReaderWriter = UFOWriter\n\n\n# ----------------\n# Helper Functions\n# ----------------\n\n\ndef _sniffFileStructure(ufo_path):\n """Return UFOFileStructure.ZIP if the UFO at path 'ufo_path' (str)\n is a zip file, else return UFOFileStructure.PACKAGE if 'ufo_path' is a\n directory.\n Raise UFOLibError if it is a file with unknown structure, or if the path\n does not exist.\n """\n if zipfile.is_zipfile(ufo_path):\n return UFOFileStructure.ZIP\n elif os.path.isdir(ufo_path):\n return UFOFileStructure.PACKAGE\n elif os.path.isfile(ufo_path):\n raise UFOLibError(\n "The specified UFO does not have a known structure: '%s'" % ufo_path\n )\n else:\n raise UFOLibError("No such file or directory: '%s'" % ufo_path)\n\n\ndef makeUFOPath(path):\n """\n Return a .ufo pathname.\n\n >>> makeUFOPath("directory/something.ext") == (\n ... os.path.join('directory', 'something.ufo'))\n True\n >>> makeUFOPath("directory/something.another.thing.ext") == (\n ... os.path.join('directory', 'something.another.thing.ufo'))\n True\n """\n dir, name = os.path.split(path)\n name = ".".join([".".join(name.split(".")[:-1]), "ufo"])\n return os.path.join(dir, name)\n\n\n# ----------------------\n# fontinfo.plist Support\n# ----------------------\n\n# Version Validators\n\n# There is no version 1 validator and there shouldn't be.\n# The version 1 spec was very loose and there were numerous\n# cases of invalid values.\n\n\ndef validateFontInfoVersion2ValueForAttribute(attr, value):\n """\n This performs very basic validation of the value for attribute\n following the UFO 2 fontinfo.plist specification. The results\n of this should not be interpretted as *correct* for the font\n that they are part of. This merely indicates that the value\n is of the proper type and, where the specification defines\n a set range of possible values for an attribute, that the\n value is in the accepted range.\n """\n dataValidationDict = fontInfoAttributesVersion2ValueData[attr]\n valueType = dataValidationDict.get("type")\n validator = dataValidationDict.get("valueValidator")\n valueOptions = dataValidationDict.get("valueOptions")\n # have specific options for the validator\n if valueOptions is not None:\n isValidValue = validator(value, valueOptions)\n # no specific options\n else:\n if validator == genericTypeValidator:\n isValidValue = validator(value, valueType)\n else:\n isValidValue = validator(value)\n return isValidValue\n\n\ndef validateInfoVersion2Data(infoData):\n """\n This performs very basic validation of the value for infoData\n following the UFO 2 fontinfo.plist specification. The results\n of this should not be interpretted as *correct* for the font\n that they are part of. This merely indicates that the values\n are of the proper type and, where the specification defines\n a set range of possible values for an attribute, that the\n value is in the accepted range.\n """\n validInfoData = {}\n for attr, value in list(infoData.items()):\n isValidValue = validateFontInfoVersion2ValueForAttribute(attr, value)\n if not isValidValue:\n raise UFOLibError(f"Invalid value for attribute {attr} ({value!r}).")\n else:\n validInfoData[attr] = value\n return validInfoData\n\n\ndef validateFontInfoVersion3ValueForAttribute(attr, value):\n """\n This performs very basic validation of the value for attribute\n following the UFO 3 fontinfo.plist specification. The results\n of this should not be interpretted as *correct* for the font\n that they are part of. This merely indicates that the value\n is of the proper type and, where the specification defines\n a set range of possible values for an attribute, that the\n value is in the accepted range.\n """\n dataValidationDict = fontInfoAttributesVersion3ValueData[attr]\n valueType = dataValidationDict.get("type")\n validator = dataValidationDict.get("valueValidator")\n valueOptions = dataValidationDict.get("valueOptions")\n # have specific options for the validator\n if valueOptions is not None:\n isValidValue = validator(value, valueOptions)\n # no specific options\n else:\n if validator == genericTypeValidator:\n isValidValue = validator(value, valueType)\n else:\n isValidValue = validator(value)\n return isValidValue\n\n\ndef validateInfoVersion3Data(infoData):\n """\n This performs very basic validation of the value for infoData\n following the UFO 3 fontinfo.plist specification. The results\n of this should not be interpretted as *correct* for the font\n that they are part of. This merely indicates that the values\n are of the proper type and, where the specification defines\n a set range of possible values for an attribute, that the\n value is in the accepted range.\n """\n validInfoData = {}\n for attr, value in list(infoData.items()):\n isValidValue = validateFontInfoVersion3ValueForAttribute(attr, value)\n if not isValidValue:\n raise UFOLibError(f"Invalid value for attribute {attr} ({value!r}).")\n else:\n validInfoData[attr] = value\n return validInfoData\n\n\n# Value Options\n\nfontInfoOpenTypeHeadFlagsOptions = list(range(0, 15))\nfontInfoOpenTypeOS2SelectionOptions = [1, 2, 3, 4, 7, 8, 9]\nfontInfoOpenTypeOS2UnicodeRangesOptions = list(range(0, 128))\nfontInfoOpenTypeOS2CodePageRangesOptions = list(range(0, 64))\nfontInfoOpenTypeOS2TypeOptions = [0, 1, 2, 3, 8, 9]\n\n# Version Attribute Definitions\n# This defines the attributes, types and, in some\n# cases the possible values, that can exist is\n# fontinfo.plist.\n\nfontInfoAttributesVersion1 = {\n "familyName",\n "styleName",\n "fullName",\n "fontName",\n "menuName",\n "fontStyle",\n "note",\n "versionMajor",\n "versionMinor",\n "year",\n "copyright",\n "notice",\n "trademark",\n "license",\n "licenseURL",\n "createdBy",\n "designer",\n "designerURL",\n "vendorURL",\n "unitsPerEm",\n "ascender",\n "descender",\n "capHeight",\n "xHeight",\n "defaultWidth",\n "slantAngle",\n "italicAngle",\n "widthName",\n "weightName",\n "weightValue",\n "fondName",\n "otFamilyName",\n "otStyleName",\n "otMacName",\n "msCharSet",\n "fondID",\n "uniqueID",\n "ttVendor",\n "ttUniqueID",\n "ttVersion",\n}\n\nfontInfoAttributesVersion2ValueData = {\n "familyName": dict(type=str),\n "styleName": dict(type=str),\n "styleMapFamilyName": dict(type=str),\n "styleMapStyleName": dict(\n type=str, valueValidator=fontInfoStyleMapStyleNameValidator\n ),\n "versionMajor": dict(type=int),\n "versionMinor": dict(type=int),\n "year": dict(type=int),\n "copyright": dict(type=str),\n "trademark": dict(type=str),\n "unitsPerEm": dict(type=(int, float)),\n "descender": dict(type=(int, float)),\n "xHeight": dict(type=(int, float)),\n "capHeight": dict(type=(int, float)),\n "ascender": dict(type=(int, float)),\n "italicAngle": dict(type=(float, int)),\n "note": dict(type=str),\n "openTypeHeadCreated": dict(\n type=str, valueValidator=fontInfoOpenTypeHeadCreatedValidator\n ),\n "openTypeHeadLowestRecPPEM": dict(type=(int, float)),\n "openTypeHeadFlags": dict(\n type="integerList",\n valueValidator=genericIntListValidator,\n valueOptions=fontInfoOpenTypeHeadFlagsOptions,\n ),\n "openTypeHheaAscender": dict(type=(int, float)),\n "openTypeHheaDescender": dict(type=(int, float)),\n "openTypeHheaLineGap": dict(type=(int, float)),\n "openTypeHheaCaretSlopeRise": dict(type=int),\n "openTypeHheaCaretSlopeRun": dict(type=int),\n "openTypeHheaCaretOffset": dict(type=(int, float)),\n "openTypeNameDesigner": dict(type=str),\n "openTypeNameDesignerURL": dict(type=str),\n "openTypeNameManufacturer": dict(type=str),\n "openTypeNameManufacturerURL": dict(type=str),\n "openTypeNameLicense": dict(type=str),\n "openTypeNameLicenseURL": dict(type=str),\n "openTypeNameVersion": dict(type=str),\n "openTypeNameUniqueID": dict(type=str),\n "openTypeNameDescription": dict(type=str),\n "openTypeNamePreferredFamilyName": dict(type=str),\n "openTypeNamePreferredSubfamilyName": dict(type=str),\n "openTypeNameCompatibleFullName": dict(type=str),\n "openTypeNameSampleText": dict(type=str),\n "openTypeNameWWSFamilyName": dict(type=str),\n "openTypeNameWWSSubfamilyName": dict(type=str),\n "openTypeOS2WidthClass": dict(\n type=int, valueValidator=fontInfoOpenTypeOS2WidthClassValidator\n ),\n "openTypeOS2WeightClass": dict(\n type=int, valueValidator=fontInfoOpenTypeOS2WeightClassValidator\n ),\n "openTypeOS2Selection": dict(\n type="integerList",\n valueValidator=genericIntListValidator,\n valueOptions=fontInfoOpenTypeOS2SelectionOptions,\n ),\n "openTypeOS2VendorID": dict(type=str),\n "openTypeOS2Panose": dict(\n type="integerList", valueValidator=fontInfoVersion2OpenTypeOS2PanoseValidator\n ),\n "openTypeOS2FamilyClass": dict(\n type="integerList", valueValidator=fontInfoOpenTypeOS2FamilyClassValidator\n ),\n "openTypeOS2UnicodeRanges": dict(\n type="integerList",\n valueValidator=genericIntListValidator,\n valueOptions=fontInfoOpenTypeOS2UnicodeRangesOptions,\n ),\n "openTypeOS2CodePageRanges": dict(\n type="integerList",\n valueValidator=genericIntListValidator,\n valueOptions=fontInfoOpenTypeOS2CodePageRangesOptions,\n ),\n "openTypeOS2TypoAscender": dict(type=(int, float)),\n "openTypeOS2TypoDescender": dict(type=(int, float)),\n "openTypeOS2TypoLineGap": dict(type=(int, float)),\n "openTypeOS2WinAscent": dict(type=(int, float)),\n "openTypeOS2WinDescent": dict(type=(int, float)),\n "openTypeOS2Type": dict(\n type="integerList",\n valueValidator=genericIntListValidator,\n valueOptions=fontInfoOpenTypeOS2TypeOptions,\n ),\n "openTypeOS2SubscriptXSize": dict(type=(int, float)),\n "openTypeOS2SubscriptYSize": dict(type=(int, float)),\n "openTypeOS2SubscriptXOffset": dict(type=(int, float)),\n "openTypeOS2SubscriptYOffset": dict(type=(int, float)),\n "openTypeOS2SuperscriptXSize": dict(type=(int, float)),\n "openTypeOS2SuperscriptYSize": dict(type=(int, float)),\n "openTypeOS2SuperscriptXOffset": dict(type=(int, float)),\n "openTypeOS2SuperscriptYOffset": dict(type=(int, float)),\n "openTypeOS2StrikeoutSize": dict(type=(int, float)),\n "openTypeOS2StrikeoutPosition": dict(type=(int, float)),\n "openTypeVheaVertTypoAscender": dict(type=(int, float)),\n "openTypeVheaVertTypoDescender": dict(type=(int, float)),\n "openTypeVheaVertTypoLineGap": dict(type=(int, float)),\n "openTypeVheaCaretSlopeRise": dict(type=int),\n "openTypeVheaCaretSlopeRun": dict(type=int),\n "openTypeVheaCaretOffset": dict(type=(int, float)),\n "postscriptFontName": dict(type=str),\n "postscriptFullName": dict(type=str),\n "postscriptSlantAngle": dict(type=(float, int)),\n "postscriptUniqueID": dict(type=int),\n "postscriptUnderlineThickness": dict(type=(int, float)),\n "postscriptUnderlinePosition": dict(type=(int, float)),\n "postscriptIsFixedPitch": dict(type=bool),\n "postscriptBlueValues": dict(\n type="integerList", valueValidator=fontInfoPostscriptBluesValidator\n ),\n "postscriptOtherBlues": dict(\n type="integerList", valueValidator=fontInfoPostscriptOtherBluesValidator\n ),\n "postscriptFamilyBlues": dict(\n type="integerList", valueValidator=fontInfoPostscriptBluesValidator\n ),\n "postscriptFamilyOtherBlues": dict(\n type="integerList", valueValidator=fontInfoPostscriptOtherBluesValidator\n ),\n "postscriptStemSnapH": dict(\n type="integerList", valueValidator=fontInfoPostscriptStemsValidator\n ),\n "postscriptStemSnapV": dict(\n type="integerList", valueValidator=fontInfoPostscriptStemsValidator\n ),\n "postscriptBlueFuzz": dict(type=(int, float)),\n "postscriptBlueShift": dict(type=(int, float)),\n "postscriptBlueScale": dict(type=(float, int)),\n "postscriptForceBold": dict(type=bool),\n "postscriptDefaultWidthX": dict(type=(int, float)),\n "postscriptNominalWidthX": dict(type=(int, float)),\n "postscriptWeightName": dict(type=str),\n "postscriptDefaultCharacter": dict(type=str),\n "postscriptWindowsCharacterSet": dict(\n type=int, valueValidator=fontInfoPostscriptWindowsCharacterSetValidator\n ),\n "macintoshFONDFamilyID": dict(type=int),\n "macintoshFONDName": dict(type=str),\n}\nfontInfoAttributesVersion2 = set(fontInfoAttributesVersion2ValueData.keys())\n\nfontInfoAttributesVersion3ValueData = deepcopy(fontInfoAttributesVersion2ValueData)\nfontInfoAttributesVersion3ValueData.update(\n {\n "versionMinor": dict(type=int, valueValidator=genericNonNegativeIntValidator),\n "unitsPerEm": dict(\n type=(int, float), valueValidator=genericNonNegativeNumberValidator\n ),\n "openTypeHeadLowestRecPPEM": dict(\n type=int, valueValidator=genericNonNegativeNumberValidator\n ),\n "openTypeHheaAscender": dict(type=int),\n "openTypeHheaDescender": dict(type=int),\n "openTypeHheaLineGap": dict(type=int),\n "openTypeHheaCaretOffset": dict(type=int),\n "openTypeOS2Panose": dict(\n type="integerList",\n valueValidator=fontInfoVersion3OpenTypeOS2PanoseValidator,\n ),\n "openTypeOS2TypoAscender": dict(type=int),\n "openTypeOS2TypoDescender": dict(type=int),\n "openTypeOS2TypoLineGap": dict(type=int),\n "openTypeOS2WinAscent": dict(\n type=int, valueValidator=genericNonNegativeNumberValidator\n ),\n "openTypeOS2WinDescent": dict(\n type=int, valueValidator=genericNonNegativeNumberValidator\n ),\n "openTypeOS2SubscriptXSize": dict(type=int),\n "openTypeOS2SubscriptYSize": dict(type=int),\n "openTypeOS2SubscriptXOffset": dict(type=int),\n "openTypeOS2SubscriptYOffset": dict(type=int),\n "openTypeOS2SuperscriptXSize": dict(type=int),\n "openTypeOS2SuperscriptYSize": dict(type=int),\n "openTypeOS2SuperscriptXOffset": dict(type=int),\n "openTypeOS2SuperscriptYOffset": dict(type=int),\n "openTypeOS2StrikeoutSize": dict(type=int),\n "openTypeOS2StrikeoutPosition": dict(type=int),\n "openTypeGaspRangeRecords": dict(\n type="dictList", valueValidator=fontInfoOpenTypeGaspRangeRecordsValidator\n ),\n "openTypeNameRecords": dict(\n type="dictList", valueValidator=fontInfoOpenTypeNameRecordsValidator\n ),\n "openTypeVheaVertTypoAscender": dict(type=int),\n "openTypeVheaVertTypoDescender": dict(type=int),\n "openTypeVheaVertTypoLineGap": dict(type=int),\n "openTypeVheaCaretOffset": dict(type=int),\n "woffMajorVersion": dict(\n type=int, valueValidator=genericNonNegativeIntValidator\n ),\n "woffMinorVersion": dict(\n type=int, valueValidator=genericNonNegativeIntValidator\n ),\n "woffMetadataUniqueID": dict(\n type=dict, valueValidator=fontInfoWOFFMetadataUniqueIDValidator\n ),\n "woffMetadataVendor": dict(\n type=dict, valueValidator=fontInfoWOFFMetadataVendorValidator\n ),\n "woffMetadataCredits": dict(\n type=dict, valueValidator=fontInfoWOFFMetadataCreditsValidator\n ),\n "woffMetadataDescription": dict(\n type=dict, valueValidator=fontInfoWOFFMetadataDescriptionValidator\n ),\n "woffMetadataLicense": dict(\n type=dict, valueValidator=fontInfoWOFFMetadataLicenseValidator\n ),\n "woffMetadataCopyright": dict(\n type=dict, valueValidator=fontInfoWOFFMetadataCopyrightValidator\n ),\n "woffMetadataTrademark": dict(\n type=dict, valueValidator=fontInfoWOFFMetadataTrademarkValidator\n ),\n "woffMetadataLicensee": dict(\n type=dict, valueValidator=fontInfoWOFFMetadataLicenseeValidator\n ),\n "woffMetadataExtensions": dict(\n type=list, valueValidator=fontInfoWOFFMetadataExtensionsValidator\n ),\n "guidelines": dict(type=list, valueValidator=guidelinesValidator),\n }\n)\nfontInfoAttributesVersion3 = set(fontInfoAttributesVersion3ValueData.keys())\n\n# insert the type validator for all attrs that\n# have no defined validator.\nfor attr, dataDict in list(fontInfoAttributesVersion2ValueData.items()):\n if "valueValidator" not in dataDict:\n dataDict["valueValidator"] = genericTypeValidator\n\nfor attr, dataDict in list(fontInfoAttributesVersion3ValueData.items()):\n if "valueValidator" not in dataDict:\n dataDict["valueValidator"] = genericTypeValidator\n\n# Version Conversion Support\n# These are used from converting from version 1\n# to version 2 or vice-versa.\n\n\ndef _flipDict(d):\n flipped = {}\n for key, value in list(d.items()):\n flipped[value] = key\n return flipped\n\n\nfontInfoAttributesVersion1To2 = {\n "menuName": "styleMapFamilyName",\n "designer": "openTypeNameDesigner",\n "designerURL": "openTypeNameDesignerURL",\n "createdBy": "openTypeNameManufacturer",\n "vendorURL": "openTypeNameManufacturerURL",\n "license": "openTypeNameLicense",\n "licenseURL": "openTypeNameLicenseURL",\n "ttVersion": "openTypeNameVersion",\n "ttUniqueID": "openTypeNameUniqueID",\n "notice": "openTypeNameDescription",\n "otFamilyName": "openTypeNamePreferredFamilyName",\n "otStyleName": "openTypeNamePreferredSubfamilyName",\n "otMacName": "openTypeNameCompatibleFullName",\n "weightName": "postscriptWeightName",\n "weightValue": "openTypeOS2WeightClass",\n "ttVendor": "openTypeOS2VendorID",\n "uniqueID": "postscriptUniqueID",\n "fontName": "postscriptFontName",\n "fondID": "macintoshFONDFamilyID",\n "fondName": "macintoshFONDName",\n "defaultWidth": "postscriptDefaultWidthX",\n "slantAngle": "postscriptSlantAngle",\n "fullName": "postscriptFullName",\n # require special value conversion\n "fontStyle": "styleMapStyleName",\n "widthName": "openTypeOS2WidthClass",\n "msCharSet": "postscriptWindowsCharacterSet",\n}\nfontInfoAttributesVersion2To1 = _flipDict(fontInfoAttributesVersion1To2)\ndeprecatedFontInfoAttributesVersion2 = set(fontInfoAttributesVersion1To2.keys())\n\n_fontStyle1To2 = {64: "regular", 1: "italic", 32: "bold", 33: "bold italic"}\n_fontStyle2To1 = _flipDict(_fontStyle1To2)\n# Some UFO 1 files have 0\n_fontStyle1To2[0] = "regular"\n\n_widthName1To2 = {\n "Ultra-condensed": 1,\n "Extra-condensed": 2,\n "Condensed": 3,\n "Semi-condensed": 4,\n "Medium (normal)": 5,\n "Semi-expanded": 6,\n "Expanded": 7,\n "Extra-expanded": 8,\n "Ultra-expanded": 9,\n}\n_widthName2To1 = _flipDict(_widthName1To2)\n# FontLab's default width value is "Normal".\n# Many format version 1 UFOs will have this.\n_widthName1To2["Normal"] = 5\n# FontLab has an "All" width value. In UFO 1\n# move this up to "Normal".\n_widthName1To2["All"] = 5\n# "medium" appears in a lot of UFO 1 files.\n_widthName1To2["medium"] = 5\n# "Medium" appears in a lot of UFO 1 files.\n_widthName1To2["Medium"] = 5\n\n_msCharSet1To2 = {\n 0: 1,\n 1: 2,\n 2: 3,\n 77: 4,\n 128: 5,\n 129: 6,\n 130: 7,\n 134: 8,\n 136: 9,\n 161: 10,\n 162: 11,\n 163: 12,\n 177: 13,\n 178: 14,\n 186: 15,\n 200: 16,\n 204: 17,\n 222: 18,\n 238: 19,\n 255: 20,\n}\n_msCharSet2To1 = _flipDict(_msCharSet1To2)\n\n# 1 <-> 2\n\n\ndef convertFontInfoValueForAttributeFromVersion1ToVersion2(attr, value):\n """\n Convert value from version 1 to version 2 format.\n Returns the new attribute name and the converted value.\n If the value is None, None will be returned for the new value.\n """\n # convert floats to ints if possible\n if isinstance(value, float):\n if int(value) == value:\n value = int(value)\n if value is not None:\n if attr == "fontStyle":\n v = _fontStyle1To2.get(value)\n if v is None:\n raise UFOLibError(\n f"Cannot convert value ({value!r}) for attribute {attr}."\n )\n value = v\n elif attr == "widthName":\n v = _widthName1To2.get(value)\n if v is None:\n raise UFOLibError(\n f"Cannot convert value ({value!r}) for attribute {attr}."\n )\n value = v\n elif attr == "msCharSet":\n v = _msCharSet1To2.get(value)\n if v is None:\n raise UFOLibError(\n f"Cannot convert value ({value!r}) for attribute {attr}."\n )\n value = v\n attr = fontInfoAttributesVersion1To2.get(attr, attr)\n return attr, value\n\n\ndef convertFontInfoValueForAttributeFromVersion2ToVersion1(attr, value):\n """\n Convert value from version 2 to version 1 format.\n Returns the new attribute name and the converted value.\n If the value is None, None will be returned for the new value.\n """\n if value is not None:\n if attr == "styleMapStyleName":\n value = _fontStyle2To1.get(value)\n elif attr == "openTypeOS2WidthClass":\n value = _widthName2To1.get(value)\n elif attr == "postscriptWindowsCharacterSet":\n value = _msCharSet2To1.get(value)\n attr = fontInfoAttributesVersion2To1.get(attr, attr)\n return attr, value\n\n\ndef _convertFontInfoDataVersion1ToVersion2(data):\n converted = {}\n for attr, value in list(data.items()):\n # FontLab gives -1 for the weightValue\n # for fonts wil no defined value. Many\n # format version 1 UFOs will have this.\n if attr == "weightValue" and value == -1:\n continue\n newAttr, newValue = convertFontInfoValueForAttributeFromVersion1ToVersion2(\n attr, value\n )\n # skip if the attribute is not part of version 2\n if newAttr not in fontInfoAttributesVersion2:\n continue\n # catch values that can't be converted\n if value is None:\n raise UFOLibError(\n f"Cannot convert value ({value!r}) for attribute {newAttr}."\n )\n # store\n converted[newAttr] = newValue\n return converted\n\n\ndef _convertFontInfoDataVersion2ToVersion1(data):\n converted = {}\n for attr, value in list(data.items()):\n newAttr, newValue = convertFontInfoValueForAttributeFromVersion2ToVersion1(\n attr, value\n )\n # only take attributes that are registered for version 1\n if newAttr not in fontInfoAttributesVersion1:\n continue\n # catch values that can't be converted\n if value is None:\n raise UFOLibError(\n f"Cannot convert value ({value!r}) for attribute {newAttr}."\n )\n # store\n converted[newAttr] = newValue\n return converted\n\n\n# 2 <-> 3\n\n_ufo2To3NonNegativeInt = {\n "versionMinor",\n "openTypeHeadLowestRecPPEM",\n "openTypeOS2WinAscent",\n "openTypeOS2WinDescent",\n}\n_ufo2To3NonNegativeIntOrFloat = {\n "unitsPerEm",\n}\n_ufo2To3FloatToInt = {\n "openTypeHeadLowestRecPPEM",\n "openTypeHheaAscender",\n "openTypeHheaDescender",\n "openTypeHheaLineGap",\n "openTypeHheaCaretOffset",\n "openTypeOS2TypoAscender",\n "openTypeOS2TypoDescender",\n "openTypeOS2TypoLineGap",\n "openTypeOS2WinAscent",\n "openTypeOS2WinDescent",\n "openTypeOS2SubscriptXSize",\n "openTypeOS2SubscriptYSize",\n "openTypeOS2SubscriptXOffset",\n "openTypeOS2SubscriptYOffset",\n "openTypeOS2SuperscriptXSize",\n "openTypeOS2SuperscriptYSize",\n "openTypeOS2SuperscriptXOffset",\n "openTypeOS2SuperscriptYOffset",\n "openTypeOS2StrikeoutSize",\n "openTypeOS2StrikeoutPosition",\n "openTypeVheaVertTypoAscender",\n "openTypeVheaVertTypoDescender",\n "openTypeVheaVertTypoLineGap",\n "openTypeVheaCaretOffset",\n}\n\n\ndef convertFontInfoValueForAttributeFromVersion2ToVersion3(attr, value):\n """\n Convert value from version 2 to version 3 format.\n Returns the new attribute name and the converted value.\n If the value is None, None will be returned for the new value.\n """\n if attr in _ufo2To3FloatToInt:\n try:\n value = round(value)\n except (ValueError, TypeError):\n raise UFOLibError("Could not convert value for %s." % attr)\n if attr in _ufo2To3NonNegativeInt:\n try:\n value = int(abs(value))\n except (ValueError, TypeError):\n raise UFOLibError("Could not convert value for %s." % attr)\n elif attr in _ufo2To3NonNegativeIntOrFloat:\n try:\n v = float(abs(value))\n except (ValueError, TypeError):\n raise UFOLibError("Could not convert value for %s." % attr)\n if v == int(v):\n v = int(v)\n if v != value:\n value = v\n return attr, value\n\n\ndef convertFontInfoValueForAttributeFromVersion3ToVersion2(attr, value):\n """\n Convert value from version 3 to version 2 format.\n Returns the new attribute name and the converted value.\n If the value is None, None will be returned for the new value.\n """\n return attr, value\n\n\ndef _convertFontInfoDataVersion3ToVersion2(data):\n converted = {}\n for attr, value in list(data.items()):\n newAttr, newValue = convertFontInfoValueForAttributeFromVersion3ToVersion2(\n attr, value\n )\n if newAttr not in fontInfoAttributesVersion2:\n continue\n converted[newAttr] = newValue\n return converted\n\n\ndef _convertFontInfoDataVersion2ToVersion3(data):\n converted = {}\n for attr, value in list(data.items()):\n attr, value = convertFontInfoValueForAttributeFromVersion2ToVersion3(\n attr, value\n )\n converted[attr] = value\n return converted\n\n\nif __name__ == "__main__":\n import doctest\n\n doctest.testmod()\n
.venv\Lib\site-packages\fontTools\ufoLib\__init__.py
__init__.py
Python
96,908
0.75
0.197013
0.089652
react-lib
241
2024-09-14T17:20:06.551860
Apache-2.0
false
bf8740811b06d57461336a172b71d263
\n\n
.venv\Lib\site-packages\fontTools\ufoLib\__pycache__\converters.cpython-313.pyc
converters.cpython-313.pyc
Other
11,804
0.95
0.037152
0
awesome-app
253
2023-07-20T12:12:45.742787
GPL-3.0
false
67f8b0206eaa7ee4e45dfb1cb1e551a1
\n\n
.venv\Lib\site-packages\fontTools\ufoLib\__pycache__\errors.cpython-313.pyc
errors.cpython-313.pyc
Other
1,546
0.95
0.142857
0
node-utils
16
2024-08-08T22:33:53.949707
Apache-2.0
false
48c8a30bffb4fc39e43ed15a1e8a085c
\n\n
.venv\Lib\site-packages\fontTools\ufoLib\__pycache__\etree.cpython-313.pyc
etree.cpython-313.pyc
Other
440
0.8
0.166667
0
python-kit
246
2024-03-31T17:01:57.950902
BSD-3-Clause
false
cb3531003bfe5037cae6bc9f3738c7da
\n\n
.venv\Lib\site-packages\fontTools\ufoLib\__pycache__\filenames.cpython-313.pyc
filenames.cpython-313.pyc
Other
9,214
0.95
0.046392
0
vue-tools
508
2023-09-15T04:22:34.845223
MIT
false
3ac0e65e91059b01fba622d6ca8f1536
\n\n
.venv\Lib\site-packages\fontTools\ufoLib\__pycache__\glifLib.cpython-313.pyc
glifLib.cpython-313.pyc
Other
77,073
0.75
0.088235
0.009674
react-lib
429
2025-04-05T14:53:55.267657
Apache-2.0
false
474cc4324f2b899a1df374caa7db809c
\n\n
.venv\Lib\site-packages\fontTools\ufoLib\__pycache__\kerning.cpython-313.pyc
kerning.cpython-313.pyc
Other
3,719
0.95
0.058824
0
python-kit
236
2025-03-25T06:50:41.059410
GPL-3.0
false
5381001ad2546946221c146fed40eeaa
\n\n
.venv\Lib\site-packages\fontTools\ufoLib\__pycache__\plistlib.cpython-313.pyc
plistlib.cpython-313.pyc
Other
2,279
0.8
0.117647
0
node-utils
222
2024-06-04T07:51:00.319421
Apache-2.0
false
ce8bdbd7283be3790efaf9fd3e8e05ef
\n\n
.venv\Lib\site-packages\fontTools\ufoLib\__pycache__\pointPen.cpython-313.pyc
pointPen.cpython-313.pyc
Other
456
0.7
0.333333
0
python-kit
210
2024-12-29T05:47:26.307324
MIT
false
c94f01c68bb69d0abbde4c375240d27d
\n\n
.venv\Lib\site-packages\fontTools\ufoLib\__pycache__\utils.cpython-313.pyc
utils.cpython-313.pyc
Other
3,569
0.85
0.066667
0
vue-tools
982
2023-10-05T04:33:32.676318
Apache-2.0
false
774930d8d76c27db40449d82c27bc29f
\n\n
.venv\Lib\site-packages\fontTools\ufoLib\__pycache__\validators.cpython-313.pyc
validators.cpython-313.pyc
Other
30,471
0.95
0
0.012158
react-lib
315
2024-10-02T00:57:27.862184
Apache-2.0
false
946dff8e5d3a063a39f039b67cf4063b
\n\n
.venv\Lib\site-packages\fontTools\ufoLib\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
91,247
0.75
0.100967
0.009217
node-utils
145
2023-09-01T11:01:51.827279
Apache-2.0
false
ff978e649bbd6dc078fe0273d0937469
# -*- coding: utf-8 -*-\n#\n# NOTE: This file was auto-generated with MetaTools/buildUCD.py.\n# Source: https://unicode.org/Public/UNIDATA/Blocks.txt\n# License: http://unicode.org/copyright.html#License\n#\n# Blocks-16.0.0.txt\n# Date: 2024-02-02\n# © 2024 Unicode®, Inc.\n# Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries.\n# For terms of use and license, see https://www.unicode.org/terms_of_use.html\n#\n# Unicode Character Database\n# For documentation, see https://www.unicode.org/reports/tr44/\n#\n# Format:\n# Start Code..End Code; Block Name\n\nRANGES = [\n 0x0000, # .. 0x007F ; Basic Latin\n 0x0080, # .. 0x00FF ; Latin-1 Supplement\n 0x0100, # .. 0x017F ; Latin Extended-A\n 0x0180, # .. 0x024F ; Latin Extended-B\n 0x0250, # .. 0x02AF ; IPA Extensions\n 0x02B0, # .. 0x02FF ; Spacing Modifier Letters\n 0x0300, # .. 0x036F ; Combining Diacritical Marks\n 0x0370, # .. 0x03FF ; Greek and Coptic\n 0x0400, # .. 0x04FF ; Cyrillic\n 0x0500, # .. 0x052F ; Cyrillic Supplement\n 0x0530, # .. 0x058F ; Armenian\n 0x0590, # .. 0x05FF ; Hebrew\n 0x0600, # .. 0x06FF ; Arabic\n 0x0700, # .. 0x074F ; Syriac\n 0x0750, # .. 0x077F ; Arabic Supplement\n 0x0780, # .. 0x07BF ; Thaana\n 0x07C0, # .. 0x07FF ; NKo\n 0x0800, # .. 0x083F ; Samaritan\n 0x0840, # .. 0x085F ; Mandaic\n 0x0860, # .. 0x086F ; Syriac Supplement\n 0x0870, # .. 0x089F ; Arabic Extended-B\n 0x08A0, # .. 0x08FF ; Arabic Extended-A\n 0x0900, # .. 0x097F ; Devanagari\n 0x0980, # .. 0x09FF ; Bengali\n 0x0A00, # .. 0x0A7F ; Gurmukhi\n 0x0A80, # .. 0x0AFF ; Gujarati\n 0x0B00, # .. 0x0B7F ; Oriya\n 0x0B80, # .. 0x0BFF ; Tamil\n 0x0C00, # .. 0x0C7F ; Telugu\n 0x0C80, # .. 0x0CFF ; Kannada\n 0x0D00, # .. 0x0D7F ; Malayalam\n 0x0D80, # .. 0x0DFF ; Sinhala\n 0x0E00, # .. 0x0E7F ; Thai\n 0x0E80, # .. 0x0EFF ; Lao\n 0x0F00, # .. 0x0FFF ; Tibetan\n 0x1000, # .. 0x109F ; Myanmar\n 0x10A0, # .. 0x10FF ; Georgian\n 0x1100, # .. 0x11FF ; Hangul Jamo\n 0x1200, # .. 0x137F ; Ethiopic\n 0x1380, # .. 0x139F ; Ethiopic Supplement\n 0x13A0, # .. 0x13FF ; Cherokee\n 0x1400, # .. 0x167F ; Unified Canadian Aboriginal Syllabics\n 0x1680, # .. 0x169F ; Ogham\n 0x16A0, # .. 0x16FF ; Runic\n 0x1700, # .. 0x171F ; Tagalog\n 0x1720, # .. 0x173F ; Hanunoo\n 0x1740, # .. 0x175F ; Buhid\n 0x1760, # .. 0x177F ; Tagbanwa\n 0x1780, # .. 0x17FF ; Khmer\n 0x1800, # .. 0x18AF ; Mongolian\n 0x18B0, # .. 0x18FF ; Unified Canadian Aboriginal Syllabics Extended\n 0x1900, # .. 0x194F ; Limbu\n 0x1950, # .. 0x197F ; Tai Le\n 0x1980, # .. 0x19DF ; New Tai Lue\n 0x19E0, # .. 0x19FF ; Khmer Symbols\n 0x1A00, # .. 0x1A1F ; Buginese\n 0x1A20, # .. 0x1AAF ; Tai Tham\n 0x1AB0, # .. 0x1AFF ; Combining Diacritical Marks Extended\n 0x1B00, # .. 0x1B7F ; Balinese\n 0x1B80, # .. 0x1BBF ; Sundanese\n 0x1BC0, # .. 0x1BFF ; Batak\n 0x1C00, # .. 0x1C4F ; Lepcha\n 0x1C50, # .. 0x1C7F ; Ol Chiki\n 0x1C80, # .. 0x1C8F ; Cyrillic Extended-C\n 0x1C90, # .. 0x1CBF ; Georgian Extended\n 0x1CC0, # .. 0x1CCF ; Sundanese Supplement\n 0x1CD0, # .. 0x1CFF ; Vedic Extensions\n 0x1D00, # .. 0x1D7F ; Phonetic Extensions\n 0x1D80, # .. 0x1DBF ; Phonetic Extensions Supplement\n 0x1DC0, # .. 0x1DFF ; Combining Diacritical Marks Supplement\n 0x1E00, # .. 0x1EFF ; Latin Extended Additional\n 0x1F00, # .. 0x1FFF ; Greek Extended\n 0x2000, # .. 0x206F ; General Punctuation\n 0x2070, # .. 0x209F ; Superscripts and Subscripts\n 0x20A0, # .. 0x20CF ; Currency Symbols\n 0x20D0, # .. 0x20FF ; Combining Diacritical Marks for Symbols\n 0x2100, # .. 0x214F ; Letterlike Symbols\n 0x2150, # .. 0x218F ; Number Forms\n 0x2190, # .. 0x21FF ; Arrows\n 0x2200, # .. 0x22FF ; Mathematical Operators\n 0x2300, # .. 0x23FF ; Miscellaneous Technical\n 0x2400, # .. 0x243F ; Control Pictures\n 0x2440, # .. 0x245F ; Optical Character Recognition\n 0x2460, # .. 0x24FF ; Enclosed Alphanumerics\n 0x2500, # .. 0x257F ; Box Drawing\n 0x2580, # .. 0x259F ; Block Elements\n 0x25A0, # .. 0x25FF ; Geometric Shapes\n 0x2600, # .. 0x26FF ; Miscellaneous Symbols\n 0x2700, # .. 0x27BF ; Dingbats\n 0x27C0, # .. 0x27EF ; Miscellaneous Mathematical Symbols-A\n 0x27F0, # .. 0x27FF ; Supplemental Arrows-A\n 0x2800, # .. 0x28FF ; Braille Patterns\n 0x2900, # .. 0x297F ; Supplemental Arrows-B\n 0x2980, # .. 0x29FF ; Miscellaneous Mathematical Symbols-B\n 0x2A00, # .. 0x2AFF ; Supplemental Mathematical Operators\n 0x2B00, # .. 0x2BFF ; Miscellaneous Symbols and Arrows\n 0x2C00, # .. 0x2C5F ; Glagolitic\n 0x2C60, # .. 0x2C7F ; Latin Extended-C\n 0x2C80, # .. 0x2CFF ; Coptic\n 0x2D00, # .. 0x2D2F ; Georgian Supplement\n 0x2D30, # .. 0x2D7F ; Tifinagh\n 0x2D80, # .. 0x2DDF ; Ethiopic Extended\n 0x2DE0, # .. 0x2DFF ; Cyrillic Extended-A\n 0x2E00, # .. 0x2E7F ; Supplemental Punctuation\n 0x2E80, # .. 0x2EFF ; CJK Radicals Supplement\n 0x2F00, # .. 0x2FDF ; Kangxi Radicals\n 0x2FE0, # .. 0x2FEF ; No_Block\n 0x2FF0, # .. 0x2FFF ; Ideographic Description Characters\n 0x3000, # .. 0x303F ; CJK Symbols and Punctuation\n 0x3040, # .. 0x309F ; Hiragana\n 0x30A0, # .. 0x30FF ; Katakana\n 0x3100, # .. 0x312F ; Bopomofo\n 0x3130, # .. 0x318F ; Hangul Compatibility Jamo\n 0x3190, # .. 0x319F ; Kanbun\n 0x31A0, # .. 0x31BF ; Bopomofo Extended\n 0x31C0, # .. 0x31EF ; CJK Strokes\n 0x31F0, # .. 0x31FF ; Katakana Phonetic Extensions\n 0x3200, # .. 0x32FF ; Enclosed CJK Letters and Months\n 0x3300, # .. 0x33FF ; CJK Compatibility\n 0x3400, # .. 0x4DBF ; CJK Unified Ideographs Extension A\n 0x4DC0, # .. 0x4DFF ; Yijing Hexagram Symbols\n 0x4E00, # .. 0x9FFF ; CJK Unified Ideographs\n 0xA000, # .. 0xA48F ; Yi Syllables\n 0xA490, # .. 0xA4CF ; Yi Radicals\n 0xA4D0, # .. 0xA4FF ; Lisu\n 0xA500, # .. 0xA63F ; Vai\n 0xA640, # .. 0xA69F ; Cyrillic Extended-B\n 0xA6A0, # .. 0xA6FF ; Bamum\n 0xA700, # .. 0xA71F ; Modifier Tone Letters\n 0xA720, # .. 0xA7FF ; Latin Extended-D\n 0xA800, # .. 0xA82F ; Syloti Nagri\n 0xA830, # .. 0xA83F ; Common Indic Number Forms\n 0xA840, # .. 0xA87F ; Phags-pa\n 0xA880, # .. 0xA8DF ; Saurashtra\n 0xA8E0, # .. 0xA8FF ; Devanagari Extended\n 0xA900, # .. 0xA92F ; Kayah Li\n 0xA930, # .. 0xA95F ; Rejang\n 0xA960, # .. 0xA97F ; Hangul Jamo Extended-A\n 0xA980, # .. 0xA9DF ; Javanese\n 0xA9E0, # .. 0xA9FF ; Myanmar Extended-B\n 0xAA00, # .. 0xAA5F ; Cham\n 0xAA60, # .. 0xAA7F ; Myanmar Extended-A\n 0xAA80, # .. 0xAADF ; Tai Viet\n 0xAAE0, # .. 0xAAFF ; Meetei Mayek Extensions\n 0xAB00, # .. 0xAB2F ; Ethiopic Extended-A\n 0xAB30, # .. 0xAB6F ; Latin Extended-E\n 0xAB70, # .. 0xABBF ; Cherokee Supplement\n 0xABC0, # .. 0xABFF ; Meetei Mayek\n 0xAC00, # .. 0xD7AF ; Hangul Syllables\n 0xD7B0, # .. 0xD7FF ; Hangul Jamo Extended-B\n 0xD800, # .. 0xDB7F ; High Surrogates\n 0xDB80, # .. 0xDBFF ; High Private Use Surrogates\n 0xDC00, # .. 0xDFFF ; Low Surrogates\n 0xE000, # .. 0xF8FF ; Private Use Area\n 0xF900, # .. 0xFAFF ; CJK Compatibility Ideographs\n 0xFB00, # .. 0xFB4F ; Alphabetic Presentation Forms\n 0xFB50, # .. 0xFDFF ; Arabic Presentation Forms-A\n 0xFE00, # .. 0xFE0F ; Variation Selectors\n 0xFE10, # .. 0xFE1F ; Vertical Forms\n 0xFE20, # .. 0xFE2F ; Combining Half Marks\n 0xFE30, # .. 0xFE4F ; CJK Compatibility Forms\n 0xFE50, # .. 0xFE6F ; Small Form Variants\n 0xFE70, # .. 0xFEFF ; Arabic Presentation Forms-B\n 0xFF00, # .. 0xFFEF ; Halfwidth and Fullwidth Forms\n 0xFFF0, # .. 0xFFFF ; Specials\n 0x10000, # .. 0x1007F ; Linear B Syllabary\n 0x10080, # .. 0x100FF ; Linear B Ideograms\n 0x10100, # .. 0x1013F ; Aegean Numbers\n 0x10140, # .. 0x1018F ; Ancient Greek Numbers\n 0x10190, # .. 0x101CF ; Ancient Symbols\n 0x101D0, # .. 0x101FF ; Phaistos Disc\n 0x10200, # .. 0x1027F ; No_Block\n 0x10280, # .. 0x1029F ; Lycian\n 0x102A0, # .. 0x102DF ; Carian\n 0x102E0, # .. 0x102FF ; Coptic Epact Numbers\n 0x10300, # .. 0x1032F ; Old Italic\n 0x10330, # .. 0x1034F ; Gothic\n 0x10350, # .. 0x1037F ; Old Permic\n 0x10380, # .. 0x1039F ; Ugaritic\n 0x103A0, # .. 0x103DF ; Old Persian\n 0x103E0, # .. 0x103FF ; No_Block\n 0x10400, # .. 0x1044F ; Deseret\n 0x10450, # .. 0x1047F ; Shavian\n 0x10480, # .. 0x104AF ; Osmanya\n 0x104B0, # .. 0x104FF ; Osage\n 0x10500, # .. 0x1052F ; Elbasan\n 0x10530, # .. 0x1056F ; Caucasian Albanian\n 0x10570, # .. 0x105BF ; Vithkuqi\n 0x105C0, # .. 0x105FF ; Todhri\n 0x10600, # .. 0x1077F ; Linear A\n 0x10780, # .. 0x107BF ; Latin Extended-F\n 0x107C0, # .. 0x107FF ; No_Block\n 0x10800, # .. 0x1083F ; Cypriot Syllabary\n 0x10840, # .. 0x1085F ; Imperial Aramaic\n 0x10860, # .. 0x1087F ; Palmyrene\n 0x10880, # .. 0x108AF ; Nabataean\n 0x108B0, # .. 0x108DF ; No_Block\n 0x108E0, # .. 0x108FF ; Hatran\n 0x10900, # .. 0x1091F ; Phoenician\n 0x10920, # .. 0x1093F ; Lydian\n 0x10940, # .. 0x1097F ; No_Block\n 0x10980, # .. 0x1099F ; Meroitic Hieroglyphs\n 0x109A0, # .. 0x109FF ; Meroitic Cursive\n 0x10A00, # .. 0x10A5F ; Kharoshthi\n 0x10A60, # .. 0x10A7F ; Old South Arabian\n 0x10A80, # .. 0x10A9F ; Old North Arabian\n 0x10AA0, # .. 0x10ABF ; No_Block\n 0x10AC0, # .. 0x10AFF ; Manichaean\n 0x10B00, # .. 0x10B3F ; Avestan\n 0x10B40, # .. 0x10B5F ; Inscriptional Parthian\n 0x10B60, # .. 0x10B7F ; Inscriptional Pahlavi\n 0x10B80, # .. 0x10BAF ; Psalter Pahlavi\n 0x10BB0, # .. 0x10BFF ; No_Block\n 0x10C00, # .. 0x10C4F ; Old Turkic\n 0x10C50, # .. 0x10C7F ; No_Block\n 0x10C80, # .. 0x10CFF ; Old Hungarian\n 0x10D00, # .. 0x10D3F ; Hanifi Rohingya\n 0x10D40, # .. 0x10D8F ; Garay\n 0x10D90, # .. 0x10E5F ; No_Block\n 0x10E60, # .. 0x10E7F ; Rumi Numeral Symbols\n 0x10E80, # .. 0x10EBF ; Yezidi\n 0x10EC0, # .. 0x10EFF ; Arabic Extended-C\n 0x10F00, # .. 0x10F2F ; Old Sogdian\n 0x10F30, # .. 0x10F6F ; Sogdian\n 0x10F70, # .. 0x10FAF ; Old Uyghur\n 0x10FB0, # .. 0x10FDF ; Chorasmian\n 0x10FE0, # .. 0x10FFF ; Elymaic\n 0x11000, # .. 0x1107F ; Brahmi\n 0x11080, # .. 0x110CF ; Kaithi\n 0x110D0, # .. 0x110FF ; Sora Sompeng\n 0x11100, # .. 0x1114F ; Chakma\n 0x11150, # .. 0x1117F ; Mahajani\n 0x11180, # .. 0x111DF ; Sharada\n 0x111E0, # .. 0x111FF ; Sinhala Archaic Numbers\n 0x11200, # .. 0x1124F ; Khojki\n 0x11250, # .. 0x1127F ; No_Block\n 0x11280, # .. 0x112AF ; Multani\n 0x112B0, # .. 0x112FF ; Khudawadi\n 0x11300, # .. 0x1137F ; Grantha\n 0x11380, # .. 0x113FF ; Tulu-Tigalari\n 0x11400, # .. 0x1147F ; Newa\n 0x11480, # .. 0x114DF ; Tirhuta\n 0x114E0, # .. 0x1157F ; No_Block\n 0x11580, # .. 0x115FF ; Siddham\n 0x11600, # .. 0x1165F ; Modi\n 0x11660, # .. 0x1167F ; Mongolian Supplement\n 0x11680, # .. 0x116CF ; Takri\n 0x116D0, # .. 0x116FF ; Myanmar Extended-C\n 0x11700, # .. 0x1174F ; Ahom\n 0x11750, # .. 0x117FF ; No_Block\n 0x11800, # .. 0x1184F ; Dogra\n 0x11850, # .. 0x1189F ; No_Block\n 0x118A0, # .. 0x118FF ; Warang Citi\n 0x11900, # .. 0x1195F ; Dives Akuru\n 0x11960, # .. 0x1199F ; No_Block\n 0x119A0, # .. 0x119FF ; Nandinagari\n 0x11A00, # .. 0x11A4F ; Zanabazar Square\n 0x11A50, # .. 0x11AAF ; Soyombo\n 0x11AB0, # .. 0x11ABF ; Unified Canadian Aboriginal Syllabics Extended-A\n 0x11AC0, # .. 0x11AFF ; Pau Cin Hau\n 0x11B00, # .. 0x11B5F ; Devanagari Extended-A\n 0x11B60, # .. 0x11BBF ; No_Block\n 0x11BC0, # .. 0x11BFF ; Sunuwar\n 0x11C00, # .. 0x11C6F ; Bhaiksuki\n 0x11C70, # .. 0x11CBF ; Marchen\n 0x11CC0, # .. 0x11CFF ; No_Block\n 0x11D00, # .. 0x11D5F ; Masaram Gondi\n 0x11D60, # .. 0x11DAF ; Gunjala Gondi\n 0x11DB0, # .. 0x11EDF ; No_Block\n 0x11EE0, # .. 0x11EFF ; Makasar\n 0x11F00, # .. 0x11F5F ; Kawi\n 0x11F60, # .. 0x11FAF ; No_Block\n 0x11FB0, # .. 0x11FBF ; Lisu Supplement\n 0x11FC0, # .. 0x11FFF ; Tamil Supplement\n 0x12000, # .. 0x123FF ; Cuneiform\n 0x12400, # .. 0x1247F ; Cuneiform Numbers and Punctuation\n 0x12480, # .. 0x1254F ; Early Dynastic Cuneiform\n 0x12550, # .. 0x12F8F ; No_Block\n 0x12F90, # .. 0x12FFF ; Cypro-Minoan\n 0x13000, # .. 0x1342F ; Egyptian Hieroglyphs\n 0x13430, # .. 0x1345F ; Egyptian Hieroglyph Format Controls\n 0x13460, # .. 0x143FF ; Egyptian Hieroglyphs Extended-A\n 0x14400, # .. 0x1467F ; Anatolian Hieroglyphs\n 0x14680, # .. 0x160FF ; No_Block\n 0x16100, # .. 0x1613F ; Gurung Khema\n 0x16140, # .. 0x167FF ; No_Block\n 0x16800, # .. 0x16A3F ; Bamum Supplement\n 0x16A40, # .. 0x16A6F ; Mro\n 0x16A70, # .. 0x16ACF ; Tangsa\n 0x16AD0, # .. 0x16AFF ; Bassa Vah\n 0x16B00, # .. 0x16B8F ; Pahawh Hmong\n 0x16B90, # .. 0x16D3F ; No_Block\n 0x16D40, # .. 0x16D7F ; Kirat Rai\n 0x16D80, # .. 0x16E3F ; No_Block\n 0x16E40, # .. 0x16E9F ; Medefaidrin\n 0x16EA0, # .. 0x16EFF ; No_Block\n 0x16F00, # .. 0x16F9F ; Miao\n 0x16FA0, # .. 0x16FDF ; No_Block\n 0x16FE0, # .. 0x16FFF ; Ideographic Symbols and Punctuation\n 0x17000, # .. 0x187FF ; Tangut\n 0x18800, # .. 0x18AFF ; Tangut Components\n 0x18B00, # .. 0x18CFF ; Khitan Small Script\n 0x18D00, # .. 0x18D7F ; Tangut Supplement\n 0x18D80, # .. 0x1AFEF ; No_Block\n 0x1AFF0, # .. 0x1AFFF ; Kana Extended-B\n 0x1B000, # .. 0x1B0FF ; Kana Supplement\n 0x1B100, # .. 0x1B12F ; Kana Extended-A\n 0x1B130, # .. 0x1B16F ; Small Kana Extension\n 0x1B170, # .. 0x1B2FF ; Nushu\n 0x1B300, # .. 0x1BBFF ; No_Block\n 0x1BC00, # .. 0x1BC9F ; Duployan\n 0x1BCA0, # .. 0x1BCAF ; Shorthand Format Controls\n 0x1BCB0, # .. 0x1CBFF ; No_Block\n 0x1CC00, # .. 0x1CEBF ; Symbols for Legacy Computing Supplement\n 0x1CEC0, # .. 0x1CEFF ; No_Block\n 0x1CF00, # .. 0x1CFCF ; Znamenny Musical Notation\n 0x1CFD0, # .. 0x1CFFF ; No_Block\n 0x1D000, # .. 0x1D0FF ; Byzantine Musical Symbols\n 0x1D100, # .. 0x1D1FF ; Musical Symbols\n 0x1D200, # .. 0x1D24F ; Ancient Greek Musical Notation\n 0x1D250, # .. 0x1D2BF ; No_Block\n 0x1D2C0, # .. 0x1D2DF ; Kaktovik Numerals\n 0x1D2E0, # .. 0x1D2FF ; Mayan Numerals\n 0x1D300, # .. 0x1D35F ; Tai Xuan Jing Symbols\n 0x1D360, # .. 0x1D37F ; Counting Rod Numerals\n 0x1D380, # .. 0x1D3FF ; No_Block\n 0x1D400, # .. 0x1D7FF ; Mathematical Alphanumeric Symbols\n 0x1D800, # .. 0x1DAAF ; Sutton SignWriting\n 0x1DAB0, # .. 0x1DEFF ; No_Block\n 0x1DF00, # .. 0x1DFFF ; Latin Extended-G\n 0x1E000, # .. 0x1E02F ; Glagolitic Supplement\n 0x1E030, # .. 0x1E08F ; Cyrillic Extended-D\n 0x1E090, # .. 0x1E0FF ; No_Block\n 0x1E100, # .. 0x1E14F ; Nyiakeng Puachue Hmong\n 0x1E150, # .. 0x1E28F ; No_Block\n 0x1E290, # .. 0x1E2BF ; Toto\n 0x1E2C0, # .. 0x1E2FF ; Wancho\n 0x1E300, # .. 0x1E4CF ; No_Block\n 0x1E4D0, # .. 0x1E4FF ; Nag Mundari\n 0x1E500, # .. 0x1E5CF ; No_Block\n 0x1E5D0, # .. 0x1E5FF ; Ol Onal\n 0x1E600, # .. 0x1E7DF ; No_Block\n 0x1E7E0, # .. 0x1E7FF ; Ethiopic Extended-B\n 0x1E800, # .. 0x1E8DF ; Mende Kikakui\n 0x1E8E0, # .. 0x1E8FF ; No_Block\n 0x1E900, # .. 0x1E95F ; Adlam\n 0x1E960, # .. 0x1EC6F ; No_Block\n 0x1EC70, # .. 0x1ECBF ; Indic Siyaq Numbers\n 0x1ECC0, # .. 0x1ECFF ; No_Block\n 0x1ED00, # .. 0x1ED4F ; Ottoman Siyaq Numbers\n 0x1ED50, # .. 0x1EDFF ; No_Block\n 0x1EE00, # .. 0x1EEFF ; Arabic Mathematical Alphabetic Symbols\n 0x1EF00, # .. 0x1EFFF ; No_Block\n 0x1F000, # .. 0x1F02F ; Mahjong Tiles\n 0x1F030, # .. 0x1F09F ; Domino Tiles\n 0x1F0A0, # .. 0x1F0FF ; Playing Cards\n 0x1F100, # .. 0x1F1FF ; Enclosed Alphanumeric Supplement\n 0x1F200, # .. 0x1F2FF ; Enclosed Ideographic Supplement\n 0x1F300, # .. 0x1F5FF ; Miscellaneous Symbols and Pictographs\n 0x1F600, # .. 0x1F64F ; Emoticons\n 0x1F650, # .. 0x1F67F ; Ornamental Dingbats\n 0x1F680, # .. 0x1F6FF ; Transport and Map Symbols\n 0x1F700, # .. 0x1F77F ; Alchemical Symbols\n 0x1F780, # .. 0x1F7FF ; Geometric Shapes Extended\n 0x1F800, # .. 0x1F8FF ; Supplemental Arrows-C\n 0x1F900, # .. 0x1F9FF ; Supplemental Symbols and Pictographs\n 0x1FA00, # .. 0x1FA6F ; Chess Symbols\n 0x1FA70, # .. 0x1FAFF ; Symbols and Pictographs Extended-A\n 0x1FB00, # .. 0x1FBFF ; Symbols for Legacy Computing\n 0x1FC00, # .. 0x1FFFF ; No_Block\n 0x20000, # .. 0x2A6DF ; CJK Unified Ideographs Extension B\n 0x2A6E0, # .. 0x2A6FF ; No_Block\n 0x2A700, # .. 0x2B73F ; CJK Unified Ideographs Extension C\n 0x2B740, # .. 0x2B81F ; CJK Unified Ideographs Extension D\n 0x2B820, # .. 0x2CEAF ; CJK Unified Ideographs Extension E\n 0x2CEB0, # .. 0x2EBEF ; CJK Unified Ideographs Extension F\n 0x2EBF0, # .. 0x2EE5F ; CJK Unified Ideographs Extension I\n 0x2EE60, # .. 0x2F7FF ; No_Block\n 0x2F800, # .. 0x2FA1F ; CJK Compatibility Ideographs Supplement\n 0x2FA20, # .. 0x2FFFF ; No_Block\n 0x30000, # .. 0x3134F ; CJK Unified Ideographs Extension G\n 0x31350, # .. 0x323AF ; CJK Unified Ideographs Extension H\n 0x323B0, # .. 0xDFFFF ; No_Block\n 0xE0000, # .. 0xE007F ; Tags\n 0xE0080, # .. 0xE00FF ; No_Block\n 0xE0100, # .. 0xE01EF ; Variation Selectors Supplement\n 0xE01F0, # .. 0xEFFFF ; No_Block\n 0xF0000, # .. 0xFFFFF ; Supplementary Private Use Area-A\n 0x100000, # .. 0x10FFFF ; Supplementary Private Use Area-B\n]\n\nVALUES = [\n "Basic Latin", # 0000..007F\n "Latin-1 Supplement", # 0080..00FF\n "Latin Extended-A", # 0100..017F\n "Latin Extended-B", # 0180..024F\n "IPA Extensions", # 0250..02AF\n "Spacing Modifier Letters", # 02B0..02FF\n "Combining Diacritical Marks", # 0300..036F\n "Greek and Coptic", # 0370..03FF\n "Cyrillic", # 0400..04FF\n "Cyrillic Supplement", # 0500..052F\n "Armenian", # 0530..058F\n "Hebrew", # 0590..05FF\n "Arabic", # 0600..06FF\n "Syriac", # 0700..074F\n "Arabic Supplement", # 0750..077F\n "Thaana", # 0780..07BF\n "NKo", # 07C0..07FF\n "Samaritan", # 0800..083F\n "Mandaic", # 0840..085F\n "Syriac Supplement", # 0860..086F\n "Arabic Extended-B", # 0870..089F\n "Arabic Extended-A", # 08A0..08FF\n "Devanagari", # 0900..097F\n "Bengali", # 0980..09FF\n "Gurmukhi", # 0A00..0A7F\n "Gujarati", # 0A80..0AFF\n "Oriya", # 0B00..0B7F\n "Tamil", # 0B80..0BFF\n "Telugu", # 0C00..0C7F\n "Kannada", # 0C80..0CFF\n "Malayalam", # 0D00..0D7F\n "Sinhala", # 0D80..0DFF\n "Thai", # 0E00..0E7F\n "Lao", # 0E80..0EFF\n "Tibetan", # 0F00..0FFF\n "Myanmar", # 1000..109F\n "Georgian", # 10A0..10FF\n "Hangul Jamo", # 1100..11FF\n "Ethiopic", # 1200..137F\n "Ethiopic Supplement", # 1380..139F\n "Cherokee", # 13A0..13FF\n "Unified Canadian Aboriginal Syllabics", # 1400..167F\n "Ogham", # 1680..169F\n "Runic", # 16A0..16FF\n "Tagalog", # 1700..171F\n "Hanunoo", # 1720..173F\n "Buhid", # 1740..175F\n "Tagbanwa", # 1760..177F\n "Khmer", # 1780..17FF\n "Mongolian", # 1800..18AF\n "Unified Canadian Aboriginal Syllabics Extended", # 18B0..18FF\n "Limbu", # 1900..194F\n "Tai Le", # 1950..197F\n "New Tai Lue", # 1980..19DF\n "Khmer Symbols", # 19E0..19FF\n "Buginese", # 1A00..1A1F\n "Tai Tham", # 1A20..1AAF\n "Combining Diacritical Marks Extended", # 1AB0..1AFF\n "Balinese", # 1B00..1B7F\n "Sundanese", # 1B80..1BBF\n "Batak", # 1BC0..1BFF\n "Lepcha", # 1C00..1C4F\n "Ol Chiki", # 1C50..1C7F\n "Cyrillic Extended-C", # 1C80..1C8F\n "Georgian Extended", # 1C90..1CBF\n "Sundanese Supplement", # 1CC0..1CCF\n "Vedic Extensions", # 1CD0..1CFF\n "Phonetic Extensions", # 1D00..1D7F\n "Phonetic Extensions Supplement", # 1D80..1DBF\n "Combining Diacritical Marks Supplement", # 1DC0..1DFF\n "Latin Extended Additional", # 1E00..1EFF\n "Greek Extended", # 1F00..1FFF\n "General Punctuation", # 2000..206F\n "Superscripts and Subscripts", # 2070..209F\n "Currency Symbols", # 20A0..20CF\n "Combining Diacritical Marks for Symbols", # 20D0..20FF\n "Letterlike Symbols", # 2100..214F\n "Number Forms", # 2150..218F\n "Arrows", # 2190..21FF\n "Mathematical Operators", # 2200..22FF\n "Miscellaneous Technical", # 2300..23FF\n "Control Pictures", # 2400..243F\n "Optical Character Recognition", # 2440..245F\n "Enclosed Alphanumerics", # 2460..24FF\n "Box Drawing", # 2500..257F\n "Block Elements", # 2580..259F\n "Geometric Shapes", # 25A0..25FF\n "Miscellaneous Symbols", # 2600..26FF\n "Dingbats", # 2700..27BF\n "Miscellaneous Mathematical Symbols-A", # 27C0..27EF\n "Supplemental Arrows-A", # 27F0..27FF\n "Braille Patterns", # 2800..28FF\n "Supplemental Arrows-B", # 2900..297F\n "Miscellaneous Mathematical Symbols-B", # 2980..29FF\n "Supplemental Mathematical Operators", # 2A00..2AFF\n "Miscellaneous Symbols and Arrows", # 2B00..2BFF\n "Glagolitic", # 2C00..2C5F\n "Latin Extended-C", # 2C60..2C7F\n "Coptic", # 2C80..2CFF\n "Georgian Supplement", # 2D00..2D2F\n "Tifinagh", # 2D30..2D7F\n "Ethiopic Extended", # 2D80..2DDF\n "Cyrillic Extended-A", # 2DE0..2DFF\n "Supplemental Punctuation", # 2E00..2E7F\n "CJK Radicals Supplement", # 2E80..2EFF\n "Kangxi Radicals", # 2F00..2FDF\n "No_Block", # 2FE0..2FEF\n "Ideographic Description Characters", # 2FF0..2FFF\n "CJK Symbols and Punctuation", # 3000..303F\n "Hiragana", # 3040..309F\n "Katakana", # 30A0..30FF\n "Bopomofo", # 3100..312F\n "Hangul Compatibility Jamo", # 3130..318F\n "Kanbun", # 3190..319F\n "Bopomofo Extended", # 31A0..31BF\n "CJK Strokes", # 31C0..31EF\n "Katakana Phonetic Extensions", # 31F0..31FF\n "Enclosed CJK Letters and Months", # 3200..32FF\n "CJK Compatibility", # 3300..33FF\n "CJK Unified Ideographs Extension A", # 3400..4DBF\n "Yijing Hexagram Symbols", # 4DC0..4DFF\n "CJK Unified Ideographs", # 4E00..9FFF\n "Yi Syllables", # A000..A48F\n "Yi Radicals", # A490..A4CF\n "Lisu", # A4D0..A4FF\n "Vai", # A500..A63F\n "Cyrillic Extended-B", # A640..A69F\n "Bamum", # A6A0..A6FF\n "Modifier Tone Letters", # A700..A71F\n "Latin Extended-D", # A720..A7FF\n "Syloti Nagri", # A800..A82F\n "Common Indic Number Forms", # A830..A83F\n "Phags-pa", # A840..A87F\n "Saurashtra", # A880..A8DF\n "Devanagari Extended", # A8E0..A8FF\n "Kayah Li", # A900..A92F\n "Rejang", # A930..A95F\n "Hangul Jamo Extended-A", # A960..A97F\n "Javanese", # A980..A9DF\n "Myanmar Extended-B", # A9E0..A9FF\n "Cham", # AA00..AA5F\n "Myanmar Extended-A", # AA60..AA7F\n "Tai Viet", # AA80..AADF\n "Meetei Mayek Extensions", # AAE0..AAFF\n "Ethiopic Extended-A", # AB00..AB2F\n "Latin Extended-E", # AB30..AB6F\n "Cherokee Supplement", # AB70..ABBF\n "Meetei Mayek", # ABC0..ABFF\n "Hangul Syllables", # AC00..D7AF\n "Hangul Jamo Extended-B", # D7B0..D7FF\n "High Surrogates", # D800..DB7F\n "High Private Use Surrogates", # DB80..DBFF\n "Low Surrogates", # DC00..DFFF\n "Private Use Area", # E000..F8FF\n "CJK Compatibility Ideographs", # F900..FAFF\n "Alphabetic Presentation Forms", # FB00..FB4F\n "Arabic Presentation Forms-A", # FB50..FDFF\n "Variation Selectors", # FE00..FE0F\n "Vertical Forms", # FE10..FE1F\n "Combining Half Marks", # FE20..FE2F\n "CJK Compatibility Forms", # FE30..FE4F\n "Small Form Variants", # FE50..FE6F\n "Arabic Presentation Forms-B", # FE70..FEFF\n "Halfwidth and Fullwidth Forms", # FF00..FFEF\n "Specials", # FFF0..FFFF\n "Linear B Syllabary", # 10000..1007F\n "Linear B Ideograms", # 10080..100FF\n "Aegean Numbers", # 10100..1013F\n "Ancient Greek Numbers", # 10140..1018F\n "Ancient Symbols", # 10190..101CF\n "Phaistos Disc", # 101D0..101FF\n "No_Block", # 10200..1027F\n "Lycian", # 10280..1029F\n "Carian", # 102A0..102DF\n "Coptic Epact Numbers", # 102E0..102FF\n "Old Italic", # 10300..1032F\n "Gothic", # 10330..1034F\n "Old Permic", # 10350..1037F\n "Ugaritic", # 10380..1039F\n "Old Persian", # 103A0..103DF\n "No_Block", # 103E0..103FF\n "Deseret", # 10400..1044F\n "Shavian", # 10450..1047F\n "Osmanya", # 10480..104AF\n "Osage", # 104B0..104FF\n "Elbasan", # 10500..1052F\n "Caucasian Albanian", # 10530..1056F\n "Vithkuqi", # 10570..105BF\n "Todhri", # 105C0..105FF\n "Linear A", # 10600..1077F\n "Latin Extended-F", # 10780..107BF\n "No_Block", # 107C0..107FF\n "Cypriot Syllabary", # 10800..1083F\n "Imperial Aramaic", # 10840..1085F\n "Palmyrene", # 10860..1087F\n "Nabataean", # 10880..108AF\n "No_Block", # 108B0..108DF\n "Hatran", # 108E0..108FF\n "Phoenician", # 10900..1091F\n "Lydian", # 10920..1093F\n "No_Block", # 10940..1097F\n "Meroitic Hieroglyphs", # 10980..1099F\n "Meroitic Cursive", # 109A0..109FF\n "Kharoshthi", # 10A00..10A5F\n "Old South Arabian", # 10A60..10A7F\n "Old North Arabian", # 10A80..10A9F\n "No_Block", # 10AA0..10ABF\n "Manichaean", # 10AC0..10AFF\n "Avestan", # 10B00..10B3F\n "Inscriptional Parthian", # 10B40..10B5F\n "Inscriptional Pahlavi", # 10B60..10B7F\n "Psalter Pahlavi", # 10B80..10BAF\n "No_Block", # 10BB0..10BFF\n "Old Turkic", # 10C00..10C4F\n "No_Block", # 10C50..10C7F\n "Old Hungarian", # 10C80..10CFF\n "Hanifi Rohingya", # 10D00..10D3F\n "Garay", # 10D40..10D8F\n "No_Block", # 10D90..10E5F\n "Rumi Numeral Symbols", # 10E60..10E7F\n "Yezidi", # 10E80..10EBF\n "Arabic Extended-C", # 10EC0..10EFF\n "Old Sogdian", # 10F00..10F2F\n "Sogdian", # 10F30..10F6F\n "Old Uyghur", # 10F70..10FAF\n "Chorasmian", # 10FB0..10FDF\n "Elymaic", # 10FE0..10FFF\n "Brahmi", # 11000..1107F\n "Kaithi", # 11080..110CF\n "Sora Sompeng", # 110D0..110FF\n "Chakma", # 11100..1114F\n "Mahajani", # 11150..1117F\n "Sharada", # 11180..111DF\n "Sinhala Archaic Numbers", # 111E0..111FF\n "Khojki", # 11200..1124F\n "No_Block", # 11250..1127F\n "Multani", # 11280..112AF\n "Khudawadi", # 112B0..112FF\n "Grantha", # 11300..1137F\n "Tulu-Tigalari", # 11380..113FF\n "Newa", # 11400..1147F\n "Tirhuta", # 11480..114DF\n "No_Block", # 114E0..1157F\n "Siddham", # 11580..115FF\n "Modi", # 11600..1165F\n "Mongolian Supplement", # 11660..1167F\n "Takri", # 11680..116CF\n "Myanmar Extended-C", # 116D0..116FF\n "Ahom", # 11700..1174F\n "No_Block", # 11750..117FF\n "Dogra", # 11800..1184F\n "No_Block", # 11850..1189F\n "Warang Citi", # 118A0..118FF\n "Dives Akuru", # 11900..1195F\n "No_Block", # 11960..1199F\n "Nandinagari", # 119A0..119FF\n "Zanabazar Square", # 11A00..11A4F\n "Soyombo", # 11A50..11AAF\n "Unified Canadian Aboriginal Syllabics Extended-A", # 11AB0..11ABF\n "Pau Cin Hau", # 11AC0..11AFF\n "Devanagari Extended-A", # 11B00..11B5F\n "No_Block", # 11B60..11BBF\n "Sunuwar", # 11BC0..11BFF\n "Bhaiksuki", # 11C00..11C6F\n "Marchen", # 11C70..11CBF\n "No_Block", # 11CC0..11CFF\n "Masaram Gondi", # 11D00..11D5F\n "Gunjala Gondi", # 11D60..11DAF\n "No_Block", # 11DB0..11EDF\n "Makasar", # 11EE0..11EFF\n "Kawi", # 11F00..11F5F\n "No_Block", # 11F60..11FAF\n "Lisu Supplement", # 11FB0..11FBF\n "Tamil Supplement", # 11FC0..11FFF\n "Cuneiform", # 12000..123FF\n "Cuneiform Numbers and Punctuation", # 12400..1247F\n "Early Dynastic Cuneiform", # 12480..1254F\n "No_Block", # 12550..12F8F\n "Cypro-Minoan", # 12F90..12FFF\n "Egyptian Hieroglyphs", # 13000..1342F\n "Egyptian Hieroglyph Format Controls", # 13430..1345F\n "Egyptian Hieroglyphs Extended-A", # 13460..143FF\n "Anatolian Hieroglyphs", # 14400..1467F\n "No_Block", # 14680..160FF\n "Gurung Khema", # 16100..1613F\n "No_Block", # 16140..167FF\n "Bamum Supplement", # 16800..16A3F\n "Mro", # 16A40..16A6F\n "Tangsa", # 16A70..16ACF\n "Bassa Vah", # 16AD0..16AFF\n "Pahawh Hmong", # 16B00..16B8F\n "No_Block", # 16B90..16D3F\n "Kirat Rai", # 16D40..16D7F\n "No_Block", # 16D80..16E3F\n "Medefaidrin", # 16E40..16E9F\n "No_Block", # 16EA0..16EFF\n "Miao", # 16F00..16F9F\n "No_Block", # 16FA0..16FDF\n "Ideographic Symbols and Punctuation", # 16FE0..16FFF\n "Tangut", # 17000..187FF\n "Tangut Components", # 18800..18AFF\n "Khitan Small Script", # 18B00..18CFF\n "Tangut Supplement", # 18D00..18D7F\n "No_Block", # 18D80..1AFEF\n "Kana Extended-B", # 1AFF0..1AFFF\n "Kana Supplement", # 1B000..1B0FF\n "Kana Extended-A", # 1B100..1B12F\n "Small Kana Extension", # 1B130..1B16F\n "Nushu", # 1B170..1B2FF\n "No_Block", # 1B300..1BBFF\n "Duployan", # 1BC00..1BC9F\n "Shorthand Format Controls", # 1BCA0..1BCAF\n "No_Block", # 1BCB0..1CBFF\n "Symbols for Legacy Computing Supplement", # 1CC00..1CEBF\n "No_Block", # 1CEC0..1CEFF\n "Znamenny Musical Notation", # 1CF00..1CFCF\n "No_Block", # 1CFD0..1CFFF\n "Byzantine Musical Symbols", # 1D000..1D0FF\n "Musical Symbols", # 1D100..1D1FF\n "Ancient Greek Musical Notation", # 1D200..1D24F\n "No_Block", # 1D250..1D2BF\n "Kaktovik Numerals", # 1D2C0..1D2DF\n "Mayan Numerals", # 1D2E0..1D2FF\n "Tai Xuan Jing Symbols", # 1D300..1D35F\n "Counting Rod Numerals", # 1D360..1D37F\n "No_Block", # 1D380..1D3FF\n "Mathematical Alphanumeric Symbols", # 1D400..1D7FF\n "Sutton SignWriting", # 1D800..1DAAF\n "No_Block", # 1DAB0..1DEFF\n "Latin Extended-G", # 1DF00..1DFFF\n "Glagolitic Supplement", # 1E000..1E02F\n "Cyrillic Extended-D", # 1E030..1E08F\n "No_Block", # 1E090..1E0FF\n "Nyiakeng Puachue Hmong", # 1E100..1E14F\n "No_Block", # 1E150..1E28F\n "Toto", # 1E290..1E2BF\n "Wancho", # 1E2C0..1E2FF\n "No_Block", # 1E300..1E4CF\n "Nag Mundari", # 1E4D0..1E4FF\n "No_Block", # 1E500..1E5CF\n "Ol Onal", # 1E5D0..1E5FF\n "No_Block", # 1E600..1E7DF\n "Ethiopic Extended-B", # 1E7E0..1E7FF\n "Mende Kikakui", # 1E800..1E8DF\n "No_Block", # 1E8E0..1E8FF\n "Adlam", # 1E900..1E95F\n "No_Block", # 1E960..1EC6F\n "Indic Siyaq Numbers", # 1EC70..1ECBF\n "No_Block", # 1ECC0..1ECFF\n "Ottoman Siyaq Numbers", # 1ED00..1ED4F\n "No_Block", # 1ED50..1EDFF\n "Arabic Mathematical Alphabetic Symbols", # 1EE00..1EEFF\n "No_Block", # 1EF00..1EFFF\n "Mahjong Tiles", # 1F000..1F02F\n "Domino Tiles", # 1F030..1F09F\n "Playing Cards", # 1F0A0..1F0FF\n "Enclosed Alphanumeric Supplement", # 1F100..1F1FF\n "Enclosed Ideographic Supplement", # 1F200..1F2FF\n "Miscellaneous Symbols and Pictographs", # 1F300..1F5FF\n "Emoticons", # 1F600..1F64F\n "Ornamental Dingbats", # 1F650..1F67F\n "Transport and Map Symbols", # 1F680..1F6FF\n "Alchemical Symbols", # 1F700..1F77F\n "Geometric Shapes Extended", # 1F780..1F7FF\n "Supplemental Arrows-C", # 1F800..1F8FF\n "Supplemental Symbols and Pictographs", # 1F900..1F9FF\n "Chess Symbols", # 1FA00..1FA6F\n "Symbols and Pictographs Extended-A", # 1FA70..1FAFF\n "Symbols for Legacy Computing", # 1FB00..1FBFF\n "No_Block", # 1FC00..1FFFF\n "CJK Unified Ideographs Extension B", # 20000..2A6DF\n "No_Block", # 2A6E0..2A6FF\n "CJK Unified Ideographs Extension C", # 2A700..2B73F\n "CJK Unified Ideographs Extension D", # 2B740..2B81F\n "CJK Unified Ideographs Extension E", # 2B820..2CEAF\n "CJK Unified Ideographs Extension F", # 2CEB0..2EBEF\n "CJK Unified Ideographs Extension I", # 2EBF0..2EE5F\n "No_Block", # 2EE60..2F7FF\n "CJK Compatibility Ideographs Supplement", # 2F800..2FA1F\n "No_Block", # 2FA20..2FFFF\n "CJK Unified Ideographs Extension G", # 30000..3134F\n "CJK Unified Ideographs Extension H", # 31350..323AF\n "No_Block", # 323B0..DFFFF\n "Tags", # E0000..E007F\n "No_Block", # E0080..E00FF\n "Variation Selectors Supplement", # E0100..E01EF\n "No_Block", # E01F0..EFFFF\n "Supplementary Private Use Area-A", # F0000..FFFFF\n "Supplementary Private Use Area-B", # 100000..10FFFF\n]\n
.venv\Lib\site-packages\fontTools\unicodedata\Blocks.py
Blocks.py
Python
33,216
0.8
0.007491
0.021277
python-kit
365
2024-10-04T04:34:24.774324
BSD-3-Clause
false
ab481a5a3b274187542d550efc52506b
# -*- coding: utf-8 -*-\n#\n# NOTE: The mappings in this file were generated from the command line:\n# cat BidiMirroring.txt | grep "^[0-9A-F]" | sed "s/;//" | awk '{print " 0x"$1": 0x"$2","}'\n#\n# Source: http://www.unicode.org/Public/UNIDATA/BidiMirroring.txt\n# License: http://unicode.org/copyright.html#License\n#\n# BidiMirroring-16.0.0.txt\n# Date: 2024-01-30\n# © 2024 Unicode®, Inc.\n# Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries.\n# For terms of use and license, see https://www.unicode.org/terms_of_use.html\n#\n# Unicode Character Database\n# For documentation, see https://www.unicode.org/reports/tr44/\nMIRRORED = {\n 0x0028: 0x0029,\n 0x0029: 0x0028,\n 0x003C: 0x003E,\n 0x003E: 0x003C,\n 0x005B: 0x005D,\n 0x005D: 0x005B,\n 0x007B: 0x007D,\n 0x007D: 0x007B,\n 0x00AB: 0x00BB,\n 0x00BB: 0x00AB,\n 0x0F3A: 0x0F3B,\n 0x0F3B: 0x0F3A,\n 0x0F3C: 0x0F3D,\n 0x0F3D: 0x0F3C,\n 0x169B: 0x169C,\n 0x169C: 0x169B,\n 0x2039: 0x203A,\n 0x203A: 0x2039,\n 0x2045: 0x2046,\n 0x2046: 0x2045,\n 0x207D: 0x207E,\n 0x207E: 0x207D,\n 0x208D: 0x208E,\n 0x208E: 0x208D,\n 0x2208: 0x220B,\n 0x2209: 0x220C,\n 0x220A: 0x220D,\n 0x220B: 0x2208,\n 0x220C: 0x2209,\n 0x220D: 0x220A,\n 0x2215: 0x29F5,\n 0x221F: 0x2BFE,\n 0x2220: 0x29A3,\n 0x2221: 0x299B,\n 0x2222: 0x29A0,\n 0x2224: 0x2AEE,\n 0x223C: 0x223D,\n 0x223D: 0x223C,\n 0x2243: 0x22CD,\n 0x2245: 0x224C,\n 0x224C: 0x2245,\n 0x2252: 0x2253,\n 0x2253: 0x2252,\n 0x2254: 0x2255,\n 0x2255: 0x2254,\n 0x2264: 0x2265,\n 0x2265: 0x2264,\n 0x2266: 0x2267,\n 0x2267: 0x2266,\n 0x2268: 0x2269,\n 0x2269: 0x2268,\n 0x226A: 0x226B,\n 0x226B: 0x226A,\n 0x226E: 0x226F,\n 0x226F: 0x226E,\n 0x2270: 0x2271,\n 0x2271: 0x2270,\n 0x2272: 0x2273,\n 0x2273: 0x2272,\n 0x2274: 0x2275,\n 0x2275: 0x2274,\n 0x2276: 0x2277,\n 0x2277: 0x2276,\n 0x2278: 0x2279,\n 0x2279: 0x2278,\n 0x227A: 0x227B,\n 0x227B: 0x227A,\n 0x227C: 0x227D,\n 0x227D: 0x227C,\n 0x227E: 0x227F,\n 0x227F: 0x227E,\n 0x2280: 0x2281,\n 0x2281: 0x2280,\n 0x2282: 0x2283,\n 0x2283: 0x2282,\n 0x2284: 0x2285,\n 0x2285: 0x2284,\n 0x2286: 0x2287,\n 0x2287: 0x2286,\n 0x2288: 0x2289,\n 0x2289: 0x2288,\n 0x228A: 0x228B,\n 0x228B: 0x228A,\n 0x228F: 0x2290,\n 0x2290: 0x228F,\n 0x2291: 0x2292,\n 0x2292: 0x2291,\n 0x2298: 0x29B8,\n 0x22A2: 0x22A3,\n 0x22A3: 0x22A2,\n 0x22A6: 0x2ADE,\n 0x22A8: 0x2AE4,\n 0x22A9: 0x2AE3,\n 0x22AB: 0x2AE5,\n 0x22B0: 0x22B1,\n 0x22B1: 0x22B0,\n 0x22B2: 0x22B3,\n 0x22B3: 0x22B2,\n 0x22B4: 0x22B5,\n 0x22B5: 0x22B4,\n 0x22B6: 0x22B7,\n 0x22B7: 0x22B6,\n 0x22B8: 0x27DC,\n 0x22C9: 0x22CA,\n 0x22CA: 0x22C9,\n 0x22CB: 0x22CC,\n 0x22CC: 0x22CB,\n 0x22CD: 0x2243,\n 0x22D0: 0x22D1,\n 0x22D1: 0x22D0,\n 0x22D6: 0x22D7,\n 0x22D7: 0x22D6,\n 0x22D8: 0x22D9,\n 0x22D9: 0x22D8,\n 0x22DA: 0x22DB,\n 0x22DB: 0x22DA,\n 0x22DC: 0x22DD,\n 0x22DD: 0x22DC,\n 0x22DE: 0x22DF,\n 0x22DF: 0x22DE,\n 0x22E0: 0x22E1,\n 0x22E1: 0x22E0,\n 0x22E2: 0x22E3,\n 0x22E3: 0x22E2,\n 0x22E4: 0x22E5,\n 0x22E5: 0x22E4,\n 0x22E6: 0x22E7,\n 0x22E7: 0x22E6,\n 0x22E8: 0x22E9,\n 0x22E9: 0x22E8,\n 0x22EA: 0x22EB,\n 0x22EB: 0x22EA,\n 0x22EC: 0x22ED,\n 0x22ED: 0x22EC,\n 0x22F0: 0x22F1,\n 0x22F1: 0x22F0,\n 0x22F2: 0x22FA,\n 0x22F3: 0x22FB,\n 0x22F4: 0x22FC,\n 0x22F6: 0x22FD,\n 0x22F7: 0x22FE,\n 0x22FA: 0x22F2,\n 0x22FB: 0x22F3,\n 0x22FC: 0x22F4,\n 0x22FD: 0x22F6,\n 0x22FE: 0x22F7,\n 0x2308: 0x2309,\n 0x2309: 0x2308,\n 0x230A: 0x230B,\n 0x230B: 0x230A,\n 0x2329: 0x232A,\n 0x232A: 0x2329,\n 0x2768: 0x2769,\n 0x2769: 0x2768,\n 0x276A: 0x276B,\n 0x276B: 0x276A,\n 0x276C: 0x276D,\n 0x276D: 0x276C,\n 0x276E: 0x276F,\n 0x276F: 0x276E,\n 0x2770: 0x2771,\n 0x2771: 0x2770,\n 0x2772: 0x2773,\n 0x2773: 0x2772,\n 0x2774: 0x2775,\n 0x2775: 0x2774,\n 0x27C3: 0x27C4,\n 0x27C4: 0x27C3,\n 0x27C5: 0x27C6,\n 0x27C6: 0x27C5,\n 0x27C8: 0x27C9,\n 0x27C9: 0x27C8,\n 0x27CB: 0x27CD,\n 0x27CD: 0x27CB,\n 0x27D5: 0x27D6,\n 0x27D6: 0x27D5,\n 0x27DC: 0x22B8,\n 0x27DD: 0x27DE,\n 0x27DE: 0x27DD,\n 0x27E2: 0x27E3,\n 0x27E3: 0x27E2,\n 0x27E4: 0x27E5,\n 0x27E5: 0x27E4,\n 0x27E6: 0x27E7,\n 0x27E7: 0x27E6,\n 0x27E8: 0x27E9,\n 0x27E9: 0x27E8,\n 0x27EA: 0x27EB,\n 0x27EB: 0x27EA,\n 0x27EC: 0x27ED,\n 0x27ED: 0x27EC,\n 0x27EE: 0x27EF,\n 0x27EF: 0x27EE,\n 0x2983: 0x2984,\n 0x2984: 0x2983,\n 0x2985: 0x2986,\n 0x2986: 0x2985,\n 0x2987: 0x2988,\n 0x2988: 0x2987,\n 0x2989: 0x298A,\n 0x298A: 0x2989,\n 0x298B: 0x298C,\n 0x298C: 0x298B,\n 0x298D: 0x2990,\n 0x298E: 0x298F,\n 0x298F: 0x298E,\n 0x2990: 0x298D,\n 0x2991: 0x2992,\n 0x2992: 0x2991,\n 0x2993: 0x2994,\n 0x2994: 0x2993,\n 0x2995: 0x2996,\n 0x2996: 0x2995,\n 0x2997: 0x2998,\n 0x2998: 0x2997,\n 0x299B: 0x2221,\n 0x29A0: 0x2222,\n 0x29A3: 0x2220,\n 0x29A4: 0x29A5,\n 0x29A5: 0x29A4,\n 0x29A8: 0x29A9,\n 0x29A9: 0x29A8,\n 0x29AA: 0x29AB,\n 0x29AB: 0x29AA,\n 0x29AC: 0x29AD,\n 0x29AD: 0x29AC,\n 0x29AE: 0x29AF,\n 0x29AF: 0x29AE,\n 0x29B8: 0x2298,\n 0x29C0: 0x29C1,\n 0x29C1: 0x29C0,\n 0x29C4: 0x29C5,\n 0x29C5: 0x29C4,\n 0x29CF: 0x29D0,\n 0x29D0: 0x29CF,\n 0x29D1: 0x29D2,\n 0x29D2: 0x29D1,\n 0x29D4: 0x29D5,\n 0x29D5: 0x29D4,\n 0x29D8: 0x29D9,\n 0x29D9: 0x29D8,\n 0x29DA: 0x29DB,\n 0x29DB: 0x29DA,\n 0x29E8: 0x29E9,\n 0x29E9: 0x29E8,\n 0x29F5: 0x2215,\n 0x29F8: 0x29F9,\n 0x29F9: 0x29F8,\n 0x29FC: 0x29FD,\n 0x29FD: 0x29FC,\n 0x2A2B: 0x2A2C,\n 0x2A2C: 0x2A2B,\n 0x2A2D: 0x2A2E,\n 0x2A2E: 0x2A2D,\n 0x2A34: 0x2A35,\n 0x2A35: 0x2A34,\n 0x2A3C: 0x2A3D,\n 0x2A3D: 0x2A3C,\n 0x2A64: 0x2A65,\n 0x2A65: 0x2A64,\n 0x2A79: 0x2A7A,\n 0x2A7A: 0x2A79,\n 0x2A7B: 0x2A7C,\n 0x2A7C: 0x2A7B,\n 0x2A7D: 0x2A7E,\n 0x2A7E: 0x2A7D,\n 0x2A7F: 0x2A80,\n 0x2A80: 0x2A7F,\n 0x2A81: 0x2A82,\n 0x2A82: 0x2A81,\n 0x2A83: 0x2A84,\n 0x2A84: 0x2A83,\n 0x2A85: 0x2A86,\n 0x2A86: 0x2A85,\n 0x2A87: 0x2A88,\n 0x2A88: 0x2A87,\n 0x2A89: 0x2A8A,\n 0x2A8A: 0x2A89,\n 0x2A8B: 0x2A8C,\n 0x2A8C: 0x2A8B,\n 0x2A8D: 0x2A8E,\n 0x2A8E: 0x2A8D,\n 0x2A8F: 0x2A90,\n 0x2A90: 0x2A8F,\n 0x2A91: 0x2A92,\n 0x2A92: 0x2A91,\n 0x2A93: 0x2A94,\n 0x2A94: 0x2A93,\n 0x2A95: 0x2A96,\n 0x2A96: 0x2A95,\n 0x2A97: 0x2A98,\n 0x2A98: 0x2A97,\n 0x2A99: 0x2A9A,\n 0x2A9A: 0x2A99,\n 0x2A9B: 0x2A9C,\n 0x2A9C: 0x2A9B,\n 0x2A9D: 0x2A9E,\n 0x2A9E: 0x2A9D,\n 0x2A9F: 0x2AA0,\n 0x2AA0: 0x2A9F,\n 0x2AA1: 0x2AA2,\n 0x2AA2: 0x2AA1,\n 0x2AA6: 0x2AA7,\n 0x2AA7: 0x2AA6,\n 0x2AA8: 0x2AA9,\n 0x2AA9: 0x2AA8,\n 0x2AAA: 0x2AAB,\n 0x2AAB: 0x2AAA,\n 0x2AAC: 0x2AAD,\n 0x2AAD: 0x2AAC,\n 0x2AAF: 0x2AB0,\n 0x2AB0: 0x2AAF,\n 0x2AB1: 0x2AB2,\n 0x2AB2: 0x2AB1,\n 0x2AB3: 0x2AB4,\n 0x2AB4: 0x2AB3,\n 0x2AB5: 0x2AB6,\n 0x2AB6: 0x2AB5,\n 0x2AB7: 0x2AB8,\n 0x2AB8: 0x2AB7,\n 0x2AB9: 0x2ABA,\n 0x2ABA: 0x2AB9,\n 0x2ABB: 0x2ABC,\n 0x2ABC: 0x2ABB,\n 0x2ABD: 0x2ABE,\n 0x2ABE: 0x2ABD,\n 0x2ABF: 0x2AC0,\n 0x2AC0: 0x2ABF,\n 0x2AC1: 0x2AC2,\n 0x2AC2: 0x2AC1,\n 0x2AC3: 0x2AC4,\n 0x2AC4: 0x2AC3,\n 0x2AC5: 0x2AC6,\n 0x2AC6: 0x2AC5,\n 0x2AC7: 0x2AC8,\n 0x2AC8: 0x2AC7,\n 0x2AC9: 0x2ACA,\n 0x2ACA: 0x2AC9,\n 0x2ACB: 0x2ACC,\n 0x2ACC: 0x2ACB,\n 0x2ACD: 0x2ACE,\n 0x2ACE: 0x2ACD,\n 0x2ACF: 0x2AD0,\n 0x2AD0: 0x2ACF,\n 0x2AD1: 0x2AD2,\n 0x2AD2: 0x2AD1,\n 0x2AD3: 0x2AD4,\n 0x2AD4: 0x2AD3,\n 0x2AD5: 0x2AD6,\n 0x2AD6: 0x2AD5,\n 0x2ADE: 0x22A6,\n 0x2AE3: 0x22A9,\n 0x2AE4: 0x22A8,\n 0x2AE5: 0x22AB,\n 0x2AEC: 0x2AED,\n 0x2AED: 0x2AEC,\n 0x2AEE: 0x2224,\n 0x2AF7: 0x2AF8,\n 0x2AF8: 0x2AF7,\n 0x2AF9: 0x2AFA,\n 0x2AFA: 0x2AF9,\n 0x2BFE: 0x221F,\n 0x2E02: 0x2E03,\n 0x2E03: 0x2E02,\n 0x2E04: 0x2E05,\n 0x2E05: 0x2E04,\n 0x2E09: 0x2E0A,\n 0x2E0A: 0x2E09,\n 0x2E0C: 0x2E0D,\n 0x2E0D: 0x2E0C,\n 0x2E1C: 0x2E1D,\n 0x2E1D: 0x2E1C,\n 0x2E20: 0x2E21,\n 0x2E21: 0x2E20,\n 0x2E22: 0x2E23,\n 0x2E23: 0x2E22,\n 0x2E24: 0x2E25,\n 0x2E25: 0x2E24,\n 0x2E26: 0x2E27,\n 0x2E27: 0x2E26,\n 0x2E28: 0x2E29,\n 0x2E29: 0x2E28,\n 0x2E55: 0x2E56,\n 0x2E56: 0x2E55,\n 0x2E57: 0x2E58,\n 0x2E58: 0x2E57,\n 0x2E59: 0x2E5A,\n 0x2E5A: 0x2E59,\n 0x2E5B: 0x2E5C,\n 0x2E5C: 0x2E5B,\n 0x3008: 0x3009,\n 0x3009: 0x3008,\n 0x300A: 0x300B,\n 0x300B: 0x300A,\n 0x300C: 0x300D,\n 0x300D: 0x300C,\n 0x300E: 0x300F,\n 0x300F: 0x300E,\n 0x3010: 0x3011,\n 0x3011: 0x3010,\n 0x3014: 0x3015,\n 0x3015: 0x3014,\n 0x3016: 0x3017,\n 0x3017: 0x3016,\n 0x3018: 0x3019,\n 0x3019: 0x3018,\n 0x301A: 0x301B,\n 0x301B: 0x301A,\n 0xFE59: 0xFE5A,\n 0xFE5A: 0xFE59,\n 0xFE5B: 0xFE5C,\n 0xFE5C: 0xFE5B,\n 0xFE5D: 0xFE5E,\n 0xFE5E: 0xFE5D,\n 0xFE64: 0xFE65,\n 0xFE65: 0xFE64,\n 0xFF08: 0xFF09,\n 0xFF09: 0xFF08,\n 0xFF1C: 0xFF1E,\n 0xFF1E: 0xFF1C,\n 0xFF3B: 0xFF3D,\n 0xFF3D: 0xFF3B,\n 0xFF5B: 0xFF5D,\n 0xFF5D: 0xFF5B,\n 0xFF5F: 0xFF60,\n 0xFF60: 0xFF5F,\n 0xFF62: 0xFF63,\n 0xFF63: 0xFF62,\n}\n
.venv\Lib\site-packages\fontTools\unicodedata\Mirrored.py
Mirrored.py
Python
9,688
0.8
0
0.035874
awesome-app
524
2024-11-28T19:13:25.521302
GPL-3.0
false
9d3a8ba870bce809e05aceed173e50c9
# Data updated to OpenType 1.8.2 as of January 2018.\n\n# Complete list of OpenType script tags at:\n# https://www.microsoft.com/typography/otspec/scripttags.htm\n\n# Most of the script tags are the same as the ISO 15924 tag but lowercased,\n# so we only have to handle the exceptional cases:\n# - KATAKANA and HIRAGANA both map to 'kana';\n# - spaces at the end are preserved, unlike ISO 15924;\n# - we map special script codes for Inherited, Common and Unknown to DFLT.\n\nDEFAULT_SCRIPT = "DFLT"\n\nSCRIPT_ALIASES = {\n "jamo": "hang",\n}\n\nSCRIPT_EXCEPTIONS = {\n "Hira": "kana",\n "Hrkt": "kana",\n "Laoo": "lao ",\n "Yiii": "yi ",\n "Nkoo": "nko ",\n "Vaii": "vai ",\n "Zmth": "math",\n "Zinh": DEFAULT_SCRIPT,\n "Zyyy": DEFAULT_SCRIPT,\n "Zzzz": DEFAULT_SCRIPT,\n}\n\nSCRIPT_EXCEPTIONS_REVERSED = {\n "math": "Zmth",\n}\n\nNEW_SCRIPT_TAGS = {\n "Beng": ("bng2",),\n "Deva": ("dev2",),\n "Gujr": ("gjr2",),\n "Guru": ("gur2",),\n "Knda": ("knd2",),\n "Mlym": ("mlm2",),\n "Orya": ("ory2",),\n "Taml": ("tml2",),\n "Telu": ("tel2",),\n "Mymr": ("mym2",),\n}\n\nNEW_SCRIPT_TAGS_REVERSED = {\n value: key for key, values in NEW_SCRIPT_TAGS.items() for value in values\n}\n
.venv\Lib\site-packages\fontTools\unicodedata\OTTags.py
OTTags.py
Python
1,246
0.8
0.06
0.190476
node-utils
614
2024-06-02T05:40:15.918431
Apache-2.0
false
de452cc94a795f8038752a6e005d8cd7
# -*- coding: utf-8 -*-\n#\n# NOTE: This file was auto-generated with MetaTools/buildUCD.py.\n# Source: https://unicode.org/Public/UNIDATA/ScriptExtensions.txt\n# License: http://unicode.org/copyright.html#License\n#\n# ScriptExtensions-16.0.0.txt\n# Date: 2024-07-30, 19:38:00 GMT\n# © 2024 Unicode®, Inc.\n# Unicode and the Unicode Logo are registered trademarks of Unicode, Inc. in the U.S. and other countries.\n# For terms of use and license, see https://www.unicode.org/terms_of_use.html\n#\n# Unicode Character Database\n# For documentation, see https://www.unicode.org/reports/tr44/\n#\n# The Script_Extensions property indicates which characters are commonly used\n# with more than one script, but with a limited number of scripts.\n# For each code point, there is one or more property values. Each such value is a Script property value.\n# For more information, see:\n# UAX #24, Unicode Script Property: https://www.unicode.org/reports/tr24/\n# Especially the sections:\n# https://www.unicode.org/reports/tr24/#Assignment_Script_Values\n# https://www.unicode.org/reports/tr24/#Assignment_ScriptX_Values\n#\n# Each Script_Extensions value in this file consists of a set\n# of one or more abbreviated Script property values. The ordering of the\n# values in that set is not material, but for stability in presentation\n# it is given here as alphabetical.\n#\n# All code points not explicitly listed for Script_Extensions\n# have as their value the corresponding Script property value.\n#\n# @missing: 0000..10FFFF; <script>\n\nRANGES = [\n 0x0000, # .. 0x00B6 ; None\n 0x00B7, # .. 0x00B7 ; {'Avst', 'Cari', 'Copt', 'Dupl', 'Elba', 'Geor', 'Glag', 'Gong', 'Goth', 'Grek', 'Hani', 'Latn', 'Lydi', 'Mahj', 'Perm', 'Shaw'}\n 0x00B8, # .. 0x02BB ; None\n 0x02BC, # .. 0x02BC ; {'Beng', 'Cyrl', 'Deva', 'Latn', 'Lisu', 'Thai', 'Toto'}\n 0x02BD, # .. 0x02C6 ; None\n 0x02C7, # .. 0x02C7 ; {'Bopo', 'Latn'}\n 0x02C8, # .. 0x02C8 ; None\n 0x02C9, # .. 0x02CB ; {'Bopo', 'Latn'}\n 0x02CC, # .. 0x02CC ; None\n 0x02CD, # .. 0x02CD ; {'Latn', 'Lisu'}\n 0x02CE, # .. 0x02D6 ; None\n 0x02D7, # .. 0x02D7 ; {'Latn', 'Thai'}\n 0x02D8, # .. 0x02D8 ; None\n 0x02D9, # .. 0x02D9 ; {'Bopo', 'Latn'}\n 0x02DA, # .. 0x02FF ; None\n 0x0300, # .. 0x0300 ; {'Cher', 'Copt', 'Cyrl', 'Grek', 'Latn', 'Perm', 'Sunu', 'Tale'}\n 0x0301, # .. 0x0301 ; {'Cher', 'Cyrl', 'Grek', 'Latn', 'Osge', 'Sunu', 'Tale', 'Todr'}\n 0x0302, # .. 0x0302 ; {'Cher', 'Cyrl', 'Latn', 'Tfng'}\n 0x0303, # .. 0x0303 ; {'Glag', 'Latn', 'Sunu', 'Syrc', 'Thai'}\n 0x0304, # .. 0x0304 ; {'Aghb', 'Cher', 'Copt', 'Cyrl', 'Goth', 'Grek', 'Latn', 'Osge', 'Syrc', 'Tfng', 'Todr'}\n 0x0305, # .. 0x0305 ; {'Copt', 'Elba', 'Glag', 'Goth', 'Kana', 'Latn'}\n 0x0306, # .. 0x0306 ; {'Cyrl', 'Grek', 'Latn', 'Perm'}\n 0x0307, # .. 0x0307 ; {'Copt', 'Dupl', 'Hebr', 'Latn', 'Perm', 'Syrc', 'Tale', 'Tfng', 'Todr'}\n 0x0308, # .. 0x0308 ; {'Armn', 'Cyrl', 'Dupl', 'Goth', 'Grek', 'Hebr', 'Latn', 'Perm', 'Syrc', 'Tale'}\n 0x0309, # .. 0x0309 ; {'Latn', 'Tfng'}\n 0x030A, # .. 0x030A ; {'Dupl', 'Latn', 'Syrc'}\n 0x030B, # .. 0x030B ; {'Cher', 'Cyrl', 'Latn', 'Osge'}\n 0x030C, # .. 0x030C ; {'Cher', 'Latn', 'Tale'}\n 0x030D, # .. 0x030D ; {'Latn', 'Sunu'}\n 0x030E, # .. 0x030E ; {'Ethi', 'Latn'}\n 0x030F, # .. 0x030F ; None\n 0x0310, # .. 0x0310 ; {'Latn', 'Sunu'}\n 0x0311, # .. 0x0311 ; {'Cyrl', 'Latn', 'Todr'}\n 0x0312, # .. 0x0312 ; None\n 0x0313, # .. 0x0313 ; {'Grek', 'Latn', 'Perm', 'Todr'}\n 0x0314, # .. 0x031F ; None\n 0x0320, # .. 0x0320 ; {'Latn', 'Syrc'}\n 0x0321, # .. 0x0322 ; None\n 0x0323, # .. 0x0323 ; {'Cher', 'Dupl', 'Kana', 'Latn', 'Syrc'}\n 0x0324, # .. 0x0324 ; {'Cher', 'Dupl', 'Latn', 'Syrc'}\n 0x0325, # .. 0x0325 ; {'Latn', 'Syrc'}\n 0x0326, # .. 0x032C ; None\n 0x032D, # .. 0x032D ; {'Latn', 'Sunu', 'Syrc'}\n 0x032E, # .. 0x032E ; {'Latn', 'Syrc'}\n 0x032F, # .. 0x032F ; None\n 0x0330, # .. 0x0330 ; {'Cher', 'Latn', 'Syrc'}\n 0x0331, # .. 0x0331 ; {'Aghb', 'Cher', 'Goth', 'Latn', 'Sunu', 'Thai'}\n 0x0332, # .. 0x0341 ; None\n 0x0342, # .. 0x0342 ; {'Grek'}\n 0x0343, # .. 0x0344 ; None\n 0x0345, # .. 0x0345 ; {'Grek'}\n 0x0346, # .. 0x0357 ; None\n 0x0358, # .. 0x0358 ; {'Latn', 'Osge'}\n 0x0359, # .. 0x035D ; None\n 0x035E, # .. 0x035E ; {'Aghb', 'Latn', 'Todr'}\n 0x035F, # .. 0x0362 ; None\n 0x0363, # .. 0x036F ; {'Latn'}\n 0x0370, # .. 0x0373 ; None\n 0x0374, # .. 0x0375 ; {'Copt', 'Grek'}\n 0x0376, # .. 0x0482 ; None\n 0x0483, # .. 0x0483 ; {'Cyrl', 'Perm'}\n 0x0484, # .. 0x0484 ; {'Cyrl', 'Glag'}\n 0x0485, # .. 0x0486 ; {'Cyrl', 'Latn'}\n 0x0487, # .. 0x0487 ; {'Cyrl', 'Glag'}\n 0x0488, # .. 0x0588 ; None\n 0x0589, # .. 0x0589 ; {'Armn', 'Geor', 'Glag'}\n 0x058A, # .. 0x060B ; None\n 0x060C, # .. 0x060C ; {'Arab', 'Gara', 'Nkoo', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}\n 0x060D, # .. 0x061A ; None\n 0x061B, # .. 0x061B ; {'Arab', 'Gara', 'Nkoo', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}\n 0x061C, # .. 0x061C ; {'Arab', 'Syrc', 'Thaa'}\n 0x061D, # .. 0x061E ; None\n 0x061F, # .. 0x061F ; {'Adlm', 'Arab', 'Gara', 'Nkoo', 'Rohg', 'Syrc', 'Thaa', 'Yezi'}\n 0x0620, # .. 0x063F ; None\n 0x0640, # .. 0x0640 ; {'Adlm', 'Arab', 'Mand', 'Mani', 'Ougr', 'Phlp', 'Rohg', 'Sogd', 'Syrc'}\n 0x0641, # .. 0x064A ; None\n 0x064B, # .. 0x0655 ; {'Arab', 'Syrc'}\n 0x0656, # .. 0x065F ; None\n 0x0660, # .. 0x0669 ; {'Arab', 'Thaa', 'Yezi'}\n 0x066A, # .. 0x066F ; None\n 0x0670, # .. 0x0670 ; {'Arab', 'Syrc'}\n 0x0671, # .. 0x06D3 ; None\n 0x06D4, # .. 0x06D4 ; {'Arab', 'Rohg'}\n 0x06D5, # .. 0x0950 ; None\n 0x0951, # .. 0x0951 ; {'Beng', 'Deva', 'Gran', 'Gujr', 'Guru', 'Knda', 'Latn', 'Mlym', 'Orya', 'Shrd', 'Taml', 'Telu', 'Tirh'}\n 0x0952, # .. 0x0952 ; {'Beng', 'Deva', 'Gran', 'Gujr', 'Guru', 'Knda', 'Latn', 'Mlym', 'Orya', 'Taml', 'Telu', 'Tirh'}\n 0x0953, # .. 0x0963 ; None\n 0x0964, # .. 0x0964 ; {'Beng', 'Deva', 'Dogr', 'Gong', 'Gonm', 'Gran', 'Gujr', 'Guru', 'Knda', 'Mahj', 'Mlym', 'Nand', 'Onao', 'Orya', 'Sind', 'Sinh', 'Sylo', 'Takr', 'Taml', 'Telu', 'Tirh'}\n 0x0965, # .. 0x0965 ; {'Beng', 'Deva', 'Dogr', 'Gong', 'Gonm', 'Gran', 'Gujr', 'Gukh', 'Guru', 'Knda', 'Limb', 'Mahj', 'Mlym', 'Nand', 'Onao', 'Orya', 'Sind', 'Sinh', 'Sylo', 'Takr', 'Taml', 'Telu', 'Tirh'}\n 0x0966, # .. 0x096F ; {'Deva', 'Dogr', 'Kthi', 'Mahj'}\n 0x0970, # .. 0x09E5 ; None\n 0x09E6, # .. 0x09EF ; {'Beng', 'Cakm', 'Sylo'}\n 0x09F0, # .. 0x0A65 ; None\n 0x0A66, # .. 0x0A6F ; {'Guru', 'Mult'}\n 0x0A70, # .. 0x0AE5 ; None\n 0x0AE6, # .. 0x0AEF ; {'Gujr', 'Khoj'}\n 0x0AF0, # .. 0x0BE5 ; None\n 0x0BE6, # .. 0x0BF3 ; {'Gran', 'Taml'}\n 0x0BF4, # .. 0x0CE5 ; None\n 0x0CE6, # .. 0x0CEF ; {'Knda', 'Nand', 'Tutg'}\n 0x0CF0, # .. 0x103F ; None\n 0x1040, # .. 0x1049 ; {'Cakm', 'Mymr', 'Tale'}\n 0x104A, # .. 0x10FA ; None\n 0x10FB, # .. 0x10FB ; {'Geor', 'Glag', 'Latn'}\n 0x10FC, # .. 0x16EA ; None\n 0x16EB, # .. 0x16ED ; {'Runr'}\n 0x16EE, # .. 0x1734 ; None\n 0x1735, # .. 0x1736 ; {'Buhd', 'Hano', 'Tagb', 'Tglg'}\n 0x1737, # .. 0x1801 ; None\n 0x1802, # .. 0x1803 ; {'Mong', 'Phag'}\n 0x1804, # .. 0x1804 ; None\n 0x1805, # .. 0x1805 ; {'Mong', 'Phag'}\n 0x1806, # .. 0x1CCF ; None\n 0x1CD0, # .. 0x1CD0 ; {'Beng', 'Deva', 'Gran', 'Knda'}\n 0x1CD1, # .. 0x1CD1 ; {'Deva'}\n 0x1CD2, # .. 0x1CD2 ; {'Beng', 'Deva', 'Gran', 'Knda'}\n 0x1CD3, # .. 0x1CD3 ; {'Deva', 'Gran', 'Knda'}\n 0x1CD4, # .. 0x1CD4 ; {'Deva'}\n 0x1CD5, # .. 0x1CD6 ; {'Beng', 'Deva'}\n 0x1CD7, # .. 0x1CD7 ; {'Deva', 'Shrd'}\n 0x1CD8, # .. 0x1CD8 ; {'Beng', 'Deva'}\n 0x1CD9, # .. 0x1CD9 ; {'Deva', 'Shrd'}\n 0x1CDA, # .. 0x1CDA ; {'Deva', 'Knda', 'Mlym', 'Orya', 'Taml', 'Telu'}\n 0x1CDB, # .. 0x1CDB ; {'Deva'}\n 0x1CDC, # .. 0x1CDD ; {'Deva', 'Shrd'}\n 0x1CDE, # .. 0x1CDF ; {'Deva'}\n 0x1CE0, # .. 0x1CE0 ; {'Deva', 'Shrd'}\n 0x1CE1, # .. 0x1CE1 ; {'Beng', 'Deva'}\n 0x1CE2, # .. 0x1CE8 ; {'Deva'}\n 0x1CE9, # .. 0x1CE9 ; {'Deva', 'Nand'}\n 0x1CEA, # .. 0x1CEA ; {'Beng', 'Deva'}\n 0x1CEB, # .. 0x1CEC ; {'Deva'}\n 0x1CED, # .. 0x1CED ; {'Beng', 'Deva'}\n 0x1CEE, # .. 0x1CF1 ; {'Deva'}\n 0x1CF2, # .. 0x1CF2 ; {'Beng', 'Deva', 'Gran', 'Knda', 'Mlym', 'Nand', 'Orya', 'Sinh', 'Telu', 'Tirh', 'Tutg'}\n 0x1CF3, # .. 0x1CF3 ; {'Deva', 'Gran'}\n 0x1CF4, # .. 0x1CF4 ; {'Deva', 'Gran', 'Knda', 'Tutg'}\n 0x1CF5, # .. 0x1CF6 ; {'Beng', 'Deva'}\n 0x1CF7, # .. 0x1CF7 ; {'Beng'}\n 0x1CF8, # .. 0x1CF9 ; {'Deva', 'Gran'}\n 0x1CFA, # .. 0x1CFA ; {'Nand'}\n 0x1CFB, # .. 0x1DBF ; None\n 0x1DC0, # .. 0x1DC1 ; {'Grek'}\n 0x1DC2, # .. 0x1DF7 ; None\n 0x1DF8, # .. 0x1DF8 ; {'Cyrl', 'Latn', 'Syrc'}\n 0x1DF9, # .. 0x1DF9 ; None\n 0x1DFA, # .. 0x1DFA ; {'Syrc'}\n 0x1DFB, # .. 0x202E ; None\n 0x202F, # .. 0x202F ; {'Latn', 'Mong', 'Phag'}\n 0x2030, # .. 0x204E ; None\n 0x204F, # .. 0x204F ; {'Adlm', 'Arab'}\n 0x2050, # .. 0x2059 ; None\n 0x205A, # .. 0x205A ; {'Cari', 'Geor', 'Glag', 'Hung', 'Lyci', 'Orkh'}\n 0x205B, # .. 0x205C ; None\n 0x205D, # .. 0x205D ; {'Cari', 'Grek', 'Hung', 'Mero'}\n 0x205E, # .. 0x20EF ; None\n 0x20F0, # .. 0x20F0 ; {'Deva', 'Gran', 'Latn'}\n 0x20F1, # .. 0x2E16 ; None\n 0x2E17, # .. 0x2E17 ; {'Copt', 'Latn'}\n 0x2E18, # .. 0x2E2F ; None\n 0x2E30, # .. 0x2E30 ; {'Avst', 'Orkh'}\n 0x2E31, # .. 0x2E31 ; {'Avst', 'Cari', 'Geor', 'Hung', 'Kthi', 'Lydi', 'Samr'}\n 0x2E32, # .. 0x2E3B ; None\n 0x2E3C, # .. 0x2E3C ; {'Dupl'}\n 0x2E3D, # .. 0x2E40 ; None\n 0x2E41, # .. 0x2E41 ; {'Adlm', 'Arab', 'Hung'}\n 0x2E42, # .. 0x2E42 ; None\n 0x2E43, # .. 0x2E43 ; {'Cyrl', 'Glag'}\n 0x2E44, # .. 0x2FEF ; None\n 0x2FF0, # .. 0x2FFF ; {'Hani', 'Tang'}\n 0x3000, # .. 0x3000 ; None\n 0x3001, # .. 0x3001 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Mong', 'Yiii'}\n 0x3002, # .. 0x3002 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Mong', 'Phag', 'Yiii'}\n 0x3003, # .. 0x3003 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'}\n 0x3004, # .. 0x3005 ; None\n 0x3006, # .. 0x3006 ; {'Hani'}\n 0x3007, # .. 0x3007 ; None\n 0x3008, # .. 0x3009 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Mong', 'Tibt', 'Yiii'}\n 0x300A, # .. 0x300B ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Lisu', 'Mong', 'Tibt', 'Yiii'}\n 0x300C, # .. 0x3011 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Yiii'}\n 0x3012, # .. 0x3012 ; None\n 0x3013, # .. 0x3013 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'}\n 0x3014, # .. 0x301B ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Yiii'}\n 0x301C, # .. 0x301F ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'}\n 0x3020, # .. 0x3029 ; None\n 0x302A, # .. 0x302D ; {'Bopo', 'Hani'}\n 0x302E, # .. 0x302F ; None\n 0x3030, # .. 0x3030 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'}\n 0x3031, # .. 0x3035 ; {'Hira', 'Kana'}\n 0x3036, # .. 0x3036 ; None\n 0x3037, # .. 0x3037 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'}\n 0x3038, # .. 0x303B ; None\n 0x303C, # .. 0x303D ; {'Hani', 'Hira', 'Kana'}\n 0x303E, # .. 0x303F ; {'Hani'}\n 0x3040, # .. 0x3098 ; None\n 0x3099, # .. 0x309C ; {'Hira', 'Kana'}\n 0x309D, # .. 0x309F ; None\n 0x30A0, # .. 0x30A0 ; {'Hira', 'Kana'}\n 0x30A1, # .. 0x30FA ; None\n 0x30FB, # .. 0x30FB ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Yiii'}\n 0x30FC, # .. 0x30FC ; {'Hira', 'Kana'}\n 0x30FD, # .. 0x318F ; None\n 0x3190, # .. 0x319F ; {'Hani'}\n 0x31A0, # .. 0x31BF ; None\n 0x31C0, # .. 0x31E5 ; {'Hani'}\n 0x31E6, # .. 0x31EE ; None\n 0x31EF, # .. 0x31EF ; {'Hani', 'Tang'}\n 0x31F0, # .. 0x321F ; None\n 0x3220, # .. 0x3247 ; {'Hani'}\n 0x3248, # .. 0x327F ; None\n 0x3280, # .. 0x32B0 ; {'Hani'}\n 0x32B1, # .. 0x32BF ; None\n 0x32C0, # .. 0x32CB ; {'Hani'}\n 0x32CC, # .. 0x32FE ; None\n 0x32FF, # .. 0x32FF ; {'Hani'}\n 0x3300, # .. 0x3357 ; None\n 0x3358, # .. 0x3370 ; {'Hani'}\n 0x3371, # .. 0x337A ; None\n 0x337B, # .. 0x337F ; {'Hani'}\n 0x3380, # .. 0x33DF ; None\n 0x33E0, # .. 0x33FE ; {'Hani'}\n 0x33FF, # .. 0xA66E ; None\n 0xA66F, # .. 0xA66F ; {'Cyrl', 'Glag'}\n 0xA670, # .. 0xA6FF ; None\n 0xA700, # .. 0xA707 ; {'Hani', 'Latn'}\n 0xA708, # .. 0xA82F ; None\n 0xA830, # .. 0xA832 ; {'Deva', 'Dogr', 'Gujr', 'Guru', 'Khoj', 'Knda', 'Kthi', 'Mahj', 'Mlym', 'Modi', 'Nand', 'Shrd', 'Sind', 'Takr', 'Tirh', 'Tutg'}\n 0xA833, # .. 0xA835 ; {'Deva', 'Dogr', 'Gujr', 'Guru', 'Khoj', 'Knda', 'Kthi', 'Mahj', 'Modi', 'Nand', 'Shrd', 'Sind', 'Takr', 'Tirh', 'Tutg'}\n 0xA836, # .. 0xA837 ; {'Deva', 'Dogr', 'Gujr', 'Guru', 'Khoj', 'Kthi', 'Mahj', 'Modi', 'Sind', 'Takr', 'Tirh'}\n 0xA838, # .. 0xA838 ; {'Deva', 'Dogr', 'Gujr', 'Guru', 'Khoj', 'Kthi', 'Mahj', 'Modi', 'Shrd', 'Sind', 'Takr', 'Tirh'}\n 0xA839, # .. 0xA839 ; {'Deva', 'Dogr', 'Gujr', 'Guru', 'Khoj', 'Kthi', 'Mahj', 'Modi', 'Sind', 'Takr', 'Tirh'}\n 0xA83A, # .. 0xA8F0 ; None\n 0xA8F1, # .. 0xA8F1 ; {'Beng', 'Deva', 'Tutg'}\n 0xA8F2, # .. 0xA8F2 ; None\n 0xA8F3, # .. 0xA8F3 ; {'Deva', 'Taml'}\n 0xA8F4, # .. 0xA92D ; None\n 0xA92E, # .. 0xA92E ; {'Kali', 'Latn', 'Mymr'}\n 0xA92F, # .. 0xA9CE ; None\n 0xA9CF, # .. 0xA9CF ; {'Bugi', 'Java'}\n 0xA9D0, # .. 0xFD3D ; None\n 0xFD3E, # .. 0xFD3F ; {'Arab', 'Nkoo'}\n 0xFD40, # .. 0xFDF1 ; None\n 0xFDF2, # .. 0xFDF2 ; {'Arab', 'Thaa'}\n 0xFDF3, # .. 0xFDFC ; None\n 0xFDFD, # .. 0xFDFD ; {'Arab', 'Thaa'}\n 0xFDFE, # .. 0xFE44 ; None\n 0xFE45, # .. 0xFE46 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana'}\n 0xFE47, # .. 0xFF60 ; None\n 0xFF61, # .. 0xFF65 ; {'Bopo', 'Hang', 'Hani', 'Hira', 'Kana', 'Yiii'}\n 0xFF66, # .. 0xFF6F ; None\n 0xFF70, # .. 0xFF70 ; {'Hira', 'Kana'}\n 0xFF71, # .. 0xFF9D ; None\n 0xFF9E, # .. 0xFF9F ; {'Hira', 'Kana'}\n 0xFFA0, # .. 0x100FF ; None\n 0x10100, # .. 0x10101 ; {'Cpmn', 'Cprt', 'Linb'}\n 0x10102, # .. 0x10102 ; {'Cprt', 'Linb'}\n 0x10103, # .. 0x10106 ; None\n 0x10107, # .. 0x10133 ; {'Cprt', 'Lina', 'Linb'}\n 0x10134, # .. 0x10136 ; None\n 0x10137, # .. 0x1013F ; {'Cprt', 'Linb'}\n 0x10140, # .. 0x102DF ; None\n 0x102E0, # .. 0x102FB ; {'Arab', 'Copt'}\n 0x102FC, # .. 0x10AF1 ; None\n 0x10AF2, # .. 0x10AF2 ; {'Mani', 'Ougr'}\n 0x10AF3, # .. 0x11300 ; None\n 0x11301, # .. 0x11301 ; {'Gran', 'Taml'}\n 0x11302, # .. 0x11302 ; None\n 0x11303, # .. 0x11303 ; {'Gran', 'Taml'}\n 0x11304, # .. 0x1133A ; None\n 0x1133B, # .. 0x1133C ; {'Gran', 'Taml'}\n 0x1133D, # .. 0x11FCF ; None\n 0x11FD0, # .. 0x11FD1 ; {'Gran', 'Taml'}\n 0x11FD2, # .. 0x11FD2 ; None\n 0x11FD3, # .. 0x11FD3 ; {'Gran', 'Taml'}\n 0x11FD4, # .. 0x1BC9F ; None\n 0x1BCA0, # .. 0x1BCA3 ; {'Dupl'}\n 0x1BCA4, # .. 0x1D35F ; None\n 0x1D360, # .. 0x1D371 ; {'Hani'}\n 0x1D372, # .. 0x1F24F ; None\n 0x1F250, # .. 0x1F251 ; {'Hani'}\n 0x1F252, # .. 0x10FFFF ; None\n]\n\nVALUES = [\n None, # 0000..00B6\n {\n "Avst",\n "Cari",\n "Copt",\n "Dupl",\n "Elba",\n "Geor",\n "Glag",\n "Gong",\n "Goth",\n "Grek",\n "Hani",\n "Latn",\n "Lydi",\n "Mahj",\n "Perm",\n "Shaw",\n }, # 00B7..00B7\n None, # 00B8..02BB\n {"Beng", "Cyrl", "Deva", "Latn", "Lisu", "Thai", "Toto"}, # 02BC..02BC\n None, # 02BD..02C6\n {"Bopo", "Latn"}, # 02C7..02C7\n None, # 02C8..02C8\n {"Bopo", "Latn"}, # 02C9..02CB\n None, # 02CC..02CC\n {"Latn", "Lisu"}, # 02CD..02CD\n None, # 02CE..02D6\n {"Latn", "Thai"}, # 02D7..02D7\n None, # 02D8..02D8\n {"Bopo", "Latn"}, # 02D9..02D9\n None, # 02DA..02FF\n {"Cher", "Copt", "Cyrl", "Grek", "Latn", "Perm", "Sunu", "Tale"}, # 0300..0300\n {"Cher", "Cyrl", "Grek", "Latn", "Osge", "Sunu", "Tale", "Todr"}, # 0301..0301\n {"Cher", "Cyrl", "Latn", "Tfng"}, # 0302..0302\n {"Glag", "Latn", "Sunu", "Syrc", "Thai"}, # 0303..0303\n {\n "Aghb",\n "Cher",\n "Copt",\n "Cyrl",\n "Goth",\n "Grek",\n "Latn",\n "Osge",\n "Syrc",\n "Tfng",\n "Todr",\n }, # 0304..0304\n {"Copt", "Elba", "Glag", "Goth", "Kana", "Latn"}, # 0305..0305\n {"Cyrl", "Grek", "Latn", "Perm"}, # 0306..0306\n {\n "Copt",\n "Dupl",\n "Hebr",\n "Latn",\n "Perm",\n "Syrc",\n "Tale",\n "Tfng",\n "Todr",\n }, # 0307..0307\n {\n "Armn",\n "Cyrl",\n "Dupl",\n "Goth",\n "Grek",\n "Hebr",\n "Latn",\n "Perm",\n "Syrc",\n "Tale",\n }, # 0308..0308\n {"Latn", "Tfng"}, # 0309..0309\n {"Dupl", "Latn", "Syrc"}, # 030A..030A\n {"Cher", "Cyrl", "Latn", "Osge"}, # 030B..030B\n {"Cher", "Latn", "Tale"}, # 030C..030C\n {"Latn", "Sunu"}, # 030D..030D\n {"Ethi", "Latn"}, # 030E..030E\n None, # 030F..030F\n {"Latn", "Sunu"}, # 0310..0310\n {"Cyrl", "Latn", "Todr"}, # 0311..0311\n None, # 0312..0312\n {"Grek", "Latn", "Perm", "Todr"}, # 0313..0313\n None, # 0314..031F\n {"Latn", "Syrc"}, # 0320..0320\n None, # 0321..0322\n {"Cher", "Dupl", "Kana", "Latn", "Syrc"}, # 0323..0323\n {"Cher", "Dupl", "Latn", "Syrc"}, # 0324..0324\n {"Latn", "Syrc"}, # 0325..0325\n None, # 0326..032C\n {"Latn", "Sunu", "Syrc"}, # 032D..032D\n {"Latn", "Syrc"}, # 032E..032E\n None, # 032F..032F\n {"Cher", "Latn", "Syrc"}, # 0330..0330\n {"Aghb", "Cher", "Goth", "Latn", "Sunu", "Thai"}, # 0331..0331\n None, # 0332..0341\n {"Grek"}, # 0342..0342\n None, # 0343..0344\n {"Grek"}, # 0345..0345\n None, # 0346..0357\n {"Latn", "Osge"}, # 0358..0358\n None, # 0359..035D\n {"Aghb", "Latn", "Todr"}, # 035E..035E\n None, # 035F..0362\n {"Latn"}, # 0363..036F\n None, # 0370..0373\n {"Copt", "Grek"}, # 0374..0375\n None, # 0376..0482\n {"Cyrl", "Perm"}, # 0483..0483\n {"Cyrl", "Glag"}, # 0484..0484\n {"Cyrl", "Latn"}, # 0485..0486\n {"Cyrl", "Glag"}, # 0487..0487\n None, # 0488..0588\n {"Armn", "Geor", "Glag"}, # 0589..0589\n None, # 058A..060B\n {"Arab", "Gara", "Nkoo", "Rohg", "Syrc", "Thaa", "Yezi"}, # 060C..060C\n None, # 060D..061A\n {"Arab", "Gara", "Nkoo", "Rohg", "Syrc", "Thaa", "Yezi"}, # 061B..061B\n {"Arab", "Syrc", "Thaa"}, # 061C..061C\n None, # 061D..061E\n {"Adlm", "Arab", "Gara", "Nkoo", "Rohg", "Syrc", "Thaa", "Yezi"}, # 061F..061F\n None, # 0620..063F\n {\n "Adlm",\n "Arab",\n "Mand",\n "Mani",\n "Ougr",\n "Phlp",\n "Rohg",\n "Sogd",\n "Syrc",\n }, # 0640..0640\n None, # 0641..064A\n {"Arab", "Syrc"}, # 064B..0655\n None, # 0656..065F\n {"Arab", "Thaa", "Yezi"}, # 0660..0669\n None, # 066A..066F\n {"Arab", "Syrc"}, # 0670..0670\n None, # 0671..06D3\n {"Arab", "Rohg"}, # 06D4..06D4\n None, # 06D5..0950\n {\n "Beng",\n "Deva",\n "Gran",\n "Gujr",\n "Guru",\n "Knda",\n "Latn",\n "Mlym",\n "Orya",\n "Shrd",\n "Taml",\n "Telu",\n "Tirh",\n }, # 0951..0951\n {\n "Beng",\n "Deva",\n "Gran",\n "Gujr",\n "Guru",\n "Knda",\n "Latn",\n "Mlym",\n "Orya",\n "Taml",\n "Telu",\n "Tirh",\n }, # 0952..0952\n None, # 0953..0963\n {\n "Beng",\n "Deva",\n "Dogr",\n "Gong",\n "Gonm",\n "Gran",\n "Gujr",\n "Guru",\n "Knda",\n "Mahj",\n "Mlym",\n "Nand",\n "Onao",\n "Orya",\n "Sind",\n "Sinh",\n "Sylo",\n "Takr",\n "Taml",\n "Telu",\n "Tirh",\n }, # 0964..0964\n {\n "Beng",\n "Deva",\n "Dogr",\n "Gong",\n "Gonm",\n "Gran",\n "Gujr",\n "Gukh",\n "Guru",\n "Knda",\n "Limb",\n "Mahj",\n "Mlym",\n "Nand",\n "Onao",\n "Orya",\n "Sind",\n "Sinh",\n "Sylo",\n "Takr",\n "Taml",\n "Telu",\n "Tirh",\n }, # 0965..0965\n {"Deva", "Dogr", "Kthi", "Mahj"}, # 0966..096F\n None, # 0970..09E5\n {"Beng", "Cakm", "Sylo"}, # 09E6..09EF\n None, # 09F0..0A65\n {"Guru", "Mult"}, # 0A66..0A6F\n None, # 0A70..0AE5\n {"Gujr", "Khoj"}, # 0AE6..0AEF\n None, # 0AF0..0BE5\n {"Gran", "Taml"}, # 0BE6..0BF3\n None, # 0BF4..0CE5\n {"Knda", "Nand", "Tutg"}, # 0CE6..0CEF\n None, # 0CF0..103F\n {"Cakm", "Mymr", "Tale"}, # 1040..1049\n None, # 104A..10FA\n {"Geor", "Glag", "Latn"}, # 10FB..10FB\n None, # 10FC..16EA\n {"Runr"}, # 16EB..16ED\n None, # 16EE..1734\n {"Buhd", "Hano", "Tagb", "Tglg"}, # 1735..1736\n None, # 1737..1801\n {"Mong", "Phag"}, # 1802..1803\n None, # 1804..1804\n {"Mong", "Phag"}, # 1805..1805\n None, # 1806..1CCF\n {"Beng", "Deva", "Gran", "Knda"}, # 1CD0..1CD0\n {"Deva"}, # 1CD1..1CD1\n {"Beng", "Deva", "Gran", "Knda"}, # 1CD2..1CD2\n {"Deva", "Gran", "Knda"}, # 1CD3..1CD3\n {"Deva"}, # 1CD4..1CD4\n {"Beng", "Deva"}, # 1CD5..1CD6\n {"Deva", "Shrd"}, # 1CD7..1CD7\n {"Beng", "Deva"}, # 1CD8..1CD8\n {"Deva", "Shrd"}, # 1CD9..1CD9\n {"Deva", "Knda", "Mlym", "Orya", "Taml", "Telu"}, # 1CDA..1CDA\n {"Deva"}, # 1CDB..1CDB\n {"Deva", "Shrd"}, # 1CDC..1CDD\n {"Deva"}, # 1CDE..1CDF\n {"Deva", "Shrd"}, # 1CE0..1CE0\n {"Beng", "Deva"}, # 1CE1..1CE1\n {"Deva"}, # 1CE2..1CE8\n {"Deva", "Nand"}, # 1CE9..1CE9\n {"Beng", "Deva"}, # 1CEA..1CEA\n {"Deva"}, # 1CEB..1CEC\n {"Beng", "Deva"}, # 1CED..1CED\n {"Deva"}, # 1CEE..1CF1\n {\n "Beng",\n "Deva",\n "Gran",\n "Knda",\n "Mlym",\n "Nand",\n "Orya",\n "Sinh",\n "Telu",\n "Tirh",\n "Tutg",\n }, # 1CF2..1CF2\n {"Deva", "Gran"}, # 1CF3..1CF3\n {"Deva", "Gran", "Knda", "Tutg"}, # 1CF4..1CF4\n {"Beng", "Deva"}, # 1CF5..1CF6\n {"Beng"}, # 1CF7..1CF7\n {"Deva", "Gran"}, # 1CF8..1CF9\n {"Nand"}, # 1CFA..1CFA\n None, # 1CFB..1DBF\n {"Grek"}, # 1DC0..1DC1\n None, # 1DC2..1DF7\n {"Cyrl", "Latn", "Syrc"}, # 1DF8..1DF8\n None, # 1DF9..1DF9\n {"Syrc"}, # 1DFA..1DFA\n None, # 1DFB..202E\n {"Latn", "Mong", "Phag"}, # 202F..202F\n None, # 2030..204E\n {"Adlm", "Arab"}, # 204F..204F\n None, # 2050..2059\n {"Cari", "Geor", "Glag", "Hung", "Lyci", "Orkh"}, # 205A..205A\n None, # 205B..205C\n {"Cari", "Grek", "Hung", "Mero"}, # 205D..205D\n None, # 205E..20EF\n {"Deva", "Gran", "Latn"}, # 20F0..20F0\n None, # 20F1..2E16\n {"Copt", "Latn"}, # 2E17..2E17\n None, # 2E18..2E2F\n {"Avst", "Orkh"}, # 2E30..2E30\n {"Avst", "Cari", "Geor", "Hung", "Kthi", "Lydi", "Samr"}, # 2E31..2E31\n None, # 2E32..2E3B\n {"Dupl"}, # 2E3C..2E3C\n None, # 2E3D..2E40\n {"Adlm", "Arab", "Hung"}, # 2E41..2E41\n None, # 2E42..2E42\n {"Cyrl", "Glag"}, # 2E43..2E43\n None, # 2E44..2FEF\n {"Hani", "Tang"}, # 2FF0..2FFF\n None, # 3000..3000\n {"Bopo", "Hang", "Hani", "Hira", "Kana", "Mong", "Yiii"}, # 3001..3001\n {"Bopo", "Hang", "Hani", "Hira", "Kana", "Mong", "Phag", "Yiii"}, # 3002..3002\n {"Bopo", "Hang", "Hani", "Hira", "Kana"}, # 3003..3003\n None, # 3004..3005\n {"Hani"}, # 3006..3006\n None, # 3007..3007\n {"Bopo", "Hang", "Hani", "Hira", "Kana", "Mong", "Tibt", "Yiii"}, # 3008..3009\n {\n "Bopo",\n "Hang",\n "Hani",\n "Hira",\n "Kana",\n "Lisu",\n "Mong",\n "Tibt",\n "Yiii",\n }, # 300A..300B\n {"Bopo", "Hang", "Hani", "Hira", "Kana", "Yiii"}, # 300C..3011\n None, # 3012..3012\n {"Bopo", "Hang", "Hani", "Hira", "Kana"}, # 3013..3013\n {"Bopo", "Hang", "Hani", "Hira", "Kana", "Yiii"}, # 3014..301B\n {"Bopo", "Hang", "Hani", "Hira", "Kana"}, # 301C..301F\n None, # 3020..3029\n {"Bopo", "Hani"}, # 302A..302D\n None, # 302E..302F\n {"Bopo", "Hang", "Hani", "Hira", "Kana"}, # 3030..3030\n {"Hira", "Kana"}, # 3031..3035\n None, # 3036..3036\n {"Bopo", "Hang", "Hani", "Hira", "Kana"}, # 3037..3037\n None, # 3038..303B\n {"Hani", "Hira", "Kana"}, # 303C..303D\n {"Hani"}, # 303E..303F\n None, # 3040..3098\n {"Hira", "Kana"}, # 3099..309C\n None, # 309D..309F\n {"Hira", "Kana"}, # 30A0..30A0\n None, # 30A1..30FA\n {"Bopo", "Hang", "Hani", "Hira", "Kana", "Yiii"}, # 30FB..30FB\n {"Hira", "Kana"}, # 30FC..30FC\n None, # 30FD..318F\n {"Hani"}, # 3190..319F\n None, # 31A0..31BF\n {"Hani"}, # 31C0..31E5\n None, # 31E6..31EE\n {"Hani", "Tang"}, # 31EF..31EF\n None, # 31F0..321F\n {"Hani"}, # 3220..3247\n None, # 3248..327F\n {"Hani"}, # 3280..32B0\n None, # 32B1..32BF\n {"Hani"}, # 32C0..32CB\n None, # 32CC..32FE\n {"Hani"}, # 32FF..32FF\n None, # 3300..3357\n {"Hani"}, # 3358..3370\n None, # 3371..337A\n {"Hani"}, # 337B..337F\n None, # 3380..33DF\n {"Hani"}, # 33E0..33FE\n None, # 33FF..A66E\n {"Cyrl", "Glag"}, # A66F..A66F\n None, # A670..A6FF\n {"Hani", "Latn"}, # A700..A707\n None, # A708..A82F\n {\n "Deva",\n "Dogr",\n "Gujr",\n "Guru",\n "Khoj",\n "Knda",\n "Kthi",\n "Mahj",\n "Mlym",\n "Modi",\n "Nand",\n "Shrd",\n "Sind",\n "Takr",\n "Tirh",\n "Tutg",\n }, # A830..A832\n {\n "Deva",\n "Dogr",\n "Gujr",\n "Guru",\n "Khoj",\n "Knda",\n "Kthi",\n "Mahj",\n "Modi",\n "Nand",\n "Shrd",\n "Sind",\n "Takr",\n "Tirh",\n "Tutg",\n }, # A833..A835\n {\n "Deva",\n "Dogr",\n "Gujr",\n "Guru",\n "Khoj",\n "Kthi",\n "Mahj",\n "Modi",\n "Sind",\n "Takr",\n "Tirh",\n }, # A836..A837\n {\n "Deva",\n "Dogr",\n "Gujr",\n "Guru",\n "Khoj",\n "Kthi",\n "Mahj",\n "Modi",\n "Shrd",\n "Sind",\n "Takr",\n "Tirh",\n }, # A838..A838\n {\n "Deva",\n "Dogr",\n "Gujr",\n "Guru",\n "Khoj",\n "Kthi",\n "Mahj",\n "Modi",\n "Sind",\n "Takr",\n "Tirh",\n }, # A839..A839\n None, # A83A..A8F0\n {"Beng", "Deva", "Tutg"}, # A8F1..A8F1\n None, # A8F2..A8F2\n {"Deva", "Taml"}, # A8F3..A8F3\n None, # A8F4..A92D\n {"Kali", "Latn", "Mymr"}, # A92E..A92E\n None, # A92F..A9CE\n {"Bugi", "Java"}, # A9CF..A9CF\n None, # A9D0..FD3D\n {"Arab", "Nkoo"}, # FD3E..FD3F\n None, # FD40..FDF1\n {"Arab", "Thaa"}, # FDF2..FDF2\n None, # FDF3..FDFC\n {"Arab", "Thaa"}, # FDFD..FDFD\n None, # FDFE..FE44\n {"Bopo", "Hang", "Hani", "Hira", "Kana"}, # FE45..FE46\n None, # FE47..FF60\n {"Bopo", "Hang", "Hani", "Hira", "Kana", "Yiii"}, # FF61..FF65\n None, # FF66..FF6F\n {"Hira", "Kana"}, # FF70..FF70\n None, # FF71..FF9D\n {"Hira", "Kana"}, # FF9E..FF9F\n None, # FFA0..100FF\n {"Cpmn", "Cprt", "Linb"}, # 10100..10101\n {"Cprt", "Linb"}, # 10102..10102\n None, # 10103..10106\n {"Cprt", "Lina", "Linb"}, # 10107..10133\n None, # 10134..10136\n {"Cprt", "Linb"}, # 10137..1013F\n None, # 10140..102DF\n {"Arab", "Copt"}, # 102E0..102FB\n None, # 102FC..10AF1\n {"Mani", "Ougr"}, # 10AF2..10AF2\n None, # 10AF3..11300\n {"Gran", "Taml"}, # 11301..11301\n None, # 11302..11302\n {"Gran", "Taml"}, # 11303..11303\n None, # 11304..1133A\n {"Gran", "Taml"}, # 1133B..1133C\n None, # 1133D..11FCF\n {"Gran", "Taml"}, # 11FD0..11FD1\n None, # 11FD2..11FD2\n {"Gran", "Taml"}, # 11FD3..11FD3\n None, # 11FD4..1BC9F\n {"Dupl"}, # 1BCA0..1BCA3\n None, # 1BCA4..1D35F\n {"Hani"}, # 1D360..1D371\n None, # 1D372..1F24F\n {"Hani"}, # 1F250..1F251\n None, # 1F252..10FFFF\n]\n
.venv\Lib\site-packages\fontTools\unicodedata\ScriptExtensions.py
ScriptExtensions.py
Python
29,033
0.8
0.002421
0.040049
react-lib
421
2024-06-10T10:10:23.581058
MIT
false
8675fb5b81b4c6c61309dc5a82197da6
from __future__ import annotations\n\nfrom fontTools.misc.textTools import byteord, tostr\n\nimport re\nfrom bisect import bisect_right\nfrom typing import Literal, TypeVar, overload\n\n\ntry:\n # use unicodedata backport compatible with python2:\n # https://github.com/fonttools/unicodedata2\n from unicodedata2 import *\nexcept ImportError: # pragma: no cover\n # fall back to built-in unicodedata (possibly outdated)\n from unicodedata import *\n\nfrom . import Blocks, Mirrored, Scripts, ScriptExtensions, OTTags\n\n__all__ = [\n # names from built-in unicodedata module\n "lookup",\n "name",\n "decimal",\n "digit",\n "numeric",\n "category",\n "bidirectional",\n "combining",\n "east_asian_width",\n "mirrored",\n "decomposition",\n "normalize",\n "unidata_version",\n "ucd_3_2_0",\n # additonal functions\n "block",\n "script",\n "script_extension",\n "script_name",\n "script_code",\n "script_horizontal_direction",\n "ot_tags_from_script",\n "ot_tag_to_script",\n]\n\n\ndef mirrored(code):\n """If code (unicode codepoint) has a mirrored version returns it, otherwise None."""\n return Mirrored.MIRRORED.get(code)\n\n\ndef script(char):\n """Return the four-letter script code assigned to the Unicode character\n 'char' as string.\n\n >>> script("a")\n 'Latn'\n >>> script(",")\n 'Zyyy'\n >>> script(chr(0x10FFFF))\n 'Zzzz'\n """\n code = byteord(char)\n # 'bisect_right(a, x, lo=0, hi=len(a))' returns an insertion point which\n # comes after (to the right of) any existing entries of x in a, and it\n # partitions array a into two halves so that, for the left side\n # all(val <= x for val in a[lo:i]), and for the right side\n # all(val > x for val in a[i:hi]).\n # Our 'SCRIPT_RANGES' is a sorted list of ranges (only their starting\n # breakpoints); we want to use `bisect_right` to look up the range that\n # contains the given codepoint: i.e. whose start is less than or equal\n # to the codepoint. Thus, we subtract -1 from the index returned.\n i = bisect_right(Scripts.RANGES, code)\n return Scripts.VALUES[i - 1]\n\n\ndef script_extension(char):\n """Return the script extension property assigned to the Unicode character\n 'char' as a set of string.\n\n >>> script_extension("a") == {'Latn'}\n True\n >>> script_extension(chr(0x060C)) == {'Nkoo', 'Arab', 'Rohg', 'Thaa', 'Syrc', 'Gara', 'Yezi'}\n True\n >>> script_extension(chr(0x10FFFF)) == {'Zzzz'}\n True\n """\n code = byteord(char)\n i = bisect_right(ScriptExtensions.RANGES, code)\n value = ScriptExtensions.VALUES[i - 1]\n if value is None:\n # code points not explicitly listed for Script Extensions\n # have as their value the corresponding Script property value\n return {script(char)}\n return value\n\n\ndef script_name(code, default=KeyError):\n """Return the long, human-readable script name given a four-letter\n Unicode script code.\n\n If no matching name is found, a KeyError is raised by default.\n\n You can use the 'default' argument to return a fallback value (e.g.\n 'Unknown' or None) instead of throwing an error.\n """\n try:\n return str(Scripts.NAMES[code].replace("_", " "))\n except KeyError:\n if isinstance(default, type) and issubclass(default, KeyError):\n raise\n return default\n\n\n_normalize_re = re.compile(r"[-_ ]+")\n\n\ndef _normalize_property_name(string):\n """Remove case, strip space, '-' and '_' for loose matching."""\n return _normalize_re.sub("", string).lower()\n\n\n_SCRIPT_CODES = {_normalize_property_name(v): k for k, v in Scripts.NAMES.items()}\n\n\ndef script_code(script_name, default=KeyError):\n """Returns the four-letter Unicode script code from its long name\n\n If no matching script code is found, a KeyError is raised by default.\n\n You can use the 'default' argument to return a fallback string (e.g.\n 'Zzzz' or None) instead of throwing an error.\n """\n normalized_name = _normalize_property_name(script_name)\n try:\n return _SCRIPT_CODES[normalized_name]\n except KeyError:\n if isinstance(default, type) and issubclass(default, KeyError):\n raise\n return default\n\n\n# The data on script direction is taken from Harfbuzz source code:\n# https://github.com/harfbuzz/harfbuzz/blob/3.2.0/src/hb-common.cc#L514-L613\n# This in turn references the following "Script_Metadata" document:\n# https://docs.google.com/spreadsheets/d/1Y90M0Ie3MUJ6UVCRDOypOtijlMDLNNyyLk36T6iMu0o\nRTL_SCRIPTS = {\n # Unicode-1.1 additions\n "Arab", # Arabic\n "Hebr", # Hebrew\n # Unicode-3.0 additions\n "Syrc", # Syriac\n "Thaa", # Thaana\n # Unicode-4.0 additions\n "Cprt", # Cypriot\n # Unicode-4.1 additions\n "Khar", # Kharoshthi\n # Unicode-5.0 additions\n "Phnx", # Phoenician\n "Nkoo", # Nko\n # Unicode-5.1 additions\n "Lydi", # Lydian\n # Unicode-5.2 additions\n "Avst", # Avestan\n "Armi", # Imperial Aramaic\n "Phli", # Inscriptional Pahlavi\n "Prti", # Inscriptional Parthian\n "Sarb", # Old South Arabian\n "Orkh", # Old Turkic\n "Samr", # Samaritan\n # Unicode-6.0 additions\n "Mand", # Mandaic\n # Unicode-6.1 additions\n "Merc", # Meroitic Cursive\n "Mero", # Meroitic Hieroglyphs\n # Unicode-7.0 additions\n "Mani", # Manichaean\n "Mend", # Mende Kikakui\n "Nbat", # Nabataean\n "Narb", # Old North Arabian\n "Palm", # Palmyrene\n "Phlp", # Psalter Pahlavi\n # Unicode-8.0 additions\n "Hatr", # Hatran\n "Hung", # Old Hungarian\n # Unicode-9.0 additions\n "Adlm", # Adlam\n # Unicode-11.0 additions\n "Rohg", # Hanifi Rohingya\n "Sogo", # Old Sogdian\n "Sogd", # Sogdian\n # Unicode-12.0 additions\n "Elym", # Elymaic\n # Unicode-13.0 additions\n "Chrs", # Chorasmian\n "Yezi", # Yezidi\n # Unicode-14.0 additions\n "Ougr", # Old Uyghur\n}\n\n\nHorizDirection = Literal["RTL", "LTR"]\nT = TypeVar("T")\n\n\n@overload\ndef script_horizontal_direction(script_code: str, default: T) -> HorizDirection | T: ...\n\n\n@overload\ndef script_horizontal_direction(\n script_code: str, default: type[KeyError] = KeyError\n) -> HorizDirection: ...\n\n\ndef script_horizontal_direction(\n script_code: str, default: T | type[KeyError] = KeyError\n) -> HorizDirection | T:\n """Return "RTL" for scripts that contain right-to-left characters\n according to the Bidi_Class property. Otherwise return "LTR".\n """\n if script_code not in Scripts.NAMES:\n if isinstance(default, type) and issubclass(default, KeyError):\n raise default(script_code)\n return default\n return "RTL" if script_code in RTL_SCRIPTS else "LTR"\n\n\ndef block(char):\n """Return the block property assigned to the Unicode character 'char'\n as a string.\n\n >>> block("a")\n 'Basic Latin'\n >>> block(chr(0x060C))\n 'Arabic'\n >>> block(chr(0xEFFFF))\n 'No_Block'\n """\n code = byteord(char)\n i = bisect_right(Blocks.RANGES, code)\n return Blocks.VALUES[i - 1]\n\n\ndef ot_tags_from_script(script_code):\n """Return a list of OpenType script tags associated with a given\n Unicode script code.\n Return ['DFLT'] script tag for invalid/unknown script codes.\n """\n if script_code in OTTags.SCRIPT_EXCEPTIONS:\n return [OTTags.SCRIPT_EXCEPTIONS[script_code]]\n\n if script_code not in Scripts.NAMES:\n return [OTTags.DEFAULT_SCRIPT]\n\n script_tags = [script_code[0].lower() + script_code[1:]]\n if script_code in OTTags.NEW_SCRIPT_TAGS:\n script_tags.extend(OTTags.NEW_SCRIPT_TAGS[script_code])\n script_tags.reverse() # last in, first out\n\n return script_tags\n\n\ndef ot_tag_to_script(tag):\n """Return the Unicode script code for the given OpenType script tag, or\n None for "DFLT" tag or if there is no Unicode script associated with it.\n Raises ValueError if the tag is invalid.\n """\n tag = tostr(tag).strip()\n if not tag or " " in tag or len(tag) > 4:\n raise ValueError("invalid OpenType tag: %r" % tag)\n\n if tag in OTTags.SCRIPT_ALIASES:\n tag = OTTags.SCRIPT_ALIASES[tag]\n\n while len(tag) != 4:\n tag += str(" ") # pad with spaces\n\n if tag == OTTags.DEFAULT_SCRIPT:\n # it's unclear which Unicode script the "DFLT" OpenType tag maps to,\n # so here we return None\n return None\n\n if tag in OTTags.NEW_SCRIPT_TAGS_REVERSED:\n return OTTags.NEW_SCRIPT_TAGS_REVERSED[tag]\n\n if tag in OTTags.SCRIPT_EXCEPTIONS_REVERSED:\n return OTTags.SCRIPT_EXCEPTIONS_REVERSED[tag]\n\n # This side of the conversion is fully algorithmic\n\n # Any spaces at the end of the tag are replaced by repeating the last\n # letter. Eg 'nko ' -> 'Nkoo'.\n # Change first char to uppercase\n script_code = tag[0].upper() + tag[1]\n for i in range(2, 4):\n script_code += script_code[i - 1] if tag[i] == " " else tag[i]\n\n if script_code not in Scripts.NAMES:\n return None\n return script_code\n
.venv\Lib\site-packages\fontTools\unicodedata\__init__.py
__init__.py
Python
9,285
0.95
0.152318
0.170732
react-lib
281
2025-02-26T02:06:58.038095
MIT
false
639a3f3f594bb86413cf79afbd35168e
\n\n
.venv\Lib\site-packages\fontTools\unicodedata\__pycache__\Blocks.cpython-313.pyc
Blocks.cpython-313.pyc
Other
8,445
0.8
0.081081
0
node-utils
31
2024-05-27T20:54:33.300662
GPL-3.0
false
3b97090145eb85b2116831e1d14d85d4
\n\n
.venv\Lib\site-packages\fontTools\unicodedata\__pycache__\Mirrored.cpython-313.pyc
Mirrored.cpython-313.pyc
Other
12,047
0.8
0.009091
0.009174
node-utils
904
2024-07-28T05:20:29.428857
Apache-2.0
false
d7ab6bd495d8476b28fd963394d7e668
\n\n
.venv\Lib\site-packages\fontTools\unicodedata\__pycache__\OTTags.cpython-313.pyc
OTTags.cpython-313.pyc
Other
965
0.8
0
0
awesome-app
916
2023-10-15T15:27:27.169456
Apache-2.0
false
9f19074bfb2614d5b983688ee71d64c3
\n\n
.venv\Lib\site-packages\fontTools\unicodedata\__pycache__\ScriptExtensions.cpython-313.pyc
ScriptExtensions.cpython-313.pyc
Other
10,193
0.8
0.008287
0
python-kit
965
2024-03-28T15:08:20.096805
MIT
false
673d097ce06ddc6d2412becf551021c5
\n\n
.venv\Lib\site-packages\fontTools\unicodedata\__pycache__\Scripts.cpython-313.pyc
Scripts.cpython-313.pyc
Other
23,906
0.8
0.076023
0
python-kit
691
2024-09-29T05:42:06.236871
MIT
false
33212439e8246dde216039de8fb7b5c3
\n\n
.venv\Lib\site-packages\fontTools\unicodedata\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
8,588
0.8
0.067961
0
awesome-app
409
2023-11-19T16:43:29.771260
MIT
false
ad6ce990c192a1fa5ac9e591a7678465
from fontTools.varLib import _add_avar, load_designspace\nfrom fontTools.varLib.models import VariationModel\nfrom fontTools.varLib.varStore import VarStoreInstancer\nfrom fontTools.misc.fixedTools import fixedToFloat as fi2fl\nfrom fontTools.misc.cliTools import makeOutputFileName\nfrom itertools import product\nimport logging\n\nlog = logging.getLogger("fontTools.varLib.avar")\n\n\ndef _denormalize(v, axis):\n if v >= 0:\n return axis.defaultValue + v * (axis.maxValue - axis.defaultValue)\n else:\n return axis.defaultValue + v * (axis.defaultValue - axis.minValue)\n\n\ndef _pruneLocations(locations, poles, axisTags):\n # Now we have all the input locations, find which ones are\n # not needed and remove them.\n\n # Note: This algorithm is heavily tied to how VariationModel\n # is implemented. It assumes that input was extracted from\n # VariationModel-generated object, like an ItemVariationStore\n # created by fontmake using varLib.models.VariationModel.\n # Some CoPilot blabbering:\n # I *think* I can prove that this algorithm is correct, but\n # I'm not 100% sure. It's possible that there are edge cases\n # where this algorithm will fail. I'm not sure how to prove\n # that it's correct, but I'm also not sure how to prove that\n # it's incorrect. I'm not sure how to write a test case that\n # would prove that it's incorrect. I'm not sure how to write\n # a test case that would prove that it's correct.\n\n model = VariationModel(locations, axisTags)\n modelMapping = model.mapping\n modelSupports = model.supports\n pins = {tuple(k.items()): None for k in poles}\n for location in poles:\n i = locations.index(location)\n i = modelMapping[i]\n support = modelSupports[i]\n supportAxes = set(support.keys())\n for axisTag, (minV, _, maxV) in support.items():\n for v in (minV, maxV):\n if v in (-1, 0, 1):\n continue\n for pin in pins.keys():\n pinLocation = dict(pin)\n pinAxes = set(pinLocation.keys())\n if pinAxes != supportAxes:\n continue\n if axisTag not in pinAxes:\n continue\n if pinLocation[axisTag] == v:\n break\n else:\n # No pin found. Go through the previous masters\n # and find a suitable pin. Going backwards is\n # better because it can find a pin that is close\n # to the pole in more dimensions, and reducing\n # the total number of pins needed.\n for candidateIdx in range(i - 1, -1, -1):\n candidate = modelSupports[candidateIdx]\n candidateAxes = set(candidate.keys())\n if candidateAxes != supportAxes:\n continue\n if axisTag not in candidateAxes:\n continue\n candidate = {\n k: defaultV for k, (_, defaultV, _) in candidate.items()\n }\n if candidate[axisTag] == v:\n pins[tuple(candidate.items())] = None\n break\n else:\n assert False, "No pin found"\n return [dict(t) for t in pins.keys()]\n\n\ndef mappings_from_avar(font, denormalize=True):\n fvarAxes = font["fvar"].axes\n axisMap = {a.axisTag: a for a in fvarAxes}\n axisTags = [a.axisTag for a in fvarAxes]\n axisIndexes = {a.axisTag: i for i, a in enumerate(fvarAxes)}\n if "avar" not in font:\n return {}, {}\n avar = font["avar"]\n axisMaps = {\n tag: seg\n for tag, seg in avar.segments.items()\n if seg and seg != {-1: -1, 0: 0, 1: 1}\n }\n mappings = []\n\n if getattr(avar, "majorVersion", 1) == 2:\n varStore = avar.table.VarStore\n regions = varStore.VarRegionList.Region\n\n # Find all the input locations; this finds "poles", that are\n # locations of the peaks, and "corners", that are locations\n # of the corners of the regions. These two sets of locations\n # together constitute inputLocations to consider.\n\n poles = {(): None} # Just using it as an ordered set\n inputLocations = set({()})\n for varData in varStore.VarData:\n regionIndices = varData.VarRegionIndex\n for regionIndex in regionIndices:\n peakLocation = []\n corners = []\n region = regions[regionIndex]\n for axisIndex, axis in enumerate(region.VarRegionAxis):\n if axis.PeakCoord == 0:\n continue\n axisTag = axisTags[axisIndex]\n peakLocation.append((axisTag, axis.PeakCoord))\n corner = []\n if axis.StartCoord != 0:\n corner.append((axisTag, axis.StartCoord))\n if axis.EndCoord != 0:\n corner.append((axisTag, axis.EndCoord))\n corners.append(corner)\n corners = set(product(*corners))\n peakLocation = tuple(peakLocation)\n poles[peakLocation] = None\n inputLocations.add(peakLocation)\n inputLocations.update(corners)\n\n # Sort them by number of axes, then by axis order\n inputLocations = [\n dict(t)\n for t in sorted(\n inputLocations,\n key=lambda t: (len(t), tuple(axisIndexes[tag] for tag, _ in t)),\n )\n ]\n poles = [dict(t) for t in poles.keys()]\n inputLocations = _pruneLocations(inputLocations, list(poles), axisTags)\n\n # Find the output locations, at input locations\n varIdxMap = avar.table.VarIdxMap\n instancer = VarStoreInstancer(varStore, fvarAxes)\n for location in inputLocations:\n instancer.setLocation(location)\n outputLocation = {}\n for axisIndex, axisTag in enumerate(axisTags):\n varIdx = axisIndex\n if varIdxMap is not None:\n varIdx = varIdxMap[varIdx]\n delta = instancer[varIdx]\n if delta != 0:\n v = location.get(axisTag, 0)\n v = v + fi2fl(delta, 14)\n # See https://github.com/fonttools/fonttools/pull/3598#issuecomment-2266082009\n # v = max(-1, min(1, v))\n outputLocation[axisTag] = v\n mappings.append((location, outputLocation))\n\n # Remove base master we added, if it maps to the default location\n assert mappings[0][0] == {}\n if mappings[0][1] == {}:\n mappings.pop(0)\n\n if denormalize:\n for tag, seg in axisMaps.items():\n if tag not in axisMap:\n raise ValueError(f"Unknown axis tag {tag}")\n denorm = lambda v: _denormalize(v, axisMap[tag])\n axisMaps[tag] = {denorm(k): denorm(v) for k, v in seg.items()}\n\n for i, (inputLoc, outputLoc) in enumerate(mappings):\n inputLoc = {\n tag: _denormalize(val, axisMap[tag]) for tag, val in inputLoc.items()\n }\n outputLoc = {\n tag: _denormalize(val, axisMap[tag]) for tag, val in outputLoc.items()\n }\n mappings[i] = (inputLoc, outputLoc)\n\n return axisMaps, mappings\n\n\ndef main(args=None):\n """Add `avar` table from designspace file to variable font."""\n\n if args is None:\n import sys\n\n args = sys.argv[1:]\n\n from fontTools import configLogger\n from fontTools.ttLib import TTFont\n from fontTools.designspaceLib import DesignSpaceDocument\n import argparse\n\n parser = argparse.ArgumentParser(\n "fonttools varLib.avar",\n description="Add `avar` table from designspace file to variable font.",\n )\n parser.add_argument("font", metavar="varfont.ttf", help="Variable-font file.")\n parser.add_argument(\n "designspace",\n metavar="family.designspace",\n help="Designspace file.",\n nargs="?",\n default=None,\n )\n parser.add_argument(\n "-o",\n "--output-file",\n type=str,\n help="Output font file name.",\n )\n parser.add_argument(\n "-v", "--verbose", action="store_true", help="Run more verbosely."\n )\n\n options = parser.parse_args(args)\n\n configLogger(level=("INFO" if options.verbose else "WARNING"))\n\n font = TTFont(options.font)\n if not "fvar" in font:\n log.error("Not a variable font.")\n return 1\n\n if options.designspace is None:\n from pprint import pprint\n\n segments, mappings = mappings_from_avar(font)\n pprint(segments)\n pprint(mappings)\n print(len(mappings), "mappings")\n return\n\n axisTags = [a.axisTag for a in font["fvar"].axes]\n\n ds = load_designspace(options.designspace, require_sources=False)\n\n if "avar" in font:\n log.warning("avar table already present, overwriting.")\n del font["avar"]\n\n _add_avar(font, ds.axes, ds.axisMappings, axisTags)\n\n if options.output_file is None:\n outfile = makeOutputFileName(options.font, overWrite=True, suffix=".avar")\n else:\n outfile = options.output_file\n if outfile:\n log.info("Saving %s", outfile)\n font.save(outfile)\n\n\nif __name__ == "__main__":\n import sys\n\n sys.exit(main())\n
.venv\Lib\site-packages\fontTools\varLib\avar.py
avar.py
Python
9,907
0.95
0.223077
0.125561
python-kit
762
2024-11-14T01:24:44.186511
MIT
false
969de40593869117d3d0ac1c0919f3bf
from fontTools.ttLib import newTable\nfrom fontTools.ttLib.tables._f_v_a_r import Axis as fvarAxis\nfrom fontTools.pens.areaPen import AreaPen\nfrom fontTools.pens.basePen import NullPen\nfrom fontTools.pens.statisticsPen import StatisticsPen\nfrom fontTools.varLib.models import piecewiseLinearMap, normalizeValue\nfrom fontTools.misc.cliTools import makeOutputFileName\nimport math\nimport logging\nfrom pprint import pformat\n\n__all__ = [\n "planWeightAxis",\n "planWidthAxis",\n "planSlantAxis",\n "planOpticalSizeAxis",\n "planAxis",\n "sanitizeWeight",\n "sanitizeWidth",\n "sanitizeSlant",\n "measureWeight",\n "measureWidth",\n "measureSlant",\n "normalizeLinear",\n "normalizeLog",\n "normalizeDegrees",\n "interpolateLinear",\n "interpolateLog",\n "processAxis",\n "makeDesignspaceSnippet",\n "addEmptyAvar",\n "main",\n]\n\nlog = logging.getLogger("fontTools.varLib.avarPlanner")\n\nWEIGHTS = [\n 50,\n 100,\n 150,\n 200,\n 250,\n 300,\n 350,\n 400,\n 450,\n 500,\n 550,\n 600,\n 650,\n 700,\n 750,\n 800,\n 850,\n 900,\n 950,\n]\n\nWIDTHS = [\n 25.0,\n 37.5,\n 50.0,\n 62.5,\n 75.0,\n 87.5,\n 100.0,\n 112.5,\n 125.0,\n 137.5,\n 150.0,\n 162.5,\n 175.0,\n 187.5,\n 200.0,\n]\n\nSLANTS = list(math.degrees(math.atan(d / 20.0)) for d in range(-20, 21))\n\nSIZES = [\n 5,\n 6,\n 7,\n 8,\n 9,\n 10,\n 11,\n 12,\n 14,\n 18,\n 24,\n 30,\n 36,\n 48,\n 60,\n 72,\n 96,\n 120,\n 144,\n 192,\n 240,\n 288,\n]\n\n\nSAMPLES = 8\n\n\ndef normalizeLinear(value, rangeMin, rangeMax):\n """Linearly normalize value in [rangeMin, rangeMax] to [0, 1], with extrapolation."""\n return (value - rangeMin) / (rangeMax - rangeMin)\n\n\ndef interpolateLinear(t, a, b):\n """Linear interpolation between a and b, with t typically in [0, 1]."""\n return a + t * (b - a)\n\n\ndef normalizeLog(value, rangeMin, rangeMax):\n """Logarithmically normalize value in [rangeMin, rangeMax] to [0, 1], with extrapolation."""\n logMin = math.log(rangeMin)\n logMax = math.log(rangeMax)\n return (math.log(value) - logMin) / (logMax - logMin)\n\n\ndef interpolateLog(t, a, b):\n """Logarithmic interpolation between a and b, with t typically in [0, 1]."""\n logA = math.log(a)\n logB = math.log(b)\n return math.exp(logA + t * (logB - logA))\n\n\ndef normalizeDegrees(value, rangeMin, rangeMax):\n """Angularly normalize value in [rangeMin, rangeMax] to [0, 1], with extrapolation."""\n tanMin = math.tan(math.radians(rangeMin))\n tanMax = math.tan(math.radians(rangeMax))\n return (math.tan(math.radians(value)) - tanMin) / (tanMax - tanMin)\n\n\ndef measureWeight(glyphset, glyphs=None):\n """Measure the perceptual average weight of the given glyphs."""\n if isinstance(glyphs, dict):\n frequencies = glyphs\n else:\n frequencies = {g: 1 for g in glyphs}\n\n wght_sum = wdth_sum = 0\n for glyph_name in glyphs:\n if frequencies is not None:\n frequency = frequencies.get(glyph_name, 0)\n if frequency == 0:\n continue\n else:\n frequency = 1\n\n glyph = glyphset[glyph_name]\n\n pen = AreaPen(glyphset=glyphset)\n glyph.draw(pen)\n\n mult = glyph.width * frequency\n wght_sum += mult * abs(pen.value)\n wdth_sum += mult\n\n return wght_sum / wdth_sum\n\n\ndef measureWidth(glyphset, glyphs=None):\n """Measure the average width of the given glyphs."""\n if isinstance(glyphs, dict):\n frequencies = glyphs\n else:\n frequencies = {g: 1 for g in glyphs}\n\n wdth_sum = 0\n freq_sum = 0\n for glyph_name in glyphs:\n if frequencies is not None:\n frequency = frequencies.get(glyph_name, 0)\n if frequency == 0:\n continue\n else:\n frequency = 1\n\n glyph = glyphset[glyph_name]\n\n pen = NullPen()\n glyph.draw(pen)\n\n wdth_sum += glyph.width * frequency\n freq_sum += frequency\n\n return wdth_sum / freq_sum\n\n\ndef measureSlant(glyphset, glyphs=None):\n """Measure the perceptual average slant angle of the given glyphs."""\n if isinstance(glyphs, dict):\n frequencies = glyphs\n else:\n frequencies = {g: 1 for g in glyphs}\n\n slnt_sum = 0\n freq_sum = 0\n for glyph_name in glyphs:\n if frequencies is not None:\n frequency = frequencies.get(glyph_name, 0)\n if frequency == 0:\n continue\n else:\n frequency = 1\n\n glyph = glyphset[glyph_name]\n\n pen = StatisticsPen(glyphset=glyphset)\n glyph.draw(pen)\n\n mult = glyph.width * frequency\n slnt_sum += mult * pen.slant\n freq_sum += mult\n\n return -math.degrees(math.atan(slnt_sum / freq_sum))\n\n\ndef sanitizeWidth(userTriple, designTriple, pins, measurements):\n """Sanitize the width axis limits."""\n\n minVal, defaultVal, maxVal = (\n measurements[designTriple[0]],\n measurements[designTriple[1]],\n measurements[designTriple[2]],\n )\n\n calculatedMinVal = userTriple[1] * (minVal / defaultVal)\n calculatedMaxVal = userTriple[1] * (maxVal / defaultVal)\n\n log.info("Original width axis limits: %g:%g:%g", *userTriple)\n log.info(\n "Calculated width axis limits: %g:%g:%g",\n calculatedMinVal,\n userTriple[1],\n calculatedMaxVal,\n )\n\n if (\n abs(calculatedMinVal - userTriple[0]) / userTriple[1] > 0.05\n or abs(calculatedMaxVal - userTriple[2]) / userTriple[1] > 0.05\n ):\n log.warning("Calculated width axis min/max do not match user input.")\n log.warning(\n " Current width axis limits: %g:%g:%g",\n *userTriple,\n )\n log.warning(\n " Suggested width axis limits: %g:%g:%g",\n calculatedMinVal,\n userTriple[1],\n calculatedMaxVal,\n )\n\n return False\n\n return True\n\n\ndef sanitizeWeight(userTriple, designTriple, pins, measurements):\n """Sanitize the weight axis limits."""\n\n if len(set(userTriple)) < 3:\n return True\n\n minVal, defaultVal, maxVal = (\n measurements[designTriple[0]],\n measurements[designTriple[1]],\n measurements[designTriple[2]],\n )\n\n logMin = math.log(minVal)\n logDefault = math.log(defaultVal)\n logMax = math.log(maxVal)\n\n t = (userTriple[1] - userTriple[0]) / (userTriple[2] - userTriple[0])\n y = math.exp(logMin + t * (logMax - logMin))\n t = (y - minVal) / (maxVal - minVal)\n calculatedDefaultVal = userTriple[0] + t * (userTriple[2] - userTriple[0])\n\n log.info("Original weight axis limits: %g:%g:%g", *userTriple)\n log.info(\n "Calculated weight axis limits: %g:%g:%g",\n userTriple[0],\n calculatedDefaultVal,\n userTriple[2],\n )\n\n if abs(calculatedDefaultVal - userTriple[1]) / userTriple[1] > 0.05:\n log.warning("Calculated weight axis default does not match user input.")\n\n log.warning(\n " Current weight axis limits: %g:%g:%g",\n *userTriple,\n )\n\n log.warning(\n " Suggested weight axis limits, changing default: %g:%g:%g",\n userTriple[0],\n calculatedDefaultVal,\n userTriple[2],\n )\n\n t = (userTriple[2] - userTriple[0]) / (userTriple[1] - userTriple[0])\n y = math.exp(logMin + t * (logDefault - logMin))\n t = (y - minVal) / (defaultVal - minVal)\n calculatedMaxVal = userTriple[0] + t * (userTriple[1] - userTriple[0])\n log.warning(\n " Suggested weight axis limits, changing maximum: %g:%g:%g",\n userTriple[0],\n userTriple[1],\n calculatedMaxVal,\n )\n\n t = (userTriple[0] - userTriple[2]) / (userTriple[1] - userTriple[2])\n y = math.exp(logMax + t * (logDefault - logMax))\n t = (y - maxVal) / (defaultVal - maxVal)\n calculatedMinVal = userTriple[2] + t * (userTriple[1] - userTriple[2])\n log.warning(\n " Suggested weight axis limits, changing minimum: %g:%g:%g",\n calculatedMinVal,\n userTriple[1],\n userTriple[2],\n )\n\n return False\n\n return True\n\n\ndef sanitizeSlant(userTriple, designTriple, pins, measurements):\n """Sanitize the slant axis limits."""\n\n log.info("Original slant axis limits: %g:%g:%g", *userTriple)\n log.info(\n "Calculated slant axis limits: %g:%g:%g",\n measurements[designTriple[0]],\n measurements[designTriple[1]],\n measurements[designTriple[2]],\n )\n\n if (\n abs(measurements[designTriple[0]] - userTriple[0]) > 1\n or abs(measurements[designTriple[1]] - userTriple[1]) > 1\n or abs(measurements[designTriple[2]] - userTriple[2]) > 1\n ):\n log.warning("Calculated slant axis min/default/max do not match user input.")\n log.warning(\n " Current slant axis limits: %g:%g:%g",\n *userTriple,\n )\n log.warning(\n " Suggested slant axis limits: %g:%g:%g",\n measurements[designTriple[0]],\n measurements[designTriple[1]],\n measurements[designTriple[2]],\n )\n\n return False\n\n return True\n\n\ndef planAxis(\n measureFunc,\n normalizeFunc,\n interpolateFunc,\n glyphSetFunc,\n axisTag,\n axisLimits,\n values,\n samples=None,\n glyphs=None,\n designLimits=None,\n pins=None,\n sanitizeFunc=None,\n):\n """Plan an axis.\n\n measureFunc: callable that takes a glyphset and an optional\n list of glyphnames, and returns the glyphset-wide measurement\n to be used for the axis.\n\n normalizeFunc: callable that takes a measurement and a minimum\n and maximum, and normalizes the measurement into the range 0..1,\n possibly extrapolating too.\n\n interpolateFunc: callable that takes a normalized t value, and a\n minimum and maximum, and returns the interpolated value,\n possibly extrapolating too.\n\n glyphSetFunc: callable that takes a variations "location" dictionary,\n and returns a glyphset.\n\n axisTag: the axis tag string.\n\n axisLimits: a triple of minimum, default, and maximum values for\n the axis. Or an `fvar` Axis object.\n\n values: a list of output values to map for this axis.\n\n samples: the number of samples to use when sampling. Default 8.\n\n glyphs: a list of glyph names to use when sampling. Defaults to None,\n which will process all glyphs.\n\n designLimits: an optional triple of minimum, default, and maximum values\n represenging the "design" limits for the axis. If not provided, the\n axisLimits will be used.\n\n pins: an optional dictionary of before/after mapping entries to pin in\n the output.\n\n sanitizeFunc: an optional callable to call to sanitize the axis limits.\n """\n\n if isinstance(axisLimits, fvarAxis):\n axisLimits = (axisLimits.minValue, axisLimits.defaultValue, axisLimits.maxValue)\n minValue, defaultValue, maxValue = axisLimits\n\n if samples is None:\n samples = SAMPLES\n if glyphs is None:\n glyphs = glyphSetFunc({}).keys()\n if pins is None:\n pins = {}\n else:\n pins = pins.copy()\n\n log.info(\n "Axis limits min %g / default %g / max %g", minValue, defaultValue, maxValue\n )\n triple = (minValue, defaultValue, maxValue)\n\n if designLimits is not None:\n log.info("Axis design-limits min %g / default %g / max %g", *designLimits)\n else:\n designLimits = triple\n\n if pins:\n log.info("Pins %s", sorted(pins.items()))\n pins.update(\n {\n minValue: designLimits[0],\n defaultValue: designLimits[1],\n maxValue: designLimits[2],\n }\n )\n\n out = {}\n outNormalized = {}\n\n axisMeasurements = {}\n for value in sorted({minValue, defaultValue, maxValue} | set(pins.keys())):\n glyphset = glyphSetFunc(location={axisTag: value})\n designValue = pins[value]\n axisMeasurements[designValue] = measureFunc(glyphset, glyphs)\n\n if sanitizeFunc is not None:\n log.info("Sanitizing axis limit values for the `%s` axis.", axisTag)\n sanitizeFunc(triple, designLimits, pins, axisMeasurements)\n\n log.debug("Calculated average value:\n%s", pformat(axisMeasurements))\n\n for (rangeMin, targetMin), (rangeMax, targetMax) in zip(\n list(sorted(pins.items()))[:-1],\n list(sorted(pins.items()))[1:],\n ):\n targetValues = {w for w in values if rangeMin < w < rangeMax}\n if not targetValues:\n continue\n\n normalizedMin = normalizeValue(rangeMin, triple)\n normalizedMax = normalizeValue(rangeMax, triple)\n normalizedTargetMin = normalizeValue(targetMin, designLimits)\n normalizedTargetMax = normalizeValue(targetMax, designLimits)\n\n log.info("Planning target values %s.", sorted(targetValues))\n log.info("Sampling %u points in range %g,%g.", samples, rangeMin, rangeMax)\n valueMeasurements = axisMeasurements.copy()\n for sample in range(1, samples + 1):\n value = rangeMin + (rangeMax - rangeMin) * sample / (samples + 1)\n log.debug("Sampling value %g.", value)\n glyphset = glyphSetFunc(location={axisTag: value})\n designValue = piecewiseLinearMap(value, pins)\n valueMeasurements[designValue] = measureFunc(glyphset, glyphs)\n log.debug("Sampled average value:\n%s", pformat(valueMeasurements))\n\n measurementValue = {}\n for value in sorted(valueMeasurements):\n measurementValue[valueMeasurements[value]] = value\n\n out[rangeMin] = targetMin\n outNormalized[normalizedMin] = normalizedTargetMin\n for value in sorted(targetValues):\n t = normalizeFunc(value, rangeMin, rangeMax)\n targetMeasurement = interpolateFunc(\n t, valueMeasurements[targetMin], valueMeasurements[targetMax]\n )\n targetValue = piecewiseLinearMap(targetMeasurement, measurementValue)\n log.debug("Planned mapping value %g to %g." % (value, targetValue))\n out[value] = targetValue\n valueNormalized = normalizedMin + (value - rangeMin) / (\n rangeMax - rangeMin\n ) * (normalizedMax - normalizedMin)\n outNormalized[valueNormalized] = normalizedTargetMin + (\n targetValue - targetMin\n ) / (targetMax - targetMin) * (normalizedTargetMax - normalizedTargetMin)\n out[rangeMax] = targetMax\n outNormalized[normalizedMax] = normalizedTargetMax\n\n log.info("Planned mapping for the `%s` axis:\n%s", axisTag, pformat(out))\n log.info(\n "Planned normalized mapping for the `%s` axis:\n%s",\n axisTag,\n pformat(outNormalized),\n )\n\n if all(abs(k - v) < 0.01 for k, v in outNormalized.items()):\n log.info("Detected identity mapping for the `%s` axis. Dropping.", axisTag)\n out = {}\n outNormalized = {}\n\n return out, outNormalized\n\n\ndef planWeightAxis(\n glyphSetFunc,\n axisLimits,\n weights=None,\n samples=None,\n glyphs=None,\n designLimits=None,\n pins=None,\n sanitize=False,\n):\n """Plan a weight (`wght`) axis.\n\n weights: A list of weight values to plan for. If None, the default\n values are used.\n\n This function simply calls planAxis with values=weights, and the appropriate\n arguments. See documenation for planAxis for more information.\n """\n\n if weights is None:\n weights = WEIGHTS\n\n return planAxis(\n measureWeight,\n normalizeLinear,\n interpolateLog,\n glyphSetFunc,\n "wght",\n axisLimits,\n values=weights,\n samples=samples,\n glyphs=glyphs,\n designLimits=designLimits,\n pins=pins,\n sanitizeFunc=sanitizeWeight if sanitize else None,\n )\n\n\ndef planWidthAxis(\n glyphSetFunc,\n axisLimits,\n widths=None,\n samples=None,\n glyphs=None,\n designLimits=None,\n pins=None,\n sanitize=False,\n):\n """Plan a width (`wdth`) axis.\n\n widths: A list of width values (percentages) to plan for. If None, the default\n values are used.\n\n This function simply calls planAxis with values=widths, and the appropriate\n arguments. See documenation for planAxis for more information.\n """\n\n if widths is None:\n widths = WIDTHS\n\n return planAxis(\n measureWidth,\n normalizeLinear,\n interpolateLinear,\n glyphSetFunc,\n "wdth",\n axisLimits,\n values=widths,\n samples=samples,\n glyphs=glyphs,\n designLimits=designLimits,\n pins=pins,\n sanitizeFunc=sanitizeWidth if sanitize else None,\n )\n\n\ndef planSlantAxis(\n glyphSetFunc,\n axisLimits,\n slants=None,\n samples=None,\n glyphs=None,\n designLimits=None,\n pins=None,\n sanitize=False,\n):\n """Plan a slant (`slnt`) axis.\n\n slants: A list slant angles to plan for. If None, the default\n values are used.\n\n This function simply calls planAxis with values=slants, and the appropriate\n arguments. See documenation for planAxis for more information.\n """\n\n if slants is None:\n slants = SLANTS\n\n return planAxis(\n measureSlant,\n normalizeDegrees,\n interpolateLinear,\n glyphSetFunc,\n "slnt",\n axisLimits,\n values=slants,\n samples=samples,\n glyphs=glyphs,\n designLimits=designLimits,\n pins=pins,\n sanitizeFunc=sanitizeSlant if sanitize else None,\n )\n\n\ndef planOpticalSizeAxis(\n glyphSetFunc,\n axisLimits,\n sizes=None,\n samples=None,\n glyphs=None,\n designLimits=None,\n pins=None,\n sanitize=False,\n):\n """Plan a optical-size (`opsz`) axis.\n\n sizes: A list of optical size values to plan for. If None, the default\n values are used.\n\n This function simply calls planAxis with values=sizes, and the appropriate\n arguments. See documenation for planAxis for more information.\n """\n\n if sizes is None:\n sizes = SIZES\n\n return planAxis(\n measureWeight,\n normalizeLog,\n interpolateLog,\n glyphSetFunc,\n "opsz",\n axisLimits,\n values=sizes,\n samples=samples,\n glyphs=glyphs,\n designLimits=designLimits,\n pins=pins,\n )\n\n\ndef makeDesignspaceSnippet(axisTag, axisName, axisLimit, mapping):\n """Make a designspace snippet for a single axis."""\n\n designspaceSnippet = (\n ' <axis tag="%s" name="%s" minimum="%g" default="%g" maximum="%g"'\n % ((axisTag, axisName) + axisLimit)\n )\n if mapping:\n designspaceSnippet += ">\n"\n else:\n designspaceSnippet += "/>"\n\n for key, value in mapping.items():\n designspaceSnippet += ' <map input="%g" output="%g"/>\n' % (key, value)\n\n if mapping:\n designspaceSnippet += " </axis>"\n\n return designspaceSnippet\n\n\ndef addEmptyAvar(font):\n """Add an empty `avar` table to the font."""\n font["avar"] = avar = newTable("avar")\n for axis in fvar.axes:\n avar.segments[axis.axisTag] = {}\n\n\ndef processAxis(\n font,\n planFunc,\n axisTag,\n axisName,\n values,\n samples=None,\n glyphs=None,\n designLimits=None,\n pins=None,\n sanitize=False,\n plot=False,\n):\n """Process a single axis."""\n\n axisLimits = None\n for axis in font["fvar"].axes:\n if axis.axisTag == axisTag:\n axisLimits = axis\n break\n if axisLimits is None:\n return ""\n axisLimits = (axisLimits.minValue, axisLimits.defaultValue, axisLimits.maxValue)\n\n log.info("Planning %s axis.", axisName)\n\n if "avar" in font:\n existingMapping = font["avar"].segments[axisTag]\n font["avar"].segments[axisTag] = {}\n else:\n existingMapping = None\n\n if values is not None and isinstance(values, str):\n values = [float(w) for w in values.split()]\n\n if designLimits is not None and isinstance(designLimits, str):\n designLimits = [float(d) for d in options.designLimits.split(":")]\n assert (\n len(designLimits) == 3\n and designLimits[0] <= designLimits[1] <= designLimits[2]\n )\n else:\n designLimits = None\n\n if pins is not None and isinstance(pins, str):\n newPins = {}\n for pin in pins.split():\n before, after = pin.split(":")\n newPins[float(before)] = float(after)\n pins = newPins\n del newPins\n\n mapping, mappingNormalized = planFunc(\n font.getGlyphSet,\n axisLimits,\n values,\n samples=samples,\n glyphs=glyphs,\n designLimits=designLimits,\n pins=pins,\n sanitize=sanitize,\n )\n\n if plot:\n from matplotlib import pyplot\n\n pyplot.plot(\n sorted(mappingNormalized),\n [mappingNormalized[k] for k in sorted(mappingNormalized)],\n )\n pyplot.show()\n\n if existingMapping is not None:\n log.info("Existing %s mapping:\n%s", axisName, pformat(existingMapping))\n\n if mapping:\n if "avar" not in font:\n addEmptyAvar(font)\n font["avar"].segments[axisTag] = mappingNormalized\n else:\n if "avar" in font:\n font["avar"].segments[axisTag] = {}\n\n designspaceSnippet = makeDesignspaceSnippet(\n axisTag,\n axisName,\n axisLimits,\n mapping,\n )\n return designspaceSnippet\n\n\ndef main(args=None):\n """Plan the standard axis mappings for a variable font"""\n\n if args is None:\n import sys\n\n args = sys.argv[1:]\n\n from fontTools import configLogger\n from fontTools.ttLib import TTFont\n import argparse\n\n parser = argparse.ArgumentParser(\n "fonttools varLib.avarPlanner",\n description="Plan `avar` table for variable font",\n )\n parser.add_argument("font", metavar="varfont.ttf", help="Variable-font file.")\n parser.add_argument(\n "-o",\n "--output-file",\n type=str,\n help="Output font file name.",\n )\n parser.add_argument(\n "--weights", type=str, help="Space-separate list of weights to generate."\n )\n parser.add_argument(\n "--widths", type=str, help="Space-separate list of widths to generate."\n )\n parser.add_argument(\n "--slants", type=str, help="Space-separate list of slants to generate."\n )\n parser.add_argument(\n "--sizes", type=str, help="Space-separate list of optical-sizes to generate."\n )\n parser.add_argument("--samples", type=int, help="Number of samples.")\n parser.add_argument(\n "-s", "--sanitize", action="store_true", help="Sanitize axis limits"\n )\n parser.add_argument(\n "-g",\n "--glyphs",\n type=str,\n help="Space-separate list of glyphs to use for sampling.",\n )\n parser.add_argument(\n "--weight-design-limits",\n type=str,\n help="min:default:max in design units for the `wght` axis.",\n )\n parser.add_argument(\n "--width-design-limits",\n type=str,\n help="min:default:max in design units for the `wdth` axis.",\n )\n parser.add_argument(\n "--slant-design-limits",\n type=str,\n help="min:default:max in design units for the `slnt` axis.",\n )\n parser.add_argument(\n "--optical-size-design-limits",\n type=str,\n help="min:default:max in design units for the `opsz` axis.",\n )\n parser.add_argument(\n "--weight-pins",\n type=str,\n help="Space-separate list of before:after pins for the `wght` axis.",\n )\n parser.add_argument(\n "--width-pins",\n type=str,\n help="Space-separate list of before:after pins for the `wdth` axis.",\n )\n parser.add_argument(\n "--slant-pins",\n type=str,\n help="Space-separate list of before:after pins for the `slnt` axis.",\n )\n parser.add_argument(\n "--optical-size-pins",\n type=str,\n help="Space-separate list of before:after pins for the `opsz` axis.",\n )\n parser.add_argument(\n "-p", "--plot", action="store_true", help="Plot the resulting mapping."\n )\n\n logging_group = parser.add_mutually_exclusive_group(required=False)\n logging_group.add_argument(\n "-v", "--verbose", action="store_true", help="Run more verbosely."\n )\n logging_group.add_argument(\n "-q", "--quiet", action="store_true", help="Turn verbosity off."\n )\n\n options = parser.parse_args(args)\n\n configLogger(\n level=("DEBUG" if options.verbose else "WARNING" if options.quiet else "INFO")\n )\n\n font = TTFont(options.font)\n if not "fvar" in font:\n log.error("Not a variable font.")\n return 1\n\n if options.glyphs is not None:\n glyphs = options.glyphs.split()\n if ":" in options.glyphs:\n glyphs = {}\n for g in options.glyphs.split():\n if ":" in g:\n glyph, frequency = g.split(":")\n glyphs[glyph] = float(frequency)\n else:\n glyphs[g] = 1.0\n else:\n glyphs = None\n\n designspaceSnippets = []\n\n designspaceSnippets.append(\n processAxis(\n font,\n planWeightAxis,\n "wght",\n "Weight",\n values=options.weights,\n samples=options.samples,\n glyphs=glyphs,\n designLimits=options.weight_design_limits,\n pins=options.weight_pins,\n sanitize=options.sanitize,\n plot=options.plot,\n )\n )\n designspaceSnippets.append(\n processAxis(\n font,\n planWidthAxis,\n "wdth",\n "Width",\n values=options.widths,\n samples=options.samples,\n glyphs=glyphs,\n designLimits=options.width_design_limits,\n pins=options.width_pins,\n sanitize=options.sanitize,\n plot=options.plot,\n )\n )\n designspaceSnippets.append(\n processAxis(\n font,\n planSlantAxis,\n "slnt",\n "Slant",\n values=options.slants,\n samples=options.samples,\n glyphs=glyphs,\n designLimits=options.slant_design_limits,\n pins=options.slant_pins,\n sanitize=options.sanitize,\n plot=options.plot,\n )\n )\n designspaceSnippets.append(\n processAxis(\n font,\n planOpticalSizeAxis,\n "opsz",\n "OpticalSize",\n values=options.sizes,\n samples=options.samples,\n glyphs=glyphs,\n designLimits=options.optical_size_design_limits,\n pins=options.optical_size_pins,\n sanitize=options.sanitize,\n plot=options.plot,\n )\n )\n\n log.info("Designspace snippet:")\n for snippet in designspaceSnippets:\n if snippet:\n print(snippet)\n\n if options.output_file is None:\n outfile = makeOutputFileName(options.font, overWrite=True, suffix=".avar")\n else:\n outfile = options.output_file\n if outfile:\n log.info("Saving %s", outfile)\n font.save(outfile)\n\n\nif __name__ == "__main__":\n import sys\n\n sys.exit(main())\n
.venv\Lib\site-packages\fontTools\varLib\avarPlanner.py
avarPlanner.py
Python
28,362
0.85
0.13247
0.003563
vue-tools
522
2023-09-05T05:31:40.666513
Apache-2.0
false
11a5283af3e0cc2a83781a6b31f7c67e
from fontTools import ttLib\nfrom fontTools.ttLib.tables import otTables as ot\n\n# VariationStore\n\n\ndef buildVarRegionAxis(axisSupport):\n self = ot.VarRegionAxis()\n self.StartCoord, self.PeakCoord, self.EndCoord = [float(v) for v in axisSupport]\n return self\n\n\ndef buildSparseVarRegionAxis(axisIndex, axisSupport):\n self = ot.SparseVarRegionAxis()\n self.AxisIndex = axisIndex\n self.StartCoord, self.PeakCoord, self.EndCoord = [float(v) for v in axisSupport]\n return self\n\n\ndef buildVarRegion(support, axisTags):\n assert all(tag in axisTags for tag in support.keys()), (\n "Unknown axis tag found.",\n support,\n axisTags,\n )\n self = ot.VarRegion()\n self.VarRegionAxis = []\n for tag in axisTags:\n self.VarRegionAxis.append(buildVarRegionAxis(support.get(tag, (0, 0, 0))))\n return self\n\n\ndef buildSparseVarRegion(support, axisTags):\n assert all(tag in axisTags for tag in support.keys()), (\n "Unknown axis tag found.",\n support,\n axisTags,\n )\n self = ot.SparseVarRegion()\n self.SparseVarRegionAxis = []\n for i, tag in enumerate(axisTags):\n if tag not in support:\n continue\n self.SparseVarRegionAxis.append(\n buildSparseVarRegionAxis(i, support.get(tag, (0, 0, 0)))\n )\n self.SparseRegionCount = len(self.SparseVarRegionAxis)\n return self\n\n\ndef buildVarRegionList(supports, axisTags):\n self = ot.VarRegionList()\n self.RegionAxisCount = len(axisTags)\n self.Region = []\n for support in supports:\n self.Region.append(buildVarRegion(support, axisTags))\n self.RegionCount = len(self.Region)\n return self\n\n\ndef buildSparseVarRegionList(supports, axisTags):\n self = ot.SparseVarRegionList()\n self.RegionAxisCount = len(axisTags)\n self.Region = []\n for support in supports:\n self.Region.append(buildSparseVarRegion(support, axisTags))\n self.RegionCount = len(self.Region)\n return self\n\n\ndef _reorderItem(lst, mapping):\n return [lst[i] for i in mapping]\n\n\ndef VarData_calculateNumShorts(self, optimize=False):\n count = self.VarRegionCount\n items = self.Item\n bit_lengths = [0] * count\n for item in items:\n # The "+ (i < -1)" magic is to handle two's-compliment.\n # That is, we want to get back 7 for -128, whereas\n # bit_length() returns 8. Similarly for -65536.\n # The reason "i < -1" is used instead of "i < 0" is that\n # the latter would make it return 0 for "-1" instead of 1.\n bl = [(i + (i < -1)).bit_length() for i in item]\n bit_lengths = [max(*pair) for pair in zip(bl, bit_lengths)]\n # The addition of 8, instead of seven, is to account for the sign bit.\n # This "((b + 8) >> 3) if b else 0" when combined with the above\n # "(i + (i < -1)).bit_length()" is a faster way to compute byte-lengths\n # conforming to:\n #\n # byte_length = (0 if i == 0 else\n # 1 if -128 <= i < 128 else\n # 2 if -65536 <= i < 65536 else\n # ...)\n byte_lengths = [((b + 8) >> 3) if b else 0 for b in bit_lengths]\n\n # https://github.com/fonttools/fonttools/issues/2279\n longWords = any(b > 2 for b in byte_lengths)\n\n if optimize:\n # Reorder columns such that wider columns come before narrower columns\n mapping = []\n mapping.extend(i for i, b in enumerate(byte_lengths) if b > 2)\n mapping.extend(i for i, b in enumerate(byte_lengths) if b == 2)\n mapping.extend(i for i, b in enumerate(byte_lengths) if b == 1)\n\n byte_lengths = _reorderItem(byte_lengths, mapping)\n self.VarRegionIndex = _reorderItem(self.VarRegionIndex, mapping)\n self.VarRegionCount = len(self.VarRegionIndex)\n for i in range(len(items)):\n items[i] = _reorderItem(items[i], mapping)\n\n if longWords:\n self.NumShorts = (\n max((i for i, b in enumerate(byte_lengths) if b > 2), default=-1) + 1\n )\n self.NumShorts |= 0x8000\n else:\n self.NumShorts = (\n max((i for i, b in enumerate(byte_lengths) if b > 1), default=-1) + 1\n )\n\n self.VarRegionCount = len(self.VarRegionIndex)\n return self\n\n\not.VarData.calculateNumShorts = VarData_calculateNumShorts\n\n\ndef VarData_CalculateNumShorts(self, optimize=True):\n """Deprecated name for VarData_calculateNumShorts() which\n defaults to optimize=True. Use varData.calculateNumShorts()\n or varData.optimize()."""\n return VarData_calculateNumShorts(self, optimize=optimize)\n\n\ndef VarData_optimize(self):\n return VarData_calculateNumShorts(self, optimize=True)\n\n\not.VarData.optimize = VarData_optimize\n\n\ndef buildVarData(varRegionIndices, items, optimize=True):\n self = ot.VarData()\n self.VarRegionIndex = list(varRegionIndices)\n regionCount = self.VarRegionCount = len(self.VarRegionIndex)\n records = self.Item = []\n if items:\n for item in items:\n assert len(item) == regionCount\n records.append(list(item))\n self.ItemCount = len(self.Item)\n self.calculateNumShorts(optimize=optimize)\n return self\n\n\ndef buildVarStore(varRegionList, varDataList):\n self = ot.VarStore()\n self.Format = 1\n self.VarRegionList = varRegionList\n self.VarData = list(varDataList)\n self.VarDataCount = len(self.VarData)\n return self\n\n\ndef buildMultiVarData(varRegionIndices, items):\n self = ot.MultiVarData()\n self.Format = 1\n self.VarRegionIndex = list(varRegionIndices)\n regionCount = self.VarRegionCount = len(self.VarRegionIndex)\n records = self.Item = []\n if items:\n for item in items:\n assert len(item) == regionCount\n records.append(list(item))\n self.ItemCount = len(self.Item)\n return self\n\n\ndef buildMultiVarStore(varRegionList, multiVarDataList):\n self = ot.MultiVarStore()\n self.Format = 1\n self.SparseVarRegionList = varRegionList\n self.MultiVarData = list(multiVarDataList)\n self.MultiVarDataCount = len(self.MultiVarData)\n return self\n\n\n# Variation helpers\n\n\ndef buildVarIdxMap(varIdxes, glyphOrder):\n self = ot.VarIdxMap()\n self.mapping = {g: v for g, v in zip(glyphOrder, varIdxes)}\n return self\n\n\ndef buildDeltaSetIndexMap(varIdxes):\n mapping = list(varIdxes)\n if all(i == v for i, v in enumerate(mapping)):\n return None\n self = ot.DeltaSetIndexMap()\n self.mapping = mapping\n self.Format = 1 if len(mapping) > 0xFFFF else 0\n return self\n\n\ndef buildVarDevTable(varIdx):\n self = ot.Device()\n self.DeltaFormat = 0x8000\n self.StartSize = varIdx >> 16\n self.EndSize = varIdx & 0xFFFF\n return self\n
.venv\Lib\site-packages\fontTools\varLib\builder.py
builder.py
Python
6,824
0.95
0.293023
0.106509
vue-tools
32
2025-04-30T17:21:08.168056
MIT
false
f523afbae57d03236952db7e208489d5
from collections import namedtuple\nfrom fontTools.cffLib import (\n maxStackLimit,\n TopDictIndex,\n buildOrder,\n topDictOperators,\n topDictOperators2,\n privateDictOperators,\n privateDictOperators2,\n FDArrayIndex,\n FontDict,\n VarStoreData,\n)\nfrom io import BytesIO\nfrom fontTools.cffLib.specializer import specializeCommands, commandsToProgram\nfrom fontTools.ttLib import newTable\nfrom fontTools import varLib\nfrom fontTools.varLib.models import allEqual\nfrom fontTools.misc.loggingTools import deprecateFunction\nfrom fontTools.misc.roundTools import roundFunc\nfrom fontTools.misc.psCharStrings import T2CharString, T2OutlineExtractor\nfrom fontTools.pens.t2CharStringPen import T2CharStringPen\nfrom functools import partial\n\nfrom .errors import (\n VarLibCFFDictMergeError,\n VarLibCFFPointTypeMergeError,\n VarLibCFFHintTypeMergeError,\n VarLibMergeError,\n)\n\n\n# Backwards compatibility\nMergeDictError = VarLibCFFDictMergeError\nMergeTypeError = VarLibCFFPointTypeMergeError\n\n\ndef addCFFVarStore(varFont, varModel, varDataList, masterSupports):\n fvarTable = varFont["fvar"]\n axisKeys = [axis.axisTag for axis in fvarTable.axes]\n varTupleList = varLib.builder.buildVarRegionList(masterSupports, axisKeys)\n varStoreCFFV = varLib.builder.buildVarStore(varTupleList, varDataList)\n\n topDict = varFont["CFF2"].cff.topDictIndex[0]\n topDict.VarStore = VarStoreData(otVarStore=varStoreCFFV)\n if topDict.FDArray[0].vstore is None:\n fdArray = topDict.FDArray\n for fontDict in fdArray:\n if hasattr(fontDict, "Private"):\n fontDict.Private.vstore = topDict.VarStore\n\n\n@deprecateFunction("Use fontTools.cffLib.CFFToCFF2.convertCFFToCFF2 instead.")\ndef convertCFFtoCFF2(varFont):\n from fontTools.cffLib.CFFToCFF2 import convertCFFToCFF2\n\n return convertCFFToCFF2(varFont)\n\n\ndef conv_to_int(num):\n if isinstance(num, float) and num.is_integer():\n return int(num)\n return num\n\n\npd_blend_fields = (\n "BlueValues",\n "OtherBlues",\n "FamilyBlues",\n "FamilyOtherBlues",\n "BlueScale",\n "BlueShift",\n "BlueFuzz",\n "StdHW",\n "StdVW",\n "StemSnapH",\n "StemSnapV",\n)\n\n\ndef get_private(regionFDArrays, fd_index, ri, fd_map):\n region_fdArray = regionFDArrays[ri]\n region_fd_map = fd_map[fd_index]\n if ri in region_fd_map:\n region_fdIndex = region_fd_map[ri]\n private = region_fdArray[region_fdIndex].Private\n else:\n private = None\n return private\n\n\ndef merge_PrivateDicts(top_dicts, vsindex_dict, var_model, fd_map):\n """\n I step through the FontDicts in the FDArray of the varfont TopDict.\n For each varfont FontDict:\n\n * step through each key in FontDict.Private.\n * For each key, step through each relevant source font Private dict, and\n build a list of values to blend.\n\n The 'relevant' source fonts are selected by first getting the right\n submodel using ``vsindex_dict[vsindex]``. The indices of the\n ``subModel.locations`` are mapped to source font list indices by\n assuming the latter order is the same as the order of the\n ``var_model.locations``. I can then get the index of each subModel\n location in the list of ``var_model.locations``.\n """\n\n topDict = top_dicts[0]\n region_top_dicts = top_dicts[1:]\n if hasattr(region_top_dicts[0], "FDArray"):\n regionFDArrays = [fdTopDict.FDArray for fdTopDict in region_top_dicts]\n else:\n regionFDArrays = [[fdTopDict] for fdTopDict in region_top_dicts]\n for fd_index, font_dict in enumerate(topDict.FDArray):\n private_dict = font_dict.Private\n vsindex = getattr(private_dict, "vsindex", 0)\n # At the moment, no PrivateDict has a vsindex key, but let's support\n # how it should work. See comment at end of\n # merge_charstrings() - still need to optimize use of vsindex.\n sub_model, _ = vsindex_dict[vsindex]\n master_indices = []\n for loc in sub_model.locations[1:]:\n i = var_model.locations.index(loc) - 1\n master_indices.append(i)\n pds = [private_dict]\n last_pd = private_dict\n for ri in master_indices:\n pd = get_private(regionFDArrays, fd_index, ri, fd_map)\n # If the region font doesn't have this FontDict, just reference\n # the last one used.\n if pd is None:\n pd = last_pd\n else:\n last_pd = pd\n pds.append(pd)\n num_masters = len(pds)\n for key, value in private_dict.rawDict.items():\n dataList = []\n if key not in pd_blend_fields:\n continue\n if isinstance(value, list):\n try:\n values = [pd.rawDict[key] for pd in pds]\n except KeyError:\n print(\n "Warning: {key} in default font Private dict is "\n "missing from another font, and was "\n "discarded.".format(key=key)\n )\n continue\n try:\n values = zip(*values)\n except IndexError:\n raise VarLibCFFDictMergeError(key, value, values)\n """\n Row 0 contains the first value from each master.\n Convert each row from absolute values to relative\n values from the previous row.\n e.g for three masters, a list of values was:\n master 0 OtherBlues = [-217,-205]\n master 1 OtherBlues = [-234,-222]\n master 1 OtherBlues = [-188,-176]\n The call to zip() converts this to:\n [(-217, -234, -188), (-205, -222, -176)]\n and is converted finally to:\n OtherBlues = [[-217, 17.0, 46.0], [-205, 0.0, 0.0]]\n """\n prev_val_list = [0] * num_masters\n any_points_differ = False\n for val_list in values:\n rel_list = [\n (val - prev_val_list[i]) for (i, val) in enumerate(val_list)\n ]\n if (not any_points_differ) and not allEqual(rel_list):\n any_points_differ = True\n prev_val_list = val_list\n deltas = sub_model.getDeltas(rel_list)\n # For PrivateDict BlueValues, the default font\n # values are absolute, not relative to the prior value.\n deltas[0] = val_list[0]\n dataList.append(deltas)\n # If there are no blend values,then\n # we can collapse the blend lists.\n if not any_points_differ:\n dataList = [data[0] for data in dataList]\n else:\n values = [pd.rawDict[key] for pd in pds]\n if not allEqual(values):\n dataList = sub_model.getDeltas(values)\n else:\n dataList = values[0]\n\n # Convert numbers with no decimal part to an int\n if isinstance(dataList, list):\n for i, item in enumerate(dataList):\n if isinstance(item, list):\n for j, jtem in enumerate(item):\n dataList[i][j] = conv_to_int(jtem)\n else:\n dataList[i] = conv_to_int(item)\n else:\n dataList = conv_to_int(dataList)\n\n private_dict.rawDict[key] = dataList\n\n\ndef _cff_or_cff2(font):\n if "CFF " in font:\n return font["CFF "]\n return font["CFF2"]\n\n\ndef getfd_map(varFont, fonts_list):\n """Since a subset source font may have fewer FontDicts in their\n FDArray than the default font, we have to match up the FontDicts in\n the different fonts . We do this with the FDSelect array, and by\n assuming that the same glyph will reference matching FontDicts in\n each source font. We return a mapping from fdIndex in the default\n font to a dictionary which maps each master list index of each\n region font to the equivalent fdIndex in the region font."""\n fd_map = {}\n default_font = fonts_list[0]\n region_fonts = fonts_list[1:]\n num_regions = len(region_fonts)\n topDict = _cff_or_cff2(default_font).cff.topDictIndex[0]\n if not hasattr(topDict, "FDSelect"):\n # All glyphs reference only one FontDict.\n # Map the FD index for regions to index 0.\n fd_map[0] = {ri: 0 for ri in range(num_regions)}\n return fd_map\n\n gname_mapping = {}\n default_fdSelect = topDict.FDSelect\n glyphOrder = default_font.getGlyphOrder()\n for gid, fdIndex in enumerate(default_fdSelect):\n gname_mapping[glyphOrder[gid]] = fdIndex\n if fdIndex not in fd_map:\n fd_map[fdIndex] = {}\n for ri, region_font in enumerate(region_fonts):\n region_glyphOrder = region_font.getGlyphOrder()\n region_topDict = _cff_or_cff2(region_font).cff.topDictIndex[0]\n if not hasattr(region_topDict, "FDSelect"):\n # All the glyphs share the same FontDict. Pick any glyph.\n default_fdIndex = gname_mapping[region_glyphOrder[0]]\n fd_map[default_fdIndex][ri] = 0\n else:\n region_fdSelect = region_topDict.FDSelect\n for gid, fdIndex in enumerate(region_fdSelect):\n default_fdIndex = gname_mapping[region_glyphOrder[gid]]\n region_map = fd_map[default_fdIndex]\n if ri not in region_map:\n region_map[ri] = fdIndex\n return fd_map\n\n\nCVarData = namedtuple("CVarData", "varDataList masterSupports vsindex_dict")\n\n\ndef merge_region_fonts(varFont, model, ordered_fonts_list, glyphOrder):\n topDict = varFont["CFF2"].cff.topDictIndex[0]\n top_dicts = [topDict] + [\n _cff_or_cff2(ttFont).cff.topDictIndex[0] for ttFont in ordered_fonts_list[1:]\n ]\n num_masters = len(model.mapping)\n cvData = merge_charstrings(glyphOrder, num_masters, top_dicts, model)\n fd_map = getfd_map(varFont, ordered_fonts_list)\n merge_PrivateDicts(top_dicts, cvData.vsindex_dict, model, fd_map)\n addCFFVarStore(varFont, model, cvData.varDataList, cvData.masterSupports)\n\n\ndef _get_cs(charstrings, glyphName, filterEmpty=False):\n if glyphName not in charstrings:\n return None\n cs = charstrings[glyphName]\n\n if filterEmpty:\n cs.decompile()\n if cs.program == []: # CFF2 empty charstring\n return None\n elif (\n len(cs.program) <= 2\n and cs.program[-1] == "endchar"\n and (len(cs.program) == 1 or type(cs.program[0]) in (int, float))\n ): # CFF1 empty charstring\n return None\n\n return cs\n\n\ndef _add_new_vsindex(\n model, key, masterSupports, vsindex_dict, vsindex_by_key, varDataList\n):\n varTupleIndexes = []\n for support in model.supports[1:]:\n if support not in masterSupports:\n masterSupports.append(support)\n varTupleIndexes.append(masterSupports.index(support))\n var_data = varLib.builder.buildVarData(varTupleIndexes, None, False)\n vsindex = len(vsindex_dict)\n vsindex_by_key[key] = vsindex\n vsindex_dict[vsindex] = (model, [key])\n varDataList.append(var_data)\n return vsindex\n\n\ndef merge_charstrings(glyphOrder, num_masters, top_dicts, masterModel):\n vsindex_dict = {}\n vsindex_by_key = {}\n varDataList = []\n masterSupports = []\n default_charstrings = top_dicts[0].CharStrings\n for gid, gname in enumerate(glyphOrder):\n # interpret empty non-default masters as missing glyphs from a sparse master\n all_cs = [\n _get_cs(td.CharStrings, gname, i != 0) for i, td in enumerate(top_dicts)\n ]\n model, model_cs = masterModel.getSubModel(all_cs)\n # create the first pass CFF2 charstring, from\n # the default charstring.\n default_charstring = model_cs[0]\n var_pen = CFF2CharStringMergePen([], gname, num_masters, 0)\n # We need to override outlineExtractor because these\n # charstrings do have widths in the 'program'; we need to drop these\n # values rather than post assertion error for them.\n default_charstring.outlineExtractor = MergeOutlineExtractor\n default_charstring.draw(var_pen)\n\n # Add the coordinates from all the other regions to the\n # blend lists in the CFF2 charstring.\n region_cs = model_cs[1:]\n for region_idx, region_charstring in enumerate(region_cs, start=1):\n var_pen.restart(region_idx)\n region_charstring.outlineExtractor = MergeOutlineExtractor\n region_charstring.draw(var_pen)\n\n # Collapse each coordinate list to a blend operator and its args.\n new_cs = var_pen.getCharString(\n private=default_charstring.private,\n globalSubrs=default_charstring.globalSubrs,\n var_model=model,\n optimize=True,\n )\n default_charstrings[gname] = new_cs\n\n if not region_cs:\n continue\n\n if (not var_pen.seen_moveto) or ("blend" not in new_cs.program):\n # If this is not a marking glyph, or if there are no blend\n # arguments, then we can use vsindex 0. No need to\n # check if we need a new vsindex.\n continue\n\n # If the charstring required a new model, create\n # a VarData table to go with, and set vsindex.\n key = tuple(v is not None for v in all_cs)\n try:\n vsindex = vsindex_by_key[key]\n except KeyError:\n vsindex = _add_new_vsindex(\n model, key, masterSupports, vsindex_dict, vsindex_by_key, varDataList\n )\n # We do not need to check for an existing new_cs.private.vsindex,\n # as we know it doesn't exist yet.\n if vsindex != 0:\n new_cs.program[:0] = [vsindex, "vsindex"]\n\n # If there is no variation in any of the charstrings, then vsindex_dict\n # never gets built. This could still be needed if there is variation\n # in the PrivatDict, so we will build the default data for vsindex = 0.\n if not vsindex_dict:\n key = (True,) * num_masters\n _add_new_vsindex(\n masterModel, key, masterSupports, vsindex_dict, vsindex_by_key, varDataList\n )\n cvData = CVarData(\n varDataList=varDataList,\n masterSupports=masterSupports,\n vsindex_dict=vsindex_dict,\n )\n # XXX To do: optimize use of vsindex between the PrivateDicts and\n # charstrings\n return cvData\n\n\nclass CFFToCFF2OutlineExtractor(T2OutlineExtractor):\n """This class is used to remove the initial width from the CFF\n charstring without trying to add the width to self.nominalWidthX,\n which is None."""\n\n def popallWidth(self, evenOdd=0):\n args = self.popall()\n if not self.gotWidth:\n if evenOdd ^ (len(args) % 2):\n args = args[1:]\n self.width = self.defaultWidthX\n self.gotWidth = 1\n return args\n\n\nclass MergeOutlineExtractor(CFFToCFF2OutlineExtractor):\n """Used to extract the charstring commands - including hints - from a\n CFF charstring in order to merge it as another set of region data\n into a CFF2 variable font charstring."""\n\n def __init__(\n self,\n pen,\n localSubrs,\n globalSubrs,\n nominalWidthX,\n defaultWidthX,\n private=None,\n blender=None,\n ):\n super().__init__(\n pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private, blender\n )\n\n def countHints(self):\n args = self.popallWidth()\n self.hintCount = self.hintCount + len(args) // 2\n return args\n\n def _hint_op(self, type, args):\n self.pen.add_hint(type, args)\n\n def op_hstem(self, index):\n args = self.countHints()\n self._hint_op("hstem", args)\n\n def op_vstem(self, index):\n args = self.countHints()\n self._hint_op("vstem", args)\n\n def op_hstemhm(self, index):\n args = self.countHints()\n self._hint_op("hstemhm", args)\n\n def op_vstemhm(self, index):\n args = self.countHints()\n self._hint_op("vstemhm", args)\n\n def _get_hintmask(self, index):\n if not self.hintMaskBytes:\n args = self.countHints()\n if args:\n self._hint_op("vstemhm", args)\n self.hintMaskBytes = (self.hintCount + 7) // 8\n hintMaskBytes, index = self.callingStack[-1].getBytes(index, self.hintMaskBytes)\n return index, hintMaskBytes\n\n def op_hintmask(self, index):\n index, hintMaskBytes = self._get_hintmask(index)\n self.pen.add_hintmask("hintmask", [hintMaskBytes])\n return hintMaskBytes, index\n\n def op_cntrmask(self, index):\n index, hintMaskBytes = self._get_hintmask(index)\n self.pen.add_hintmask("cntrmask", [hintMaskBytes])\n return hintMaskBytes, index\n\n\nclass CFF2CharStringMergePen(T2CharStringPen):\n """Pen to merge Type 2 CharStrings."""\n\n def __init__(\n self, default_commands, glyphName, num_masters, master_idx, roundTolerance=0.01\n ):\n # For roundTolerance see https://github.com/fonttools/fonttools/issues/2838\n super().__init__(\n width=None, glyphSet=None, CFF2=True, roundTolerance=roundTolerance\n )\n self.pt_index = 0\n self._commands = default_commands\n self.m_index = master_idx\n self.num_masters = num_masters\n self.prev_move_idx = 0\n self.seen_moveto = False\n self.glyphName = glyphName\n self.round = roundFunc(roundTolerance, round=round)\n\n def add_point(self, point_type, pt_coords):\n if self.m_index == 0:\n self._commands.append([point_type, [pt_coords]])\n else:\n cmd = self._commands[self.pt_index]\n if cmd[0] != point_type:\n raise VarLibCFFPointTypeMergeError(\n point_type, self.pt_index, len(cmd[1]), cmd[0], self.glyphName\n )\n cmd[1].append(pt_coords)\n self.pt_index += 1\n\n def add_hint(self, hint_type, args):\n if self.m_index == 0:\n self._commands.append([hint_type, [args]])\n else:\n cmd = self._commands[self.pt_index]\n if cmd[0] != hint_type:\n raise VarLibCFFHintTypeMergeError(\n hint_type, self.pt_index, len(cmd[1]), cmd[0], self.glyphName\n )\n cmd[1].append(args)\n self.pt_index += 1\n\n def add_hintmask(self, hint_type, abs_args):\n # For hintmask, fonttools.cffLib.specializer.py expects\n # each of these to be represented by two sequential commands:\n # first holding only the operator name, with an empty arg list,\n # second with an empty string as the op name, and the mask arg list.\n if self.m_index == 0:\n self._commands.append([hint_type, []])\n self._commands.append(["", [abs_args]])\n else:\n cmd = self._commands[self.pt_index]\n if cmd[0] != hint_type:\n raise VarLibCFFHintTypeMergeError(\n hint_type, self.pt_index, len(cmd[1]), cmd[0], self.glyphName\n )\n self.pt_index += 1\n cmd = self._commands[self.pt_index]\n cmd[1].append(abs_args)\n self.pt_index += 1\n\n def _moveTo(self, pt):\n if not self.seen_moveto:\n self.seen_moveto = True\n pt_coords = self._p(pt)\n self.add_point("rmoveto", pt_coords)\n # I set prev_move_idx here because add_point()\n # can change self.pt_index.\n self.prev_move_idx = self.pt_index - 1\n\n def _lineTo(self, pt):\n pt_coords = self._p(pt)\n self.add_point("rlineto", pt_coords)\n\n def _curveToOne(self, pt1, pt2, pt3):\n _p = self._p\n pt_coords = _p(pt1) + _p(pt2) + _p(pt3)\n self.add_point("rrcurveto", pt_coords)\n\n def _closePath(self):\n pass\n\n def _endPath(self):\n pass\n\n def restart(self, region_idx):\n self.pt_index = 0\n self.m_index = region_idx\n self._p0 = (0, 0)\n\n def getCommands(self):\n return self._commands\n\n def reorder_blend_args(self, commands, get_delta_func):\n """\n We first re-order the master coordinate values.\n For a moveto to lineto, the args are now arranged as::\n\n [ [master_0 x,y], [master_1 x,y], [master_2 x,y] ]\n\n We re-arrange this to::\n\n [ [master_0 x, master_1 x, master_2 x],\n [master_0 y, master_1 y, master_2 y]\n ]\n\n If the master values are all the same, we collapse the list to\n as single value instead of a list.\n\n We then convert this to::\n\n [ [master_0 x] + [x delta tuple] + [numBlends=1]\n [master_0 y] + [y delta tuple] + [numBlends=1]\n ]\n """\n for cmd in commands:\n # arg[i] is the set of arguments for this operator from master i.\n args = cmd[1]\n m_args = zip(*args)\n # m_args[n] is now all num_master args for the i'th argument\n # for this operation.\n cmd[1] = list(m_args)\n lastOp = None\n for cmd in commands:\n op = cmd[0]\n # masks are represented by two cmd's: first has only op names,\n # second has only args.\n if lastOp in ["hintmask", "cntrmask"]:\n coord = list(cmd[1])\n if not allEqual(coord):\n raise VarLibMergeError(\n "Hintmask values cannot differ between source fonts."\n )\n cmd[1] = [coord[0][0]]\n else:\n coords = cmd[1]\n new_coords = []\n for coord in coords:\n if allEqual(coord):\n new_coords.append(coord[0])\n else:\n # convert to deltas\n deltas = get_delta_func(coord)[1:]\n coord = [coord[0]] + deltas\n coord.append(1)\n new_coords.append(coord)\n cmd[1] = new_coords\n lastOp = op\n return commands\n\n def getCharString(\n self, private=None, globalSubrs=None, var_model=None, optimize=True\n ):\n commands = self._commands\n commands = self.reorder_blend_args(\n commands, partial(var_model.getDeltas, round=self.round)\n )\n if optimize:\n commands = specializeCommands(\n commands, generalizeFirst=False, maxstack=maxStackLimit\n )\n program = commandsToProgram(commands)\n charString = T2CharString(\n program=program, private=private, globalSubrs=globalSubrs\n )\n return charString\n
.venv\Lib\site-packages\fontTools\varLib\cff.py
cff.py
Python
23,532
0.95
0.193344
0.090909
node-utils
187
2023-11-25T16:04:24.962585
MIT
false
afcdd824d97dd61c954db692ae3cce74
import textwrap\n\n\nclass VarLibError(Exception):\n """Base exception for the varLib module."""\n\n\nclass VarLibValidationError(VarLibError):\n """Raised when input data is invalid from varLib's point of view."""\n\n\nclass VarLibMergeError(VarLibError):\n """Raised when input data cannot be merged into a variable font."""\n\n def __init__(self, merger=None, **kwargs):\n self.merger = merger\n if not kwargs:\n kwargs = {}\n if "stack" in kwargs:\n self.stack = kwargs["stack"]\n del kwargs["stack"]\n else:\n self.stack = []\n self.cause = kwargs\n\n @property\n def reason(self):\n return self.__doc__\n\n def _master_name(self, ix):\n if self.merger is not None:\n ttf = self.merger.ttfs[ix]\n if "name" in ttf and ttf["name"].getBestFullName():\n return ttf["name"].getBestFullName()\n elif hasattr(ttf.reader, "file") and hasattr(ttf.reader.file, "name"):\n return ttf.reader.file.name\n return f"master number {ix}"\n\n @property\n def offender(self):\n if "expected" in self.cause and "got" in self.cause:\n index = [x == self.cause["expected"] for x in self.cause["got"]].index(\n False\n )\n master_name = self._master_name(index)\n if "location" in self.cause:\n master_name = f"{master_name} ({self.cause['location']})"\n return index, master_name\n return None, None\n\n @property\n def details(self):\n if "expected" in self.cause and "got" in self.cause:\n offender_index, offender = self.offender\n got = self.cause["got"][offender_index]\n return f"Expected to see {self.stack[0]}=={self.cause['expected']!r}, instead saw {got!r}\n"\n return ""\n\n def __str__(self):\n offender_index, offender = self.offender\n location = ""\n if offender:\n location = f"\n\nThe problem is likely to be in {offender}:\n"\n context = "".join(reversed(self.stack))\n basic = textwrap.fill(\n f"Couldn't merge the fonts, because {self.reason}. "\n f"This happened while performing the following operation: {context}",\n width=78,\n )\n return "\n\n" + basic + location + self.details\n\n\nclass ShouldBeConstant(VarLibMergeError):\n """some values were different, but should have been the same"""\n\n @property\n def details(self):\n basic_message = super().details\n\n if self.stack[0] != ".FeatureCount" or self.merger is None:\n return basic_message\n\n assert self.stack[0] == ".FeatureCount"\n offender_index, _ = self.offender\n bad_ttf = self.merger.ttfs[offender_index]\n good_ttf = next(\n ttf\n for ttf in self.merger.ttfs\n if self.stack[-1] in ttf\n and ttf[self.stack[-1]].table.FeatureList.FeatureCount\n == self.cause["expected"]\n )\n\n good_features = [\n x.FeatureTag\n for x in good_ttf[self.stack[-1]].table.FeatureList.FeatureRecord\n ]\n bad_features = [\n x.FeatureTag\n for x in bad_ttf[self.stack[-1]].table.FeatureList.FeatureRecord\n ]\n return basic_message + (\n "\nIncompatible features between masters.\n"\n f"Expected: {', '.join(good_features)}.\n"\n f"Got: {', '.join(bad_features)}.\n"\n )\n\n\nclass FoundANone(VarLibMergeError):\n """one of the values in a list was empty when it shouldn't have been"""\n\n @property\n def offender(self):\n index = [x is None for x in self.cause["got"]].index(True)\n return index, self._master_name(index)\n\n @property\n def details(self):\n cause, stack = self.cause, self.stack\n return f"{stack[0]}=={cause['got']}\n"\n\n\nclass NotANone(VarLibMergeError):\n """one of the values in a list was not empty when it should have been"""\n\n @property\n def offender(self):\n index = [x is not None for x in self.cause["got"]].index(True)\n return index, self._master_name(index)\n\n @property\n def details(self):\n cause, stack = self.cause, self.stack\n return f"{stack[0]}=={cause['got']}\n"\n\n\nclass MismatchedTypes(VarLibMergeError):\n """data had inconsistent types"""\n\n\nclass LengthsDiffer(VarLibMergeError):\n """a list of objects had inconsistent lengths"""\n\n\nclass KeysDiffer(VarLibMergeError):\n """a list of objects had different keys"""\n\n\nclass InconsistentGlyphOrder(VarLibMergeError):\n """the glyph order was inconsistent between masters"""\n\n\nclass InconsistentExtensions(VarLibMergeError):\n """the masters use extension lookups in inconsistent ways"""\n\n\nclass UnsupportedFormat(VarLibMergeError):\n """an OpenType subtable (%s) had a format I didn't expect"""\n\n def __init__(self, merger=None, **kwargs):\n super().__init__(merger, **kwargs)\n if not self.stack:\n self.stack = [".Format"]\n\n @property\n def reason(self):\n s = self.__doc__ % self.cause["subtable"]\n if "value" in self.cause:\n s += f" ({self.cause['value']!r})"\n return s\n\n\nclass InconsistentFormats(UnsupportedFormat):\n """an OpenType subtable (%s) had inconsistent formats between masters"""\n\n\nclass VarLibCFFMergeError(VarLibError):\n pass\n\n\nclass VarLibCFFDictMergeError(VarLibCFFMergeError):\n """Raised when a CFF PrivateDict cannot be merged."""\n\n def __init__(self, key, value, values):\n error_msg = (\n f"For the Private Dict key '{key}', the default font value list:"\n f"\n\t{value}\nhad a different number of values than a region font:"\n )\n for region_value in values:\n error_msg += f"\n\t{region_value}"\n self.args = (error_msg,)\n\n\nclass VarLibCFFPointTypeMergeError(VarLibCFFMergeError):\n """Raised when a CFF glyph cannot be merged because of point type differences."""\n\n def __init__(self, point_type, pt_index, m_index, default_type, glyph_name):\n error_msg = (\n f"Glyph '{glyph_name}': '{point_type}' at point index {pt_index} in "\n f"master index {m_index} differs from the default font point type "\n f"'{default_type}'"\n )\n self.args = (error_msg,)\n\n\nclass VarLibCFFHintTypeMergeError(VarLibCFFMergeError):\n """Raised when a CFF glyph cannot be merged because of hint type differences."""\n\n def __init__(self, hint_type, cmd_index, m_index, default_type, glyph_name):\n error_msg = (\n f"Glyph '{glyph_name}': '{hint_type}' at index {cmd_index} in "\n f"master index {m_index} differs from the default font hint type "\n f"'{default_type}'"\n )\n self.args = (error_msg,)\n\n\nclass VariationModelError(VarLibError):\n """Raised when a variation model is faulty."""\n
.venv\Lib\site-packages\fontTools\varLib\errors.py
errors.py
Python
7,153
0.85
0.251142
0
react-lib
176
2024-09-07T05:55:24.158530
Apache-2.0
false
c099b9ec0b3a5f512a190b65e86b4b80
from fontTools.misc.roundTools import noRound\nfrom fontTools.ttLib import TTFont, newTable\nfrom fontTools.ttLib.tables import otTables as ot\nfrom fontTools.ttLib.tables.otBase import OTTableWriter\nfrom fontTools.varLib import HVAR_FIELDS, VVAR_FIELDS, _add_VHVAR\nfrom fontTools.varLib import builder, models, varStore\nfrom fontTools.misc.fixedTools import fixedToFloat as fi2fl\nfrom fontTools.misc.cliTools import makeOutputFileName\nfrom functools import partial\nimport logging\n\nlog = logging.getLogger("fontTools.varLib.avar")\n\n\ndef _get_advance_metrics(font, axisTags, tableFields):\n # There's two ways we can go from here:\n # 1. For each glyph, at each master peak, compute the value of the\n # advance width at that peak. Then pass these all to a VariationModel\n # builder to compute back the deltas.\n # 2. For each master peak, pull out the deltas of the advance width directly,\n # and feed these to the VarStoreBuilder, forgoing the remodeling step.\n # We'll go with the second option, as it's simpler, faster, and more direct.\n gvar = font["gvar"]\n vhAdvanceDeltasAndSupports = {}\n glyphOrder = font.getGlyphOrder()\n phantomIndex = tableFields.phantomIndex\n for glyphName in glyphOrder:\n supports = []\n deltas = []\n variations = gvar.variations.get(glyphName, [])\n\n for tv in variations:\n supports.append(tv.axes)\n phantoms = tv.coordinates[-4:]\n phantoms = phantoms[phantomIndex * 2 : phantomIndex * 2 + 2]\n assert len(phantoms) == 2\n phantoms[0] = phantoms[0][phantomIndex] if phantoms[0] is not None else 0\n phantoms[1] = phantoms[1][phantomIndex] if phantoms[1] is not None else 0\n deltas.append(phantoms[1] - phantoms[0])\n\n vhAdvanceDeltasAndSupports[glyphName] = (deltas, supports)\n\n vOrigDeltasAndSupports = None # TODO\n\n return vhAdvanceDeltasAndSupports, vOrigDeltasAndSupports\n\n\ndef add_HVAR(font):\n if "HVAR" in font:\n del font["HVAR"]\n axisTags = [axis.axisTag for axis in font["fvar"].axes]\n getAdvanceMetrics = partial(_get_advance_metrics, font, axisTags, HVAR_FIELDS)\n _add_VHVAR(font, axisTags, HVAR_FIELDS, getAdvanceMetrics)\n\n\ndef add_VVAR(font):\n if "VVAR" in font:\n del font["VVAR"]\n getAdvanceMetrics = partial(_get_advance_metrics, font, axisTags, HVAR_FIELDS)\n axisTags = [axis.axisTag for axis in font["fvar"].axes]\n _add_VHVAR(font, axisTags, VVAR_FIELDS, getAdvanceMetrics)\n\n\ndef main(args=None):\n """Add `HVAR` table to variable font."""\n\n if args is None:\n import sys\n\n args = sys.argv[1:]\n\n from fontTools import configLogger\n from fontTools.designspaceLib import DesignSpaceDocument\n import argparse\n\n parser = argparse.ArgumentParser(\n "fonttools varLib.hvar",\n description="Add `HVAR` table from to variable font.",\n )\n parser.add_argument("font", metavar="varfont.ttf", help="Variable-font file.")\n parser.add_argument(\n "-o",\n "--output-file",\n type=str,\n help="Output font file name.",\n )\n\n options = parser.parse_args(args)\n\n configLogger(level="WARNING")\n\n font = TTFont(options.font)\n if not "fvar" in font:\n log.error("Not a variable font.")\n return 1\n\n add_HVAR(font)\n if "vmtx" in font:\n add_VVAR(font)\n\n if options.output_file is None:\n outfile = makeOutputFileName(options.font, overWrite=True, suffix=".hvar")\n else:\n outfile = options.output_file\n if outfile:\n log.info("Saving %s", outfile)\n font.save(outfile)\n\n\nif __name__ == "__main__":\n import sys\n\n sys.exit(main())\n
.venv\Lib\site-packages\fontTools\varLib\hvar.py
hvar.py
Python
3,808
0.95
0.159292
0.079545
vue-tools
291
2024-02-18T20:55:40.163471
MIT
false
9b482de6c3090f119ade1727e9896c91
"""\nTool to find wrong contour order between different masters, and\nother interpolatability (or lack thereof) issues.\n\nCall as:\n$ fonttools varLib.interpolatable font1 font2 ...\n"""\n\nfrom .interpolatableHelpers import *\nfrom .interpolatableTestContourOrder import test_contour_order\nfrom .interpolatableTestStartingPoint import test_starting_point\nfrom fontTools.pens.recordingPen import (\n RecordingPen,\n DecomposingRecordingPen,\n lerpRecordings,\n)\nfrom fontTools.pens.transformPen import TransformPen\nfrom fontTools.pens.statisticsPen import StatisticsPen, StatisticsControlPen\nfrom fontTools.pens.momentsPen import OpenContourError\nfrom fontTools.varLib.models import piecewiseLinearMap, normalizeLocation\nfrom fontTools.misc.fixedTools import floatToFixedToStr\nfrom fontTools.misc.transform import Transform\nfrom collections import defaultdict\nfrom types import SimpleNamespace\nfrom functools import wraps\nfrom pprint import pformat\nfrom math import sqrt, atan2, pi\nimport logging\nimport os\n\nlog = logging.getLogger("fontTools.varLib.interpolatable")\n\nDEFAULT_TOLERANCE = 0.95\nDEFAULT_KINKINESS = 0.5\nDEFAULT_KINKINESS_LENGTH = 0.002 # ratio of UPEM\nDEFAULT_UPEM = 1000\n\n\nclass Glyph:\n ITEMS = (\n "recordings",\n "greenStats",\n "controlStats",\n "greenVectors",\n "controlVectors",\n "nodeTypes",\n "isomorphisms",\n "points",\n "openContours",\n )\n\n def __init__(self, glyphname, glyphset):\n self.name = glyphname\n for item in self.ITEMS:\n setattr(self, item, [])\n self._populate(glyphset)\n\n def _fill_in(self, ix):\n for item in self.ITEMS:\n if len(getattr(self, item)) == ix:\n getattr(self, item).append(None)\n\n def _populate(self, glyphset):\n glyph = glyphset[self.name]\n self.doesnt_exist = glyph is None\n if self.doesnt_exist:\n return\n\n perContourPen = PerContourOrComponentPen(RecordingPen, glyphset=glyphset)\n try:\n glyph.draw(perContourPen, outputImpliedClosingLine=True)\n except TypeError:\n glyph.draw(perContourPen)\n self.recordings = perContourPen.value\n del perContourPen\n\n for ix, contour in enumerate(self.recordings):\n nodeTypes = [op for op, arg in contour.value]\n self.nodeTypes.append(nodeTypes)\n\n greenStats = StatisticsPen(glyphset=glyphset)\n controlStats = StatisticsControlPen(glyphset=glyphset)\n try:\n contour.replay(greenStats)\n contour.replay(controlStats)\n self.openContours.append(False)\n except OpenContourError as e:\n self.openContours.append(True)\n self._fill_in(ix)\n continue\n self.greenStats.append(greenStats)\n self.controlStats.append(controlStats)\n self.greenVectors.append(contour_vector_from_stats(greenStats))\n self.controlVectors.append(contour_vector_from_stats(controlStats))\n\n # Check starting point\n if nodeTypes[0] == "addComponent":\n self._fill_in(ix)\n continue\n\n assert nodeTypes[0] == "moveTo"\n assert nodeTypes[-1] in ("closePath", "endPath")\n points = SimpleRecordingPointPen()\n converter = SegmentToPointPen(points, False)\n contour.replay(converter)\n # points.value is a list of pt,bool where bool is true if on-curve and false if off-curve;\n # now check all rotations and mirror-rotations of the contour and build list of isomorphic\n # possible starting points.\n self.points.append(points.value)\n\n isomorphisms = []\n self.isomorphisms.append(isomorphisms)\n\n # Add rotations\n add_isomorphisms(points.value, isomorphisms, False)\n # Add mirrored rotations\n add_isomorphisms(points.value, isomorphisms, True)\n\n def draw(self, pen, countor_idx=None):\n if countor_idx is None:\n for contour in self.recordings:\n contour.draw(pen)\n else:\n self.recordings[countor_idx].draw(pen)\n\n\ndef test_gen(\n glyphsets,\n glyphs=None,\n names=None,\n ignore_missing=False,\n *,\n locations=None,\n tolerance=DEFAULT_TOLERANCE,\n kinkiness=DEFAULT_KINKINESS,\n upem=DEFAULT_UPEM,\n show_all=False,\n discrete_axes=[],\n):\n if tolerance >= 10:\n tolerance *= 0.01\n assert 0 <= tolerance <= 1\n if kinkiness >= 10:\n kinkiness *= 0.01\n assert 0 <= kinkiness\n\n names = names or [repr(g) for g in glyphsets]\n\n if glyphs is None:\n # `glyphs = glyphsets[0].keys()` is faster, certainly, but doesn't allow for sparse TTFs/OTFs given out of order\n # ... risks the sparse master being the first one, and only processing a subset of the glyphs\n glyphs = {g for glyphset in glyphsets for g in glyphset.keys()}\n\n parents, order = find_parents_and_order(\n glyphsets, locations, discrete_axes=discrete_axes\n )\n\n def grand_parent(i, glyphname):\n if i is None:\n return None\n i = parents[i]\n if i is None:\n return None\n while parents[i] is not None and glyphsets[i][glyphname] is None:\n i = parents[i]\n return i\n\n for glyph_name in glyphs:\n log.info("Testing glyph %s", glyph_name)\n allGlyphs = [Glyph(glyph_name, glyphset) for glyphset in glyphsets]\n if len([1 for glyph in allGlyphs if glyph is not None]) <= 1:\n continue\n for master_idx, (glyph, glyphset, name) in enumerate(\n zip(allGlyphs, glyphsets, names)\n ):\n if glyph.doesnt_exist:\n if not ignore_missing:\n yield (\n glyph_name,\n {\n "type": InterpolatableProblem.MISSING,\n "master": name,\n "master_idx": master_idx,\n },\n )\n continue\n\n has_open = False\n for ix, open in enumerate(glyph.openContours):\n if not open:\n continue\n has_open = True\n yield (\n glyph_name,\n {\n "type": InterpolatableProblem.OPEN_PATH,\n "master": name,\n "master_idx": master_idx,\n "contour": ix,\n },\n )\n if has_open:\n continue\n\n matchings = [None] * len(glyphsets)\n\n for m1idx in order:\n glyph1 = allGlyphs[m1idx]\n if glyph1 is None or not glyph1.nodeTypes:\n continue\n m0idx = grand_parent(m1idx, glyph_name)\n if m0idx is None:\n continue\n glyph0 = allGlyphs[m0idx]\n if glyph0 is None or not glyph0.nodeTypes:\n continue\n\n #\n # Basic compatibility checks\n #\n\n m1 = glyph0.nodeTypes\n m0 = glyph1.nodeTypes\n if len(m0) != len(m1):\n yield (\n glyph_name,\n {\n "type": InterpolatableProblem.PATH_COUNT,\n "master_1": names[m0idx],\n "master_2": names[m1idx],\n "master_1_idx": m0idx,\n "master_2_idx": m1idx,\n "value_1": len(m0),\n "value_2": len(m1),\n },\n )\n continue\n\n if m0 != m1:\n for pathIx, (nodes1, nodes2) in enumerate(zip(m0, m1)):\n if nodes1 == nodes2:\n continue\n if len(nodes1) != len(nodes2):\n yield (\n glyph_name,\n {\n "type": InterpolatableProblem.NODE_COUNT,\n "path": pathIx,\n "master_1": names[m0idx],\n "master_2": names[m1idx],\n "master_1_idx": m0idx,\n "master_2_idx": m1idx,\n "value_1": len(nodes1),\n "value_2": len(nodes2),\n },\n )\n continue\n for nodeIx, (n1, n2) in enumerate(zip(nodes1, nodes2)):\n if n1 != n2:\n yield (\n glyph_name,\n {\n "type": InterpolatableProblem.NODE_INCOMPATIBILITY,\n "path": pathIx,\n "node": nodeIx,\n "master_1": names[m0idx],\n "master_2": names[m1idx],\n "master_1_idx": m0idx,\n "master_2_idx": m1idx,\n "value_1": n1,\n "value_2": n2,\n },\n )\n continue\n\n #\n # InterpolatableProblem.CONTOUR_ORDER check\n #\n\n this_tolerance, matching = test_contour_order(glyph0, glyph1)\n if this_tolerance < tolerance:\n yield (\n glyph_name,\n {\n "type": InterpolatableProblem.CONTOUR_ORDER,\n "master_1": names[m0idx],\n "master_2": names[m1idx],\n "master_1_idx": m0idx,\n "master_2_idx": m1idx,\n "value_1": list(range(len(matching))),\n "value_2": matching,\n "tolerance": this_tolerance,\n },\n )\n matchings[m1idx] = matching\n\n #\n # wrong-start-point / weight check\n #\n\n m0Isomorphisms = glyph0.isomorphisms\n m1Isomorphisms = glyph1.isomorphisms\n m0Vectors = glyph0.greenVectors\n m1Vectors = glyph1.greenVectors\n recording0 = glyph0.recordings\n recording1 = glyph1.recordings\n\n # If contour-order is wrong, adjust it\n matching = matchings[m1idx]\n if (\n matching is not None and m1Isomorphisms\n ): # m1 is empty for composite glyphs\n m1Isomorphisms = [m1Isomorphisms[i] for i in matching]\n m1Vectors = [m1Vectors[i] for i in matching]\n recording1 = [recording1[i] for i in matching]\n\n midRecording = []\n for c0, c1 in zip(recording0, recording1):\n try:\n r = RecordingPen()\n r.value = list(lerpRecordings(c0.value, c1.value))\n midRecording.append(r)\n except ValueError:\n # Mismatch because of the reordering above\n midRecording.append(None)\n\n for ix, (contour0, contour1) in enumerate(\n zip(m0Isomorphisms, m1Isomorphisms)\n ):\n if (\n contour0 is None\n or contour1 is None\n or len(contour0) == 0\n or len(contour0) != len(contour1)\n ):\n # We already reported this; or nothing to do; or not compatible\n # after reordering above.\n continue\n\n this_tolerance, proposed_point, reverse = test_starting_point(\n glyph0, glyph1, ix, tolerance, matching\n )\n\n if this_tolerance < tolerance:\n yield (\n glyph_name,\n {\n "type": InterpolatableProblem.WRONG_START_POINT,\n "contour": ix,\n "master_1": names[m0idx],\n "master_2": names[m1idx],\n "master_1_idx": m0idx,\n "master_2_idx": m1idx,\n "value_1": 0,\n "value_2": proposed_point,\n "reversed": reverse,\n "tolerance": this_tolerance,\n },\n )\n\n # Weight check.\n #\n # If contour could be mid-interpolated, and the two\n # contours have the same area sign, proceeed.\n #\n # The sign difference can happen if it's a weirdo\n # self-intersecting contour; ignore it.\n contour = midRecording[ix]\n\n if contour and (m0Vectors[ix][0] < 0) == (m1Vectors[ix][0] < 0):\n midStats = StatisticsPen(glyphset=None)\n contour.replay(midStats)\n\n midVector = contour_vector_from_stats(midStats)\n\n m0Vec = m0Vectors[ix]\n m1Vec = m1Vectors[ix]\n size0 = m0Vec[0] * m0Vec[0]\n size1 = m1Vec[0] * m1Vec[0]\n midSize = midVector[0] * midVector[0]\n\n for overweight, problem_type in enumerate(\n (\n InterpolatableProblem.UNDERWEIGHT,\n InterpolatableProblem.OVERWEIGHT,\n )\n ):\n if overweight:\n expectedSize = max(size0, size1)\n continue\n else:\n expectedSize = sqrt(size0 * size1)\n\n log.debug(\n "%s: actual size %g; threshold size %g, master sizes: %g, %g",\n problem_type,\n midSize,\n expectedSize,\n size0,\n size1,\n )\n\n if (\n not overweight and expectedSize * tolerance > midSize + 1e-5\n ) or (overweight and 1e-5 + expectedSize / tolerance < midSize):\n try:\n if overweight:\n this_tolerance = expectedSize / midSize\n else:\n this_tolerance = midSize / expectedSize\n except ZeroDivisionError:\n this_tolerance = 0\n log.debug("tolerance %g", this_tolerance)\n yield (\n glyph_name,\n {\n "type": problem_type,\n "contour": ix,\n "master_1": names[m0idx],\n "master_2": names[m1idx],\n "master_1_idx": m0idx,\n "master_2_idx": m1idx,\n "tolerance": this_tolerance,\n },\n )\n\n #\n # "kink" detector\n #\n m0 = glyph0.points\n m1 = glyph1.points\n\n # If contour-order is wrong, adjust it\n if matchings[m1idx] is not None and m1: # m1 is empty for composite glyphs\n m1 = [m1[i] for i in matchings[m1idx]]\n\n t = 0.1 # ~sin(radian(6)) for tolerance 0.95\n deviation_threshold = (\n upem * DEFAULT_KINKINESS_LENGTH * DEFAULT_KINKINESS / kinkiness\n )\n\n for ix, (contour0, contour1) in enumerate(zip(m0, m1)):\n if (\n contour0 is None\n or contour1 is None\n or len(contour0) == 0\n or len(contour0) != len(contour1)\n ):\n # We already reported this; or nothing to do; or not compatible\n # after reordering above.\n continue\n\n # Walk the contour, keeping track of three consecutive points, with\n # middle one being an on-curve. If the three are co-linear then\n # check for kinky-ness.\n for i in range(len(contour0)):\n pt0 = contour0[i]\n pt1 = contour1[i]\n if not pt0[1] or not pt1[1]:\n # Skip off-curves\n continue\n pt0_prev = contour0[i - 1]\n pt1_prev = contour1[i - 1]\n pt0_next = contour0[(i + 1) % len(contour0)]\n pt1_next = contour1[(i + 1) % len(contour1)]\n\n if pt0_prev[1] and pt1_prev[1]:\n # At least one off-curve is required\n continue\n if pt0_prev[1] and pt1_prev[1]:\n # At least one off-curve is required\n continue\n\n pt0 = complex(*pt0[0])\n pt1 = complex(*pt1[0])\n pt0_prev = complex(*pt0_prev[0])\n pt1_prev = complex(*pt1_prev[0])\n pt0_next = complex(*pt0_next[0])\n pt1_next = complex(*pt1_next[0])\n\n # We have three consecutive points. Check whether\n # they are colinear.\n d0_prev = pt0 - pt0_prev\n d0_next = pt0_next - pt0\n d1_prev = pt1 - pt1_prev\n d1_next = pt1_next - pt1\n\n sin0 = d0_prev.real * d0_next.imag - d0_prev.imag * d0_next.real\n sin1 = d1_prev.real * d1_next.imag - d1_prev.imag * d1_next.real\n try:\n sin0 /= abs(d0_prev) * abs(d0_next)\n sin1 /= abs(d1_prev) * abs(d1_next)\n except ZeroDivisionError:\n continue\n\n if abs(sin0) > t or abs(sin1) > t:\n # Not colinear / not smooth.\n continue\n\n # Check the mid-point is actually, well, in the middle.\n dot0 = d0_prev.real * d0_next.real + d0_prev.imag * d0_next.imag\n dot1 = d1_prev.real * d1_next.real + d1_prev.imag * d1_next.imag\n if dot0 < 0 or dot1 < 0:\n # Sharp corner.\n continue\n\n # Fine, if handle ratios are similar...\n r0 = abs(d0_prev) / (abs(d0_prev) + abs(d0_next))\n r1 = abs(d1_prev) / (abs(d1_prev) + abs(d1_next))\n r_diff = abs(r0 - r1)\n if abs(r_diff) < t:\n # Smooth enough.\n continue\n\n mid = (pt0 + pt1) / 2\n mid_prev = (pt0_prev + pt1_prev) / 2\n mid_next = (pt0_next + pt1_next) / 2\n\n mid_d0 = mid - mid_prev\n mid_d1 = mid_next - mid\n\n sin_mid = mid_d0.real * mid_d1.imag - mid_d0.imag * mid_d1.real\n try:\n sin_mid /= abs(mid_d0) * abs(mid_d1)\n except ZeroDivisionError:\n continue\n\n # ...or if the angles are similar.\n if abs(sin_mid) * (tolerance * kinkiness) <= t:\n # Smooth enough.\n continue\n\n # How visible is the kink?\n\n cross = sin_mid * abs(mid_d0) * abs(mid_d1)\n arc_len = abs(mid_d0 + mid_d1)\n deviation = abs(cross / arc_len)\n if deviation < deviation_threshold:\n continue\n deviation_ratio = deviation / arc_len\n if deviation_ratio > t:\n continue\n\n this_tolerance = t / (abs(sin_mid) * kinkiness)\n\n log.debug(\n "kink: deviation %g; deviation_ratio %g; sin_mid %g; r_diff %g",\n deviation,\n deviation_ratio,\n sin_mid,\n r_diff,\n )\n log.debug("tolerance %g", this_tolerance)\n yield (\n glyph_name,\n {\n "type": InterpolatableProblem.KINK,\n "contour": ix,\n "master_1": names[m0idx],\n "master_2": names[m1idx],\n "master_1_idx": m0idx,\n "master_2_idx": m1idx,\n "value": i,\n "tolerance": this_tolerance,\n },\n )\n\n #\n # --show-all\n #\n\n if show_all:\n yield (\n glyph_name,\n {\n "type": InterpolatableProblem.NOTHING,\n "master_1": names[m0idx],\n "master_2": names[m1idx],\n "master_1_idx": m0idx,\n "master_2_idx": m1idx,\n },\n )\n\n\n@wraps(test_gen)\ndef test(*args, **kwargs):\n problems = defaultdict(list)\n for glyphname, problem in test_gen(*args, **kwargs):\n problems[glyphname].append(problem)\n return problems\n\n\ndef recursivelyAddGlyph(glyphname, glyphset, ttGlyphSet, glyf):\n if glyphname in glyphset:\n return\n glyphset[glyphname] = ttGlyphSet[glyphname]\n\n for component in getattr(glyf[glyphname], "components", []):\n recursivelyAddGlyph(component.glyphName, glyphset, ttGlyphSet, glyf)\n\n\ndef ensure_parent_dir(path):\n dirname = os.path.dirname(path)\n if dirname:\n os.makedirs(dirname, exist_ok=True)\n return path\n\n\ndef main(args=None):\n """Test for interpolatability issues between fonts"""\n import argparse\n import sys\n\n parser = argparse.ArgumentParser(\n "fonttools varLib.interpolatable",\n description=main.__doc__,\n )\n parser.add_argument(\n "--glyphs",\n action="store",\n help="Space-separate name of glyphs to check",\n )\n parser.add_argument(\n "--show-all",\n action="store_true",\n help="Show all glyph pairs, even if no problems are found",\n )\n parser.add_argument(\n "--tolerance",\n action="store",\n type=float,\n help="Error tolerance. Between 0 and 1. Default %s" % DEFAULT_TOLERANCE,\n )\n parser.add_argument(\n "--kinkiness",\n action="store",\n type=float,\n help="How aggressively report kinks. Default %s" % DEFAULT_KINKINESS,\n )\n parser.add_argument(\n "--json",\n action="store_true",\n help="Output report in JSON format",\n )\n parser.add_argument(\n "--pdf",\n action="store",\n help="Output report in PDF format",\n )\n parser.add_argument(\n "--ps",\n action="store",\n help="Output report in PostScript format",\n )\n parser.add_argument(\n "--html",\n action="store",\n help="Output report in HTML format",\n )\n parser.add_argument(\n "--quiet",\n action="store_true",\n help="Only exit with code 1 or 0, no output",\n )\n parser.add_argument(\n "--output",\n action="store",\n help="Output file for the problem report; Default: stdout",\n )\n parser.add_argument(\n "--ignore-missing",\n action="store_true",\n help="Will not report glyphs missing from sparse masters as errors",\n )\n parser.add_argument(\n "inputs",\n metavar="FILE",\n type=str,\n nargs="+",\n help="Input a single variable font / DesignSpace / Glyphs file, or multiple TTF/UFO files",\n )\n parser.add_argument(\n "--name",\n metavar="NAME",\n type=str,\n action="append",\n help="Name of the master to use in the report. If not provided, all are used.",\n )\n parser.add_argument("-v", "--verbose", action="store_true", help="Run verbosely.")\n parser.add_argument("--debug", action="store_true", help="Run with debug output.")\n\n args = parser.parse_args(args)\n\n from fontTools import configLogger\n\n configLogger(level=("INFO" if args.verbose else "WARNING"))\n if args.debug:\n configLogger(level="DEBUG")\n\n glyphs = args.glyphs.split() if args.glyphs else None\n\n from os.path import basename\n\n fonts = []\n names = []\n locations = []\n discrete_axes = set()\n upem = DEFAULT_UPEM\n\n original_args_inputs = tuple(args.inputs)\n\n if len(args.inputs) == 1:\n designspace = None\n if args.inputs[0].endswith(".designspace"):\n from fontTools.designspaceLib import DesignSpaceDocument\n\n designspace = DesignSpaceDocument.fromfile(args.inputs[0])\n args.inputs = [master.path for master in designspace.sources]\n locations = [master.location for master in designspace.sources]\n discrete_axes = {\n a.name for a in designspace.axes if not hasattr(a, "minimum")\n }\n axis_triples = {\n a.name: (a.minimum, a.default, a.maximum)\n for a in designspace.axes\n if a.name not in discrete_axes\n }\n axis_mappings = {a.name: a.map for a in designspace.axes}\n axis_triples = {\n k: tuple(piecewiseLinearMap(v, dict(axis_mappings[k])) for v in vv)\n for k, vv in axis_triples.items()\n }\n\n elif args.inputs[0].endswith((".glyphs", ".glyphspackage")):\n from glyphsLib import GSFont, to_designspace\n\n gsfont = GSFont(args.inputs[0])\n upem = gsfont.upm\n designspace = to_designspace(gsfont)\n fonts = [source.font for source in designspace.sources]\n names = ["%s-%s" % (f.info.familyName, f.info.styleName) for f in fonts]\n args.inputs = []\n locations = [master.location for master in designspace.sources]\n axis_triples = {\n a.name: (a.minimum, a.default, a.maximum) for a in designspace.axes\n }\n axis_mappings = {a.name: a.map for a in designspace.axes}\n axis_triples = {\n k: tuple(piecewiseLinearMap(v, dict(axis_mappings[k])) for v in vv)\n for k, vv in axis_triples.items()\n }\n\n elif args.inputs[0].endswith(".ttf") or args.inputs[0].endswith(".otf"):\n from fontTools.ttLib import TTFont\n\n # Is variable font?\n\n font = TTFont(args.inputs[0])\n upem = font["head"].unitsPerEm\n\n fvar = font["fvar"]\n axisMapping = {}\n for axis in fvar.axes:\n axisMapping[axis.axisTag] = {\n -1: axis.minValue,\n 0: axis.defaultValue,\n 1: axis.maxValue,\n }\n normalized = False\n if "avar" in font:\n avar = font["avar"]\n if getattr(avar.table, "VarStore", None):\n axisMapping = {tag: {-1: -1, 0: 0, 1: 1} for tag in axisMapping}\n normalized = True\n else:\n for axisTag, segments in avar.segments.items():\n fvarMapping = axisMapping[axisTag].copy()\n for location, value in segments.items():\n axisMapping[axisTag][value] = piecewiseLinearMap(\n location, fvarMapping\n )\n\n # Gather all glyphs at their "master" locations\n ttGlyphSets = {}\n glyphsets = defaultdict(dict)\n\n if "gvar" in font:\n gvar = font["gvar"]\n glyf = font["glyf"]\n\n if glyphs is None:\n glyphs = sorted(gvar.variations.keys())\n for glyphname in glyphs:\n for var in gvar.variations[glyphname]:\n locDict = {}\n loc = []\n for tag, val in sorted(var.axes.items()):\n locDict[tag] = val[1]\n loc.append((tag, val[1]))\n\n locTuple = tuple(loc)\n if locTuple not in ttGlyphSets:\n ttGlyphSets[locTuple] = font.getGlyphSet(\n location=locDict, normalized=True, recalcBounds=False\n )\n\n recursivelyAddGlyph(\n glyphname, glyphsets[locTuple], ttGlyphSets[locTuple], glyf\n )\n\n elif "CFF2" in font:\n fvarAxes = font["fvar"].axes\n cff2 = font["CFF2"].cff.topDictIndex[0]\n charstrings = cff2.CharStrings\n\n if glyphs is None:\n glyphs = sorted(charstrings.keys())\n for glyphname in glyphs:\n cs = charstrings[glyphname]\n private = cs.private\n\n # Extract vsindex for the glyph\n vsindices = {getattr(private, "vsindex", 0)}\n vsindex = getattr(private, "vsindex", 0)\n last_op = 0\n # The spec says vsindex can only appear once and must be the first\n # operator in the charstring, but we support multiple.\n # https://github.com/harfbuzz/boring-expansion-spec/issues/158\n for op in enumerate(cs.program):\n if op == "blend":\n vsindices.add(vsindex)\n elif op == "vsindex":\n assert isinstance(last_op, int)\n vsindex = last_op\n last_op = op\n\n if not hasattr(private, "vstore"):\n continue\n\n varStore = private.vstore.otVarStore\n for vsindex in vsindices:\n varData = varStore.VarData[vsindex]\n for regionIndex in varData.VarRegionIndex:\n region = varStore.VarRegionList.Region[regionIndex]\n\n locDict = {}\n loc = []\n for axisIndex, axis in enumerate(region.VarRegionAxis):\n tag = fvarAxes[axisIndex].axisTag\n val = axis.PeakCoord\n locDict[tag] = val\n loc.append((tag, val))\n\n locTuple = tuple(loc)\n if locTuple not in ttGlyphSets:\n ttGlyphSets[locTuple] = font.getGlyphSet(\n location=locDict,\n normalized=True,\n recalcBounds=False,\n )\n\n glyphset = glyphsets[locTuple]\n glyphset[glyphname] = ttGlyphSets[locTuple][glyphname]\n\n names = ["''"]\n fonts = [font.getGlyphSet()]\n locations = [{}]\n axis_triples = {a: (-1, 0, +1) for a in sorted(axisMapping.keys())}\n for locTuple in sorted(glyphsets.keys(), key=lambda v: (len(v), v)):\n name = (\n "'"\n + " ".join(\n "%s=%s"\n % (\n k,\n floatToFixedToStr(\n piecewiseLinearMap(v, axisMapping[k]), 14\n ),\n )\n for k, v in locTuple\n )\n + "'"\n )\n if normalized:\n name += " (normalized)"\n names.append(name)\n fonts.append(glyphsets[locTuple])\n locations.append(dict(locTuple))\n\n args.ignore_missing = True\n args.inputs = []\n\n if not locations:\n locations = [{} for _ in fonts]\n\n for filename in args.inputs:\n if filename.endswith(".ufo"):\n from fontTools.ufoLib import UFOReader\n\n font = UFOReader(filename)\n info = SimpleNamespace()\n font.readInfo(info)\n upem = info.unitsPerEm\n fonts.append(font)\n else:\n from fontTools.ttLib import TTFont\n\n font = TTFont(filename)\n upem = font["head"].unitsPerEm\n fonts.append(font)\n\n names.append(basename(filename).rsplit(".", 1)[0])\n\n if len(fonts) < 2:\n log.warning("Font file does not seem to be variable. Nothing to check.")\n return\n\n glyphsets = []\n for font in fonts:\n if hasattr(font, "getGlyphSet"):\n glyphset = font.getGlyphSet()\n else:\n glyphset = font\n glyphsets.append({k: glyphset[k] for k in glyphset.keys()})\n\n if args.name:\n accepted_names = set(args.name)\n glyphsets = [\n glyphset\n for name, glyphset in zip(names, glyphsets)\n if name in accepted_names\n ]\n locations = [\n location\n for name, location in zip(names, locations)\n if name in accepted_names\n ]\n names = [name for name in names if name in accepted_names]\n\n if not glyphs:\n glyphs = sorted(set([gn for glyphset in glyphsets for gn in glyphset.keys()]))\n\n glyphsSet = set(glyphs)\n for glyphset in glyphsets:\n glyphSetGlyphNames = set(glyphset.keys())\n diff = glyphsSet - glyphSetGlyphNames\n if diff:\n for gn in diff:\n glyphset[gn] = None\n\n # Normalize locations\n locations = [\n {\n **normalizeLocation(loc, axis_triples),\n **{k: v for k, v in loc.items() if k in discrete_axes},\n }\n for loc in locations\n ]\n tolerance = args.tolerance or DEFAULT_TOLERANCE\n kinkiness = args.kinkiness if args.kinkiness is not None else DEFAULT_KINKINESS\n\n try:\n log.info("Running on %d glyphsets", len(glyphsets))\n log.info("Locations: %s", pformat(locations))\n problems_gen = test_gen(\n glyphsets,\n glyphs=glyphs,\n names=names,\n locations=locations,\n upem=upem,\n ignore_missing=args.ignore_missing,\n tolerance=tolerance,\n kinkiness=kinkiness,\n show_all=args.show_all,\n discrete_axes=discrete_axes,\n )\n problems = defaultdict(list)\n\n f = (\n sys.stdout\n if args.output is None\n else open(ensure_parent_dir(args.output), "w")\n )\n\n if not args.quiet:\n if args.json:\n import json\n\n for glyphname, problem in problems_gen:\n problems[glyphname].append(problem)\n\n print(json.dumps(problems), file=f)\n else:\n last_glyphname = None\n for glyphname, p in problems_gen:\n problems[glyphname].append(p)\n\n if glyphname != last_glyphname:\n print(f"Glyph {glyphname} was not compatible:", file=f)\n last_glyphname = glyphname\n last_master_idxs = None\n\n master_idxs = (\n (p["master_idx"],)\n if "master_idx" in p\n else (p["master_1_idx"], p["master_2_idx"])\n )\n if master_idxs != last_master_idxs:\n master_names = (\n (p["master"],)\n if "master" in p\n else (p["master_1"], p["master_2"])\n )\n print(f" Masters: %s:" % ", ".join(master_names), file=f)\n last_master_idxs = master_idxs\n\n if p["type"] == InterpolatableProblem.MISSING:\n print(\n " Glyph was missing in master %s" % p["master"], file=f\n )\n elif p["type"] == InterpolatableProblem.OPEN_PATH:\n print(\n " Glyph has an open path in master %s" % p["master"],\n file=f,\n )\n elif p["type"] == InterpolatableProblem.PATH_COUNT:\n print(\n " Path count differs: %i in %s, %i in %s"\n % (\n p["value_1"],\n p["master_1"],\n p["value_2"],\n p["master_2"],\n ),\n file=f,\n )\n elif p["type"] == InterpolatableProblem.NODE_COUNT:\n print(\n " Node count differs in path %i: %i in %s, %i in %s"\n % (\n p["path"],\n p["value_1"],\n p["master_1"],\n p["value_2"],\n p["master_2"],\n ),\n file=f,\n )\n elif p["type"] == InterpolatableProblem.NODE_INCOMPATIBILITY:\n print(\n " Node %o incompatible in path %i: %s in %s, %s in %s"\n % (\n p["node"],\n p["path"],\n p["value_1"],\n p["master_1"],\n p["value_2"],\n p["master_2"],\n ),\n file=f,\n )\n elif p["type"] == InterpolatableProblem.CONTOUR_ORDER:\n print(\n " Contour order differs: %s in %s, %s in %s"\n % (\n p["value_1"],\n p["master_1"],\n p["value_2"],\n p["master_2"],\n ),\n file=f,\n )\n elif p["type"] == InterpolatableProblem.WRONG_START_POINT:\n print(\n " Contour %d start point differs: %s in %s, %s in %s; reversed: %s"\n % (\n p["contour"],\n p["value_1"],\n p["master_1"],\n p["value_2"],\n p["master_2"],\n p["reversed"],\n ),\n file=f,\n )\n elif p["type"] == InterpolatableProblem.UNDERWEIGHT:\n print(\n " Contour %d interpolation is underweight: %s, %s"\n % (\n p["contour"],\n p["master_1"],\n p["master_2"],\n ),\n file=f,\n )\n elif p["type"] == InterpolatableProblem.OVERWEIGHT:\n print(\n " Contour %d interpolation is overweight: %s, %s"\n % (\n p["contour"],\n p["master_1"],\n p["master_2"],\n ),\n file=f,\n )\n elif p["type"] == InterpolatableProblem.KINK:\n print(\n " Contour %d has a kink at %s: %s, %s"\n % (\n p["contour"],\n p["value"],\n p["master_1"],\n p["master_2"],\n ),\n file=f,\n )\n elif p["type"] == InterpolatableProblem.NOTHING:\n print(\n " Showing %s and %s"\n % (\n p["master_1"],\n p["master_2"],\n ),\n file=f,\n )\n else:\n for glyphname, problem in problems_gen:\n problems[glyphname].append(problem)\n\n problems = sort_problems(problems)\n\n for p in "ps", "pdf":\n arg = getattr(args, p)\n if arg is None:\n continue\n log.info("Writing %s to %s", p.upper(), arg)\n from .interpolatablePlot import InterpolatablePS, InterpolatablePDF\n\n PlotterClass = InterpolatablePS if p == "ps" else InterpolatablePDF\n\n with PlotterClass(\n ensure_parent_dir(arg), glyphsets=glyphsets, names=names\n ) as doc:\n doc.add_title_page(\n original_args_inputs, tolerance=tolerance, kinkiness=kinkiness\n )\n if problems:\n doc.add_summary(problems)\n doc.add_problems(problems)\n if not problems and not args.quiet:\n doc.draw_cupcake()\n if problems:\n doc.add_index()\n doc.add_table_of_contents()\n\n if args.html:\n log.info("Writing HTML to %s", args.html)\n from .interpolatablePlot import InterpolatableSVG\n\n svgs = []\n glyph_starts = {}\n with InterpolatableSVG(svgs, glyphsets=glyphsets, names=names) as svg:\n svg.add_title_page(\n original_args_inputs,\n show_tolerance=False,\n tolerance=tolerance,\n kinkiness=kinkiness,\n )\n for glyph, glyph_problems in problems.items():\n glyph_starts[len(svgs)] = glyph\n svg.add_problems(\n {glyph: glyph_problems},\n show_tolerance=False,\n show_page_number=False,\n )\n if not problems and not args.quiet:\n svg.draw_cupcake()\n\n import base64\n\n with open(ensure_parent_dir(args.html), "wb") as f:\n f.write(b"<!DOCTYPE html>\n")\n f.write(\n b'<html><body align="center" style="font-family: sans-serif; text-color: #222">\n'\n )\n f.write(b"<title>fonttools varLib.interpolatable report</title>\n")\n for i, svg in enumerate(svgs):\n if i in glyph_starts:\n f.write(f"<h1>Glyph {glyph_starts[i]}</h1>\n".encode("utf-8"))\n f.write("<img src='data:image/svg+xml;base64,".encode("utf-8"))\n f.write(base64.b64encode(svg))\n f.write(b"' />\n")\n f.write(b"<hr>\n")\n f.write(b"</body></html>\n")\n\n except Exception as e:\n e.args += original_args_inputs\n log.error(e)\n raise\n\n if problems:\n return problems\n\n\nif __name__ == "__main__":\n import sys\n\n problems = main()\n sys.exit(int(bool(problems)))\n
.venv\Lib\site-packages\fontTools\varLib\interpolatable.py
interpolatable.py
Python
46,430
0.95
0.165426
0.058934
python-kit
103
2025-04-27T06:49:30.291165
BSD-3-Clause
false
db17609330a8bbc26b4e3d3a6dc03f8d
from fontTools.ttLib.ttGlyphSet import LerpGlyphSet\nfrom fontTools.pens.basePen import AbstractPen, BasePen, DecomposingPen\nfrom fontTools.pens.pointPen import AbstractPointPen, SegmentToPointPen\nfrom fontTools.pens.recordingPen import RecordingPen, DecomposingRecordingPen\nfrom fontTools.misc.transform import Transform\nfrom collections import defaultdict, deque\nfrom math import sqrt, copysign, atan2, pi\nfrom enum import Enum\nimport itertools\n\nimport logging\n\nlog = logging.getLogger("fontTools.varLib.interpolatable")\n\n\nclass InterpolatableProblem:\n NOTHING = "nothing"\n MISSING = "missing"\n OPEN_PATH = "open_path"\n PATH_COUNT = "path_count"\n NODE_COUNT = "node_count"\n NODE_INCOMPATIBILITY = "node_incompatibility"\n CONTOUR_ORDER = "contour_order"\n WRONG_START_POINT = "wrong_start_point"\n KINK = "kink"\n UNDERWEIGHT = "underweight"\n OVERWEIGHT = "overweight"\n\n severity = {\n MISSING: 1,\n OPEN_PATH: 2,\n PATH_COUNT: 3,\n NODE_COUNT: 4,\n NODE_INCOMPATIBILITY: 5,\n CONTOUR_ORDER: 6,\n WRONG_START_POINT: 7,\n KINK: 8,\n UNDERWEIGHT: 9,\n OVERWEIGHT: 10,\n NOTHING: 11,\n }\n\n\ndef sort_problems(problems):\n """Sort problems by severity, then by glyph name, then by problem message."""\n return dict(\n sorted(\n problems.items(),\n key=lambda _: -min(\n (\n (InterpolatableProblem.severity[p["type"]] + p.get("tolerance", 0))\n for p in _[1]\n ),\n ),\n reverse=True,\n )\n )\n\n\ndef rot_list(l, k):\n """Rotate list by k items forward. Ie. item at position 0 will be\n at position k in returned list. Negative k is allowed."""\n return l[-k:] + l[:-k]\n\n\nclass PerContourPen(BasePen):\n def __init__(self, Pen, glyphset=None):\n BasePen.__init__(self, glyphset)\n self._glyphset = glyphset\n self._Pen = Pen\n self._pen = None\n self.value = []\n\n def _moveTo(self, p0):\n self._newItem()\n self._pen.moveTo(p0)\n\n def _lineTo(self, p1):\n self._pen.lineTo(p1)\n\n def _qCurveToOne(self, p1, p2):\n self._pen.qCurveTo(p1, p2)\n\n def _curveToOne(self, p1, p2, p3):\n self._pen.curveTo(p1, p2, p3)\n\n def _closePath(self):\n self._pen.closePath()\n self._pen = None\n\n def _endPath(self):\n self._pen.endPath()\n self._pen = None\n\n def _newItem(self):\n self._pen = pen = self._Pen()\n self.value.append(pen)\n\n\nclass PerContourOrComponentPen(PerContourPen):\n def addComponent(self, glyphName, transformation):\n self._newItem()\n self.value[-1].addComponent(glyphName, transformation)\n\n\nclass SimpleRecordingPointPen(AbstractPointPen):\n def __init__(self):\n self.value = []\n\n def beginPath(self, identifier=None, **kwargs):\n pass\n\n def endPath(self) -> None:\n pass\n\n def addPoint(self, pt, segmentType=None):\n self.value.append((pt, False if segmentType is None else True))\n\n\ndef vdiff_hypot2(v0, v1):\n s = 0\n for x0, x1 in zip(v0, v1):\n d = x1 - x0\n s += d * d\n return s\n\n\ndef vdiff_hypot2_complex(v0, v1):\n s = 0\n for x0, x1 in zip(v0, v1):\n d = x1 - x0\n s += d.real * d.real + d.imag * d.imag\n # This does the same but seems to be slower:\n # s += (d * d.conjugate()).real\n return s\n\n\ndef matching_cost(G, matching):\n return sum(G[i][j] for i, j in enumerate(matching))\n\n\ndef min_cost_perfect_bipartite_matching_scipy(G):\n n = len(G)\n rows, cols = linear_sum_assignment(G)\n assert (rows == list(range(n))).all()\n # Convert numpy array and integer to Python types,\n # to ensure that this is JSON-serializable.\n cols = list(int(e) for e in cols)\n return list(cols), matching_cost(G, cols)\n\n\ndef min_cost_perfect_bipartite_matching_munkres(G):\n n = len(G)\n cols = [None] * n\n for row, col in Munkres().compute(G):\n cols[row] = col\n return cols, matching_cost(G, cols)\n\n\ndef min_cost_perfect_bipartite_matching_bruteforce(G):\n n = len(G)\n\n if n > 6:\n raise Exception("Install Python module 'munkres' or 'scipy >= 0.17.0'")\n\n # Otherwise just brute-force\n permutations = itertools.permutations(range(n))\n best = list(next(permutations))\n best_cost = matching_cost(G, best)\n for p in permutations:\n cost = matching_cost(G, p)\n if cost < best_cost:\n best, best_cost = list(p), cost\n return best, best_cost\n\n\ntry:\n from scipy.optimize import linear_sum_assignment\n\n min_cost_perfect_bipartite_matching = min_cost_perfect_bipartite_matching_scipy\nexcept ImportError:\n try:\n from munkres import Munkres\n\n min_cost_perfect_bipartite_matching = (\n min_cost_perfect_bipartite_matching_munkres\n )\n except ImportError:\n min_cost_perfect_bipartite_matching = (\n min_cost_perfect_bipartite_matching_bruteforce\n )\n\n\ndef contour_vector_from_stats(stats):\n # Don't change the order of items here.\n # It's okay to add to the end, but otherwise, other\n # code depends on it. Search for "covariance".\n size = sqrt(abs(stats.area))\n return (\n copysign((size), stats.area),\n stats.meanX,\n stats.meanY,\n stats.stddevX * 2,\n stats.stddevY * 2,\n stats.correlation * size,\n )\n\n\ndef matching_for_vectors(m0, m1):\n n = len(m0)\n\n identity_matching = list(range(n))\n\n costs = [[vdiff_hypot2(v0, v1) for v1 in m1] for v0 in m0]\n (\n matching,\n matching_cost,\n ) = min_cost_perfect_bipartite_matching(costs)\n identity_cost = sum(costs[i][i] for i in range(n))\n return matching, matching_cost, identity_cost\n\n\ndef points_characteristic_bits(points):\n bits = 0\n for pt, b in reversed(points):\n bits = (bits << 1) | b\n return bits\n\n\n_NUM_ITEMS_PER_POINTS_COMPLEX_VECTOR = 4\n\n\ndef points_complex_vector(points):\n vector = []\n if not points:\n return vector\n points = [complex(*pt) for pt, _ in points]\n n = len(points)\n assert _NUM_ITEMS_PER_POINTS_COMPLEX_VECTOR == 4\n points.extend(points[: _NUM_ITEMS_PER_POINTS_COMPLEX_VECTOR - 1])\n while len(points) < _NUM_ITEMS_PER_POINTS_COMPLEX_VECTOR:\n points.extend(points[: _NUM_ITEMS_PER_POINTS_COMPLEX_VECTOR - 1])\n for i in range(n):\n # The weights are magic numbers.\n\n # The point itself\n p0 = points[i]\n vector.append(p0)\n\n # The vector to the next point\n p1 = points[i + 1]\n d0 = p1 - p0\n vector.append(d0 * 3)\n\n # The turn vector\n p2 = points[i + 2]\n d1 = p2 - p1\n vector.append(d1 - d0)\n\n # The angle to the next point, as a cross product;\n # Square root of, to match dimentionality of distance.\n cross = d0.real * d1.imag - d0.imag * d1.real\n cross = copysign(sqrt(abs(cross)), cross)\n vector.append(cross * 4)\n\n return vector\n\n\ndef add_isomorphisms(points, isomorphisms, reverse):\n reference_bits = points_characteristic_bits(points)\n n = len(points)\n\n # if points[0][0] == points[-1][0]:\n # abort\n\n if reverse:\n points = points[::-1]\n bits = points_characteristic_bits(points)\n else:\n bits = reference_bits\n\n vector = points_complex_vector(points)\n\n assert len(vector) % n == 0\n mult = len(vector) // n\n mask = (1 << n) - 1\n\n for i in range(n):\n b = ((bits << (n - i)) & mask) | (bits >> i)\n if b == reference_bits:\n isomorphisms.append(\n (rot_list(vector, -i * mult), n - 1 - i if reverse else i, reverse)\n )\n\n\ndef find_parents_and_order(glyphsets, locations, *, discrete_axes=set()):\n parents = [None] + list(range(len(glyphsets) - 1))\n order = list(range(len(glyphsets)))\n if locations:\n # Order base master first\n bases = [\n i\n for i, l in enumerate(locations)\n if all(v == 0 for k, v in l.items() if k not in discrete_axes)\n ]\n if bases:\n logging.info("Found %s base masters: %s", len(bases), bases)\n else:\n logging.warning("No base master location found")\n\n # Form a minimum spanning tree of the locations\n try:\n from scipy.sparse.csgraph import minimum_spanning_tree\n\n graph = [[0] * len(locations) for _ in range(len(locations))]\n axes = set()\n for l in locations:\n axes.update(l.keys())\n axes = sorted(axes)\n vectors = [tuple(l.get(k, 0) for k in axes) for l in locations]\n for i, j in itertools.combinations(range(len(locations)), 2):\n i_discrete_location = {\n k: v for k, v in zip(axes, vectors[i]) if k in discrete_axes\n }\n j_discrete_location = {\n k: v for k, v in zip(axes, vectors[j]) if k in discrete_axes\n }\n if i_discrete_location != j_discrete_location:\n continue\n graph[i][j] = vdiff_hypot2(vectors[i], vectors[j])\n\n tree = minimum_spanning_tree(graph, overwrite=True)\n rows, cols = tree.nonzero()\n graph = defaultdict(set)\n for row, col in zip(rows, cols):\n graph[row].add(col)\n graph[col].add(row)\n\n # Traverse graph from the base and assign parents\n parents = [None] * len(locations)\n order = []\n visited = set()\n queue = deque(bases)\n while queue:\n i = queue.popleft()\n visited.add(i)\n order.append(i)\n for j in sorted(graph[i]):\n if j not in visited:\n parents[j] = i\n queue.append(j)\n assert len(order) == len(\n parents\n ), "Not all masters are reachable; report an issue"\n\n except ImportError:\n pass\n\n log.info("Parents: %s", parents)\n log.info("Order: %s", order)\n return parents, order\n\n\ndef transform_from_stats(stats, inverse=False):\n # https://cookierobotics.com/007/\n a = stats.varianceX\n b = stats.covariance\n c = stats.varianceY\n\n delta = (((a - c) * 0.5) ** 2 + b * b) ** 0.5\n lambda1 = (a + c) * 0.5 + delta # Major eigenvalue\n lambda2 = (a + c) * 0.5 - delta # Minor eigenvalue\n theta = atan2(lambda1 - a, b) if b != 0 else (pi * 0.5 if a < c else 0)\n trans = Transform()\n\n if lambda2 < 0:\n # XXX This is a hack.\n # The problem is that the covariance matrix is singular.\n # This happens when the contour is a line, or a circle.\n # In that case, the covariance matrix is not a good\n # representation of the contour.\n # We should probably detect this earlier and avoid\n # computing the covariance matrix in the first place.\n # But for now, we just avoid the division by zero.\n lambda2 = 0\n\n if inverse:\n trans = trans.translate(-stats.meanX, -stats.meanY)\n trans = trans.rotate(-theta)\n trans = trans.scale(1 / sqrt(lambda1), 1 / sqrt(lambda2))\n else:\n trans = trans.scale(sqrt(lambda1), sqrt(lambda2))\n trans = trans.rotate(theta)\n trans = trans.translate(stats.meanX, stats.meanY)\n\n return trans\n
.venv\Lib\site-packages\fontTools\varLib\interpolatableHelpers.py
interpolatableHelpers.py
Python
11,892
0.95
0.212121
0.088889
awesome-app
610
2025-06-16T14:36:40.599647
MIT
false
0f085e35340074318e70565a67cf7b0c
from .interpolatableHelpers import *\nfrom fontTools.ttLib import TTFont\nfrom fontTools.ttLib.ttGlyphSet import LerpGlyphSet\nfrom fontTools.pens.recordingPen import (\n RecordingPen,\n DecomposingRecordingPen,\n RecordingPointPen,\n)\nfrom fontTools.pens.boundsPen import ControlBoundsPen\nfrom fontTools.pens.cairoPen import CairoPen\nfrom fontTools.pens.pointPen import (\n SegmentToPointPen,\n PointToSegmentPen,\n ReverseContourPointPen,\n)\nfrom fontTools.varLib.interpolatableHelpers import (\n PerContourOrComponentPen,\n SimpleRecordingPointPen,\n)\nfrom itertools import cycle\nfrom functools import wraps\nfrom io import BytesIO\nimport cairo\nimport math\nimport os\nimport logging\n\nlog = logging.getLogger("fontTools.varLib.interpolatable")\n\n\nclass OverridingDict(dict):\n def __init__(self, parent_dict):\n self.parent_dict = parent_dict\n\n def __missing__(self, key):\n return self.parent_dict[key]\n\n\nclass InterpolatablePlot:\n width = 8.5 * 72\n height = 11 * 72\n pad = 0.1 * 72\n title_font_size = 24\n font_size = 16\n page_number = 1\n head_color = (0.3, 0.3, 0.3)\n label_color = (0.2, 0.2, 0.2)\n border_color = (0.9, 0.9, 0.9)\n border_width = 0.5\n fill_color = (0.8, 0.8, 0.8)\n stroke_color = (0.1, 0.1, 0.1)\n stroke_width = 1\n oncurve_node_color = (0, 0.8, 0, 0.7)\n oncurve_node_diameter = 6\n offcurve_node_color = (0, 0.5, 0, 0.7)\n offcurve_node_diameter = 4\n handle_color = (0, 0.5, 0, 0.7)\n handle_width = 0.5\n corrected_start_point_color = (0, 0.9, 0, 0.7)\n corrected_start_point_size = 7\n wrong_start_point_color = (1, 0, 0, 0.7)\n start_point_color = (0, 0, 1, 0.7)\n start_arrow_length = 9\n kink_point_size = 7\n kink_point_color = (1, 0, 1, 0.7)\n kink_circle_size = 15\n kink_circle_stroke_width = 1\n kink_circle_color = (1, 0, 1, 0.7)\n contour_colors = ((1, 0, 0), (0, 0, 1), (0, 1, 0), (1, 1, 0), (1, 0, 1), (0, 1, 1))\n contour_alpha = 0.5\n weight_issue_contour_color = (0, 0, 0, 0.4)\n no_issues_label = "Your font's good! Have a cupcake..."\n no_issues_label_color = (0, 0.5, 0)\n cupcake_color = (0.3, 0, 0.3)\n cupcake = r"""\n ,@.\n ,@.@@,.\n ,@@,.@@@. @.@@@,.\n ,@@. @@@. @@. @@,.\n ,@@@.@,.@. @. @@@@,.@.@@,.\n ,@@.@. @@.@@. @,. .@' @' @@,\n ,@@. @. .@@.@@@. @@' @,\n,@. @@. @,\n@. @,@@,. , .@@,\n@,. .@,@@,. .@@,. , .@@, @, @,\n@. .@. @ @@,. , @\n @,.@@. @,. @@,. @. @,. @'\n @@||@,. @'@,. @@,. @@ @,. @'@@, @'\n \\@@@@' @,. @'@@@@' @@,. @@@' //@@@'\n |||||||| @@,. @@' ||||||| |@@@|@|| ||\n \\\\\\\ ||@@@|| ||||||| ||||||| //\n ||||||| |||||| |||||| |||||| ||\n \\\\\\ |||||| |||||| |||||| //\n |||||| ||||| ||||| ||||| ||\n \\\\\ ||||| ||||| ||||| //\n ||||| |||| ||||| |||| ||\n \\\\ |||| |||| |||| //\n ||||||||||||||||||||||||\n"""\n emoticon_color = (0, 0.3, 0.3)\n shrug = r"""\_(")_/"""\n underweight = r"""\n o\n/|\\n/ \\n"""\n overweight = r"""\n o\n/O\\n/ \\n"""\n yay = r""" \o/ """\n\n def __init__(self, out, glyphsets, names=None, **kwargs):\n self.out = out\n self.glyphsets = glyphsets\n self.names = names or [repr(g) for g in glyphsets]\n self.toc = {}\n\n for k, v in kwargs.items():\n if not hasattr(self, k):\n raise TypeError("Unknown keyword argument: %s" % k)\n setattr(self, k, v)\n\n self.panel_width = self.width / 2 - self.pad * 3\n self.panel_height = (\n self.height / 2 - self.pad * 6 - self.font_size * 2 - self.title_font_size\n )\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, traceback):\n pass\n\n def show_page(self):\n self.page_number += 1\n\n def add_title_page(\n self, files, *, show_tolerance=True, tolerance=None, kinkiness=None\n ):\n pad = self.pad\n width = self.width - 3 * self.pad\n height = self.height - 2 * self.pad\n x = y = pad\n\n self.draw_label(\n "Problem report for:",\n x=x,\n y=y,\n bold=True,\n width=width,\n font_size=self.title_font_size,\n )\n y += self.title_font_size\n\n import hashlib\n\n for file in files:\n base_file = os.path.basename(file)\n y += self.font_size + self.pad\n self.draw_label(base_file, x=x, y=y, bold=True, width=width)\n y += self.font_size + self.pad\n\n try:\n h = hashlib.sha1(open(file, "rb").read()).hexdigest()\n self.draw_label("sha1: %s" % h, x=x + pad, y=y, width=width)\n y += self.font_size\n except IsADirectoryError:\n pass\n\n if file.endswith(".ttf"):\n ttFont = TTFont(file)\n name = ttFont["name"] if "name" in ttFont else None\n if name:\n for what, nameIDs in (\n ("Family name", (21, 16, 1)),\n ("Version", (5,)),\n ):\n n = name.getFirstDebugName(nameIDs)\n if n is None:\n continue\n self.draw_label(\n "%s: %s" % (what, n), x=x + pad, y=y, width=width\n )\n y += self.font_size + self.pad\n elif file.endswith((".glyphs", ".glyphspackage")):\n from glyphsLib import GSFont\n\n f = GSFont(file)\n for what, field in (\n ("Family name", "familyName"),\n ("VersionMajor", "versionMajor"),\n ("VersionMinor", "_versionMinor"),\n ):\n self.draw_label(\n "%s: %s" % (what, getattr(f, field)),\n x=x + pad,\n y=y,\n width=width,\n )\n y += self.font_size + self.pad\n\n self.draw_legend(\n show_tolerance=show_tolerance, tolerance=tolerance, kinkiness=kinkiness\n )\n self.show_page()\n\n def draw_legend(self, *, show_tolerance=True, tolerance=None, kinkiness=None):\n cr = cairo.Context(self.surface)\n\n x = self.pad\n y = self.height - self.pad - self.font_size * 2\n width = self.width - 2 * self.pad\n\n xx = x + self.pad * 2\n xxx = x + self.pad * 4\n\n if show_tolerance:\n self.draw_label(\n "Tolerance: badness; closer to zero the worse", x=xxx, y=y, width=width\n )\n y -= self.pad + self.font_size\n\n self.draw_label("Underweight contours", x=xxx, y=y, width=width)\n cr.rectangle(xx - self.pad * 0.7, y, 1.5 * self.pad, self.font_size)\n cr.set_source_rgb(*self.fill_color)\n cr.fill_preserve()\n if self.stroke_color:\n cr.set_source_rgb(*self.stroke_color)\n cr.set_line_width(self.stroke_width)\n cr.stroke_preserve()\n cr.set_source_rgba(*self.weight_issue_contour_color)\n cr.fill()\n y -= self.pad + self.font_size\n\n self.draw_label(\n "Colored contours: contours with the wrong order", x=xxx, y=y, width=width\n )\n cr.rectangle(xx - self.pad * 0.7, y, 1.5 * self.pad, self.font_size)\n if self.fill_color:\n cr.set_source_rgb(*self.fill_color)\n cr.fill_preserve()\n if self.stroke_color:\n cr.set_source_rgb(*self.stroke_color)\n cr.set_line_width(self.stroke_width)\n cr.stroke_preserve()\n cr.set_source_rgba(*self.contour_colors[0], self.contour_alpha)\n cr.fill()\n y -= self.pad + self.font_size\n\n self.draw_label("Kink artifact", x=xxx, y=y, width=width)\n self.draw_circle(\n cr,\n x=xx,\n y=y + self.font_size * 0.5,\n diameter=self.kink_circle_size,\n stroke_width=self.kink_circle_stroke_width,\n color=self.kink_circle_color,\n )\n y -= self.pad + self.font_size\n\n self.draw_label("Point causing kink in the contour", x=xxx, y=y, width=width)\n self.draw_dot(\n cr,\n x=xx,\n y=y + self.font_size * 0.5,\n diameter=self.kink_point_size,\n color=self.kink_point_color,\n )\n y -= self.pad + self.font_size\n\n self.draw_label("Suggested new contour start point", x=xxx, y=y, width=width)\n self.draw_dot(\n cr,\n x=xx,\n y=y + self.font_size * 0.5,\n diameter=self.corrected_start_point_size,\n color=self.corrected_start_point_color,\n )\n y -= self.pad + self.font_size\n\n self.draw_label(\n "Contour start point in contours with wrong direction",\n x=xxx,\n y=y,\n width=width,\n )\n self.draw_arrow(\n cr,\n x=xx - self.start_arrow_length * 0.3,\n y=y + self.font_size * 0.5,\n color=self.wrong_start_point_color,\n )\n y -= self.pad + self.font_size\n\n self.draw_label(\n "Contour start point when the first two points overlap",\n x=xxx,\n y=y,\n width=width,\n )\n self.draw_dot(\n cr,\n x=xx,\n y=y + self.font_size * 0.5,\n diameter=self.corrected_start_point_size,\n color=self.start_point_color,\n )\n y -= self.pad + self.font_size\n\n self.draw_label("Contour start point and direction", x=xxx, y=y, width=width)\n self.draw_arrow(\n cr,\n x=xx - self.start_arrow_length * 0.3,\n y=y + self.font_size * 0.5,\n color=self.start_point_color,\n )\n y -= self.pad + self.font_size\n\n self.draw_label("Legend:", x=x, y=y, width=width, bold=True)\n y -= self.pad + self.font_size\n\n if kinkiness is not None:\n self.draw_label(\n "Kink-reporting aggressiveness: %g" % kinkiness,\n x=xxx,\n y=y,\n width=width,\n )\n y -= self.pad + self.font_size\n\n if tolerance is not None:\n self.draw_label(\n "Error tolerance: %g" % tolerance,\n x=xxx,\n y=y,\n width=width,\n )\n y -= self.pad + self.font_size\n\n self.draw_label("Parameters:", x=x, y=y, width=width, bold=True)\n y -= self.pad + self.font_size\n\n def add_summary(self, problems):\n pad = self.pad\n width = self.width - 3 * self.pad\n height = self.height - 2 * self.pad\n x = y = pad\n\n self.draw_label(\n "Summary of problems",\n x=x,\n y=y,\n bold=True,\n width=width,\n font_size=self.title_font_size,\n )\n y += self.title_font_size\n\n glyphs_per_problem = defaultdict(set)\n for glyphname, problems in sorted(problems.items()):\n for problem in problems:\n glyphs_per_problem[problem["type"]].add(glyphname)\n\n if "nothing" in glyphs_per_problem:\n del glyphs_per_problem["nothing"]\n\n for problem_type in sorted(\n glyphs_per_problem, key=lambda x: InterpolatableProblem.severity[x]\n ):\n y += self.font_size\n self.draw_label(\n "%s: %d" % (problem_type, len(glyphs_per_problem[problem_type])),\n x=x,\n y=y,\n width=width,\n bold=True,\n )\n y += self.font_size\n\n for glyphname in sorted(glyphs_per_problem[problem_type]):\n if y + self.font_size > height:\n self.show_page()\n y = self.font_size + pad\n self.draw_label(glyphname, x=x + 2 * pad, y=y, width=width - 2 * pad)\n y += self.font_size\n\n self.show_page()\n\n def _add_listing(self, title, items):\n pad = self.pad\n width = self.width - 2 * self.pad\n height = self.height - 2 * self.pad\n x = y = pad\n\n self.draw_label(\n title, x=x, y=y, bold=True, width=width, font_size=self.title_font_size\n )\n y += self.title_font_size + self.pad\n\n last_glyphname = None\n for page_no, (glyphname, problems) in items:\n if glyphname == last_glyphname:\n continue\n last_glyphname = glyphname\n if y + self.font_size > height:\n self.show_page()\n y = self.font_size + pad\n self.draw_label(glyphname, x=x + 5 * pad, y=y, width=width - 2 * pad)\n self.draw_label(str(page_no), x=x, y=y, width=4 * pad, align=1)\n y += self.font_size\n\n self.show_page()\n\n def add_table_of_contents(self):\n self._add_listing("Table of contents", sorted(self.toc.items()))\n\n def add_index(self):\n self._add_listing("Index", sorted(self.toc.items(), key=lambda x: x[1][0]))\n\n def add_problems(self, problems, *, show_tolerance=True, show_page_number=True):\n for glyph, glyph_problems in problems.items():\n last_masters = None\n current_glyph_problems = []\n for p in glyph_problems:\n masters = (\n p["master_idx"]\n if "master_idx" in p\n else (p["master_1_idx"], p["master_2_idx"])\n )\n if masters == last_masters:\n current_glyph_problems.append(p)\n continue\n # Flush\n if current_glyph_problems:\n self.add_problem(\n glyph,\n current_glyph_problems,\n show_tolerance=show_tolerance,\n show_page_number=show_page_number,\n )\n self.show_page()\n current_glyph_problems = []\n last_masters = masters\n current_glyph_problems.append(p)\n if current_glyph_problems:\n self.add_problem(\n glyph,\n current_glyph_problems,\n show_tolerance=show_tolerance,\n show_page_number=show_page_number,\n )\n self.show_page()\n\n def add_problem(\n self, glyphname, problems, *, show_tolerance=True, show_page_number=True\n ):\n if type(problems) not in (list, tuple):\n problems = [problems]\n\n self.toc[self.page_number] = (glyphname, problems)\n\n problem_type = problems[0]["type"]\n problem_types = set(problem["type"] for problem in problems)\n if not all(pt == problem_type for pt in problem_types):\n problem_type = ", ".join(sorted({problem["type"] for problem in problems}))\n\n log.info("Drawing %s: %s", glyphname, problem_type)\n\n master_keys = (\n ("master_idx",)\n if "master_idx" in problems[0]\n else ("master_1_idx", "master_2_idx")\n )\n master_indices = [problems[0][k] for k in master_keys]\n\n if problem_type == InterpolatableProblem.MISSING:\n sample_glyph = next(\n i for i, m in enumerate(self.glyphsets) if m[glyphname] is not None\n )\n master_indices.insert(0, sample_glyph)\n\n x = self.pad\n y = self.pad\n\n self.draw_label(\n "Glyph name: " + glyphname,\n x=x,\n y=y,\n color=self.head_color,\n align=0,\n bold=True,\n font_size=self.title_font_size,\n )\n tolerance = min(p.get("tolerance", 1) for p in problems)\n if tolerance < 1 and show_tolerance:\n self.draw_label(\n "tolerance: %.2f" % tolerance,\n x=x,\n y=y,\n width=self.width - 2 * self.pad,\n align=1,\n bold=True,\n )\n y += self.title_font_size + self.pad\n self.draw_label(\n "Problems: " + problem_type,\n x=x,\n y=y,\n width=self.width - 2 * self.pad,\n color=self.head_color,\n bold=True,\n )\n y += self.font_size + self.pad * 2\n\n scales = []\n for which, master_idx in enumerate(master_indices):\n glyphset = self.glyphsets[master_idx]\n name = self.names[master_idx]\n\n self.draw_label(\n name,\n x=x,\n y=y,\n color=self.label_color,\n width=self.panel_width,\n align=0.5,\n )\n y += self.font_size + self.pad\n\n if glyphset[glyphname] is not None:\n scales.append(\n self.draw_glyph(glyphset, glyphname, problems, which, x=x, y=y)\n )\n else:\n self.draw_emoticon(self.shrug, x=x, y=y)\n y += self.panel_height + self.font_size + self.pad\n\n if any(\n pt\n in (\n InterpolatableProblem.NOTHING,\n InterpolatableProblem.WRONG_START_POINT,\n InterpolatableProblem.CONTOUR_ORDER,\n InterpolatableProblem.KINK,\n InterpolatableProblem.UNDERWEIGHT,\n InterpolatableProblem.OVERWEIGHT,\n )\n for pt in problem_types\n ):\n x = self.pad + self.panel_width + self.pad\n y = self.pad\n y += self.title_font_size + self.pad * 2\n y += self.font_size + self.pad\n\n glyphset1 = self.glyphsets[master_indices[0]]\n glyphset2 = self.glyphsets[master_indices[1]]\n\n # Draw the mid-way of the two masters\n\n self.draw_label(\n "midway interpolation",\n x=x,\n y=y,\n color=self.head_color,\n width=self.panel_width,\n align=0.5,\n )\n y += self.font_size + self.pad\n\n midway_glyphset = LerpGlyphSet(glyphset1, glyphset2)\n self.draw_glyph(\n midway_glyphset,\n glyphname,\n [{"type": "midway"}]\n + [\n p\n for p in problems\n if p["type"]\n in (\n InterpolatableProblem.KINK,\n InterpolatableProblem.UNDERWEIGHT,\n InterpolatableProblem.OVERWEIGHT,\n )\n ],\n None,\n x=x,\n y=y,\n scale=min(scales),\n )\n\n y += self.panel_height + self.font_size + self.pad\n\n if any(\n pt\n in (\n InterpolatableProblem.WRONG_START_POINT,\n InterpolatableProblem.CONTOUR_ORDER,\n InterpolatableProblem.KINK,\n )\n for pt in problem_types\n ):\n # Draw the proposed fix\n\n self.draw_label(\n "proposed fix",\n x=x,\n y=y,\n color=self.head_color,\n width=self.panel_width,\n align=0.5,\n )\n y += self.font_size + self.pad\n\n overriding1 = OverridingDict(glyphset1)\n overriding2 = OverridingDict(glyphset2)\n perContourPen1 = PerContourOrComponentPen(\n RecordingPen, glyphset=overriding1\n )\n perContourPen2 = PerContourOrComponentPen(\n RecordingPen, glyphset=overriding2\n )\n glyphset1[glyphname].draw(perContourPen1)\n glyphset2[glyphname].draw(perContourPen2)\n\n for problem in problems:\n if problem["type"] == InterpolatableProblem.CONTOUR_ORDER:\n fixed_contours = [\n perContourPen2.value[i] for i in problems[0]["value_2"]\n ]\n perContourPen2.value = fixed_contours\n\n for problem in problems:\n if problem["type"] == InterpolatableProblem.WRONG_START_POINT:\n # Save the wrong contours\n wrongContour1 = perContourPen1.value[problem["contour"]]\n wrongContour2 = perContourPen2.value[problem["contour"]]\n\n # Convert the wrong contours to point pens\n points1 = RecordingPointPen()\n converter = SegmentToPointPen(points1, False)\n wrongContour1.replay(converter)\n points2 = RecordingPointPen()\n converter = SegmentToPointPen(points2, False)\n wrongContour2.replay(converter)\n\n proposed_start = problem["value_2"]\n\n # See if we need reversing; fragile but worth a try\n if problem["reversed"]:\n new_points2 = RecordingPointPen()\n reversedPen = ReverseContourPointPen(new_points2)\n points2.replay(reversedPen)\n points2 = new_points2\n proposed_start = len(points2.value) - 2 - proposed_start\n\n # Rotate points2 so that the first point is the same as in points1\n beginPath = points2.value[:1]\n endPath = points2.value[-1:]\n pts = points2.value[1:-1]\n pts = pts[proposed_start:] + pts[:proposed_start]\n points2.value = beginPath + pts + endPath\n\n # Convert the point pens back to segment pens\n segment1 = RecordingPen()\n converter = PointToSegmentPen(segment1, True)\n points1.replay(converter)\n segment2 = RecordingPen()\n converter = PointToSegmentPen(segment2, True)\n points2.replay(converter)\n\n # Replace the wrong contours\n wrongContour1.value = segment1.value\n wrongContour2.value = segment2.value\n perContourPen1.value[problem["contour"]] = wrongContour1\n perContourPen2.value[problem["contour"]] = wrongContour2\n\n for problem in problems:\n # If we have a kink, try to fix it.\n if problem["type"] == InterpolatableProblem.KINK:\n # Save the wrong contours\n wrongContour1 = perContourPen1.value[problem["contour"]]\n wrongContour2 = perContourPen2.value[problem["contour"]]\n\n # Convert the wrong contours to point pens\n points1 = RecordingPointPen()\n converter = SegmentToPointPen(points1, False)\n wrongContour1.replay(converter)\n points2 = RecordingPointPen()\n converter = SegmentToPointPen(points2, False)\n wrongContour2.replay(converter)\n\n i = problem["value"]\n\n # Position points to be around the same ratio\n # beginPath / endPath dance\n j = i + 1\n pt0 = points1.value[j][1][0]\n pt1 = points2.value[j][1][0]\n j_prev = (i - 1) % (len(points1.value) - 2) + 1\n pt0_prev = points1.value[j_prev][1][0]\n pt1_prev = points2.value[j_prev][1][0]\n j_next = (i + 1) % (len(points1.value) - 2) + 1\n pt0_next = points1.value[j_next][1][0]\n pt1_next = points2.value[j_next][1][0]\n\n pt0 = complex(*pt0)\n pt1 = complex(*pt1)\n pt0_prev = complex(*pt0_prev)\n pt1_prev = complex(*pt1_prev)\n pt0_next = complex(*pt0_next)\n pt1_next = complex(*pt1_next)\n\n # Find the ratio of the distance between the points\n r0 = abs(pt0 - pt0_prev) / abs(pt0_next - pt0_prev)\n r1 = abs(pt1 - pt1_prev) / abs(pt1_next - pt1_prev)\n r_mid = (r0 + r1) / 2\n\n pt0 = pt0_prev + r_mid * (pt0_next - pt0_prev)\n pt1 = pt1_prev + r_mid * (pt1_next - pt1_prev)\n\n points1.value[j] = (\n points1.value[j][0],\n (((pt0.real, pt0.imag),) + points1.value[j][1][1:]),\n points1.value[j][2],\n )\n points2.value[j] = (\n points2.value[j][0],\n (((pt1.real, pt1.imag),) + points2.value[j][1][1:]),\n points2.value[j][2],\n )\n\n # Convert the point pens back to segment pens\n segment1 = RecordingPen()\n converter = PointToSegmentPen(segment1, True)\n points1.replay(converter)\n segment2 = RecordingPen()\n converter = PointToSegmentPen(segment2, True)\n points2.replay(converter)\n\n # Replace the wrong contours\n wrongContour1.value = segment1.value\n wrongContour2.value = segment2.value\n\n # Assemble\n fixed1 = RecordingPen()\n fixed2 = RecordingPen()\n for contour in perContourPen1.value:\n fixed1.value.extend(contour.value)\n for contour in perContourPen2.value:\n fixed2.value.extend(contour.value)\n fixed1.draw = fixed1.replay\n fixed2.draw = fixed2.replay\n\n overriding1[glyphname] = fixed1\n overriding2[glyphname] = fixed2\n\n try:\n midway_glyphset = LerpGlyphSet(overriding1, overriding2)\n self.draw_glyph(\n midway_glyphset,\n glyphname,\n {"type": "fixed"},\n None,\n x=x,\n y=y,\n scale=min(scales),\n )\n except ValueError:\n self.draw_emoticon(self.shrug, x=x, y=y)\n y += self.panel_height + self.pad\n\n else:\n emoticon = self.shrug\n if InterpolatableProblem.UNDERWEIGHT in problem_types:\n emoticon = self.underweight\n elif InterpolatableProblem.OVERWEIGHT in problem_types:\n emoticon = self.overweight\n elif InterpolatableProblem.NOTHING in problem_types:\n emoticon = self.yay\n self.draw_emoticon(emoticon, x=x, y=y)\n\n if show_page_number:\n self.draw_label(\n str(self.page_number),\n x=0,\n y=self.height - self.font_size - self.pad,\n width=self.width,\n color=self.head_color,\n align=0.5,\n )\n\n def draw_label(\n self,\n label,\n *,\n x=0,\n y=0,\n color=(0, 0, 0),\n align=0,\n bold=False,\n width=None,\n height=None,\n font_size=None,\n ):\n if width is None:\n width = self.width\n if height is None:\n height = self.height\n if font_size is None:\n font_size = self.font_size\n cr = cairo.Context(self.surface)\n cr.select_font_face(\n "@cairo:",\n cairo.FONT_SLANT_NORMAL,\n cairo.FONT_WEIGHT_BOLD if bold else cairo.FONT_WEIGHT_NORMAL,\n )\n cr.set_font_size(font_size)\n font_extents = cr.font_extents()\n font_size = font_size * font_size / font_extents[2]\n cr.set_font_size(font_size)\n font_extents = cr.font_extents()\n\n cr.set_source_rgb(*color)\n\n extents = cr.text_extents(label)\n if extents.width > width:\n # Shrink\n font_size *= width / extents.width\n cr.set_font_size(font_size)\n font_extents = cr.font_extents()\n extents = cr.text_extents(label)\n\n # Center\n label_x = x + (width - extents.width) * align\n label_y = y + font_extents[0]\n cr.move_to(label_x, label_y)\n cr.show_text(label)\n\n def draw_glyph(self, glyphset, glyphname, problems, which, *, x=0, y=0, scale=None):\n if type(problems) not in (list, tuple):\n problems = [problems]\n\n midway = any(problem["type"] == "midway" for problem in problems)\n problem_type = problems[0]["type"]\n problem_types = set(problem["type"] for problem in problems)\n if not all(pt == problem_type for pt in problem_types):\n problem_type = "mixed"\n glyph = glyphset[glyphname]\n\n recording = RecordingPen()\n glyph.draw(recording)\n decomposedRecording = DecomposingRecordingPen(glyphset)\n glyph.draw(decomposedRecording)\n\n boundsPen = ControlBoundsPen(glyphset)\n decomposedRecording.replay(boundsPen)\n bounds = boundsPen.bounds\n if bounds is None:\n bounds = (0, 0, 0, 0)\n\n glyph_width = bounds[2] - bounds[0]\n glyph_height = bounds[3] - bounds[1]\n\n if glyph_width:\n if scale is None:\n scale = self.panel_width / glyph_width\n else:\n scale = min(scale, self.panel_height / glyph_height)\n if glyph_height:\n if scale is None:\n scale = self.panel_height / glyph_height\n else:\n scale = min(scale, self.panel_height / glyph_height)\n if scale is None:\n scale = 1\n\n cr = cairo.Context(self.surface)\n cr.translate(x, y)\n # Center\n cr.translate(\n (self.panel_width - glyph_width * scale) / 2,\n (self.panel_height - glyph_height * scale) / 2,\n )\n cr.scale(scale, -scale)\n cr.translate(-bounds[0], -bounds[3])\n\n if self.border_color:\n cr.set_source_rgb(*self.border_color)\n cr.rectangle(bounds[0], bounds[1], glyph_width, glyph_height)\n cr.set_line_width(self.border_width / scale)\n cr.stroke()\n\n if self.fill_color or self.stroke_color:\n pen = CairoPen(glyphset, cr)\n decomposedRecording.replay(pen)\n\n if self.fill_color and problem_type != InterpolatableProblem.OPEN_PATH:\n cr.set_source_rgb(*self.fill_color)\n cr.fill_preserve()\n\n if self.stroke_color:\n cr.set_source_rgb(*self.stroke_color)\n cr.set_line_width(self.stroke_width / scale)\n cr.stroke_preserve()\n\n cr.new_path()\n\n if (\n InterpolatableProblem.UNDERWEIGHT in problem_types\n or InterpolatableProblem.OVERWEIGHT in problem_types\n ):\n perContourPen = PerContourOrComponentPen(RecordingPen, glyphset=glyphset)\n recording.replay(perContourPen)\n for problem in problems:\n if problem["type"] in (\n InterpolatableProblem.UNDERWEIGHT,\n InterpolatableProblem.OVERWEIGHT,\n ):\n contour = perContourPen.value[problem["contour"]]\n contour.replay(CairoPen(glyphset, cr))\n cr.set_source_rgba(*self.weight_issue_contour_color)\n cr.fill()\n\n if any(\n t in problem_types\n for t in {\n InterpolatableProblem.NOTHING,\n InterpolatableProblem.NODE_COUNT,\n InterpolatableProblem.NODE_INCOMPATIBILITY,\n }\n ):\n cr.set_line_cap(cairo.LINE_CAP_ROUND)\n\n # Oncurve nodes\n for segment, args in decomposedRecording.value:\n if not args:\n continue\n x, y = args[-1]\n cr.move_to(x, y)\n cr.line_to(x, y)\n cr.set_source_rgba(*self.oncurve_node_color)\n cr.set_line_width(self.oncurve_node_diameter / scale)\n cr.stroke()\n\n # Offcurve nodes\n for segment, args in decomposedRecording.value:\n if not args:\n continue\n for x, y in args[:-1]:\n cr.move_to(x, y)\n cr.line_to(x, y)\n cr.set_source_rgba(*self.offcurve_node_color)\n cr.set_line_width(self.offcurve_node_diameter / scale)\n cr.stroke()\n\n # Handles\n for segment, args in decomposedRecording.value:\n if not args:\n pass\n elif segment in ("moveTo", "lineTo"):\n cr.move_to(*args[0])\n elif segment == "qCurveTo":\n for x, y in args:\n cr.line_to(x, y)\n cr.new_sub_path()\n cr.move_to(*args[-1])\n elif segment == "curveTo":\n cr.line_to(*args[0])\n cr.new_sub_path()\n cr.move_to(*args[1])\n cr.line_to(*args[2])\n cr.new_sub_path()\n cr.move_to(*args[-1])\n else:\n continue\n\n cr.set_source_rgba(*self.handle_color)\n cr.set_line_width(self.handle_width / scale)\n cr.stroke()\n\n matching = None\n for problem in problems:\n if problem["type"] == InterpolatableProblem.CONTOUR_ORDER:\n matching = problem["value_2"]\n colors = cycle(self.contour_colors)\n perContourPen = PerContourOrComponentPen(\n RecordingPen, glyphset=glyphset\n )\n recording.replay(perContourPen)\n for i, contour in enumerate(perContourPen.value):\n if matching[i] == i:\n continue\n color = next(colors)\n contour.replay(CairoPen(glyphset, cr))\n cr.set_source_rgba(*color, self.contour_alpha)\n cr.fill()\n\n for problem in problems:\n if problem["type"] in (\n InterpolatableProblem.NOTHING,\n InterpolatableProblem.WRONG_START_POINT,\n ):\n idx = problem.get("contour")\n\n # Draw suggested point\n if idx is not None and which == 1 and "value_2" in problem:\n perContourPen = PerContourOrComponentPen(\n RecordingPen, glyphset=glyphset\n )\n decomposedRecording.replay(perContourPen)\n points = SimpleRecordingPointPen()\n converter = SegmentToPointPen(points, False)\n perContourPen.value[\n idx if matching is None else matching[idx]\n ].replay(converter)\n targetPoint = points.value[problem["value_2"]][0]\n cr.save()\n cr.translate(*targetPoint)\n cr.scale(1 / scale, 1 / scale)\n self.draw_dot(\n cr,\n diameter=self.corrected_start_point_size,\n color=self.corrected_start_point_color,\n )\n cr.restore()\n\n # Draw start-point arrow\n if which == 0 or not problem.get("reversed"):\n color = self.start_point_color\n else:\n color = self.wrong_start_point_color\n first_pt = None\n i = 0\n cr.save()\n for segment, args in decomposedRecording.value:\n if segment == "moveTo":\n first_pt = args[0]\n continue\n if first_pt is None:\n continue\n if segment == "closePath":\n second_pt = first_pt\n else:\n second_pt = args[0]\n\n if idx is None or i == idx:\n cr.save()\n first_pt = complex(*first_pt)\n second_pt = complex(*second_pt)\n length = abs(second_pt - first_pt)\n cr.translate(first_pt.real, first_pt.imag)\n if length:\n # Draw arrowhead\n cr.rotate(\n math.atan2(\n second_pt.imag - first_pt.imag,\n second_pt.real - first_pt.real,\n )\n )\n cr.scale(1 / scale, 1 / scale)\n self.draw_arrow(cr, color=color)\n else:\n # Draw circle\n cr.scale(1 / scale, 1 / scale)\n self.draw_dot(\n cr,\n diameter=self.corrected_start_point_size,\n color=color,\n )\n cr.restore()\n\n if idx is not None:\n break\n\n first_pt = None\n i += 1\n\n cr.restore()\n\n if problem["type"] == InterpolatableProblem.KINK:\n idx = problem.get("contour")\n perContourPen = PerContourOrComponentPen(\n RecordingPen, glyphset=glyphset\n )\n decomposedRecording.replay(perContourPen)\n points = SimpleRecordingPointPen()\n converter = SegmentToPointPen(points, False)\n perContourPen.value[idx if matching is None else matching[idx]].replay(\n converter\n )\n\n targetPoint = points.value[problem["value"]][0]\n cr.save()\n cr.translate(*targetPoint)\n cr.scale(1 / scale, 1 / scale)\n if midway:\n self.draw_circle(\n cr,\n diameter=self.kink_circle_size,\n stroke_width=self.kink_circle_stroke_width,\n color=self.kink_circle_color,\n )\n else:\n self.draw_dot(\n cr,\n diameter=self.kink_point_size,\n color=self.kink_point_color,\n )\n cr.restore()\n\n return scale\n\n def draw_dot(self, cr, *, x=0, y=0, color=(0, 0, 0), diameter=10):\n cr.save()\n cr.set_line_width(diameter)\n cr.set_line_cap(cairo.LINE_CAP_ROUND)\n cr.move_to(x, y)\n cr.line_to(x, y)\n if len(color) == 3:\n color = color + (1,)\n cr.set_source_rgba(*color)\n cr.stroke()\n cr.restore()\n\n def draw_circle(\n self, cr, *, x=0, y=0, color=(0, 0, 0), diameter=10, stroke_width=1\n ):\n cr.save()\n cr.set_line_width(stroke_width)\n cr.set_line_cap(cairo.LINE_CAP_SQUARE)\n cr.arc(x, y, diameter / 2, 0, 2 * math.pi)\n if len(color) == 3:\n color = color + (1,)\n cr.set_source_rgba(*color)\n cr.stroke()\n cr.restore()\n\n def draw_arrow(self, cr, *, x=0, y=0, color=(0, 0, 0)):\n cr.save()\n if len(color) == 3:\n color = color + (1,)\n cr.set_source_rgba(*color)\n cr.translate(self.start_arrow_length + x, y)\n cr.move_to(0, 0)\n cr.line_to(\n -self.start_arrow_length,\n -self.start_arrow_length * 0.4,\n )\n cr.line_to(\n -self.start_arrow_length,\n self.start_arrow_length * 0.4,\n )\n cr.close_path()\n cr.fill()\n cr.restore()\n\n def draw_text(self, text, *, x=0, y=0, color=(0, 0, 0), width=None, height=None):\n if width is None:\n width = self.width\n if height is None:\n height = self.height\n\n text = text.splitlines()\n cr = cairo.Context(self.surface)\n cr.set_source_rgb(*color)\n cr.set_font_size(self.font_size)\n cr.select_font_face(\n "@cairo:monospace", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL\n )\n text_width = 0\n text_height = 0\n font_extents = cr.font_extents()\n font_font_size = font_extents[2]\n font_ascent = font_extents[0]\n for line in text:\n extents = cr.text_extents(line)\n text_width = max(text_width, extents.x_advance)\n text_height += font_font_size\n if not text_width:\n return\n cr.translate(x, y)\n scale = min(width / text_width, height / text_height)\n # center\n cr.translate(\n (width - text_width * scale) / 2, (height - text_height * scale) / 2\n )\n cr.scale(scale, scale)\n\n cr.translate(0, font_ascent)\n for line in text:\n cr.move_to(0, 0)\n cr.show_text(line)\n cr.translate(0, font_font_size)\n\n def draw_cupcake(self):\n self.draw_label(\n self.no_issues_label,\n x=self.pad,\n y=self.pad,\n color=self.no_issues_label_color,\n width=self.width - 2 * self.pad,\n align=0.5,\n bold=True,\n font_size=self.title_font_size,\n )\n\n self.draw_text(\n self.cupcake,\n x=self.pad,\n y=self.pad + self.font_size,\n width=self.width - 2 * self.pad,\n height=self.height - 2 * self.pad - self.font_size,\n color=self.cupcake_color,\n )\n\n def draw_emoticon(self, emoticon, x=0, y=0):\n self.draw_text(\n emoticon,\n x=x,\n y=y,\n color=self.emoticon_color,\n width=self.panel_width,\n height=self.panel_height,\n )\n\n\nclass InterpolatablePostscriptLike(InterpolatablePlot):\n def __exit__(self, type, value, traceback):\n self.surface.finish()\n\n def show_page(self):\n super().show_page()\n self.surface.show_page()\n\n\nclass InterpolatablePS(InterpolatablePostscriptLike):\n def __enter__(self):\n self.surface = cairo.PSSurface(self.out, self.width, self.height)\n return self\n\n\nclass InterpolatablePDF(InterpolatablePostscriptLike):\n def __enter__(self):\n self.surface = cairo.PDFSurface(self.out, self.width, self.height)\n self.surface.set_metadata(\n cairo.PDF_METADATA_CREATOR, "fonttools varLib.interpolatable"\n )\n self.surface.set_metadata(cairo.PDF_METADATA_CREATE_DATE, "")\n return self\n\n\nclass InterpolatableSVG(InterpolatablePlot):\n def __enter__(self):\n self.sink = BytesIO()\n self.surface = cairo.SVGSurface(self.sink, self.width, self.height)\n return self\n\n def __exit__(self, type, value, traceback):\n if self.surface is not None:\n self.show_page()\n\n def show_page(self):\n super().show_page()\n self.surface.finish()\n self.out.append(self.sink.getvalue())\n self.sink = BytesIO()\n self.surface = cairo.SVGSurface(self.sink, self.width, self.height)\n
.venv\Lib\site-packages\fontTools\varLib\interpolatablePlot.py
interpolatablePlot.py
Python
45,644
0.95
0.130024
0.026714
python-kit
818
2023-08-19T10:24:56.508376
BSD-3-Clause
false
01ec297dd828d18d5bcaa1c8ba0cf7e1
from .interpolatableHelpers import *\nimport logging\n\nlog = logging.getLogger("fontTools.varLib.interpolatable")\n\n\ndef test_contour_order(glyph0, glyph1):\n # We try matching both the StatisticsControlPen vector\n # and the StatisticsPen vector.\n #\n # If either method found a identity matching, accept it.\n # This is crucial for fonts like Kablammo[MORF].ttf and\n # Nabla[EDPT,EHLT].ttf, since they really confuse the\n # StatisticsPen vector because of their area=0 contours.\n\n n = len(glyph0.controlVectors)\n matching = None\n matching_cost = 0\n identity_cost = 0\n done = n <= 1\n if not done:\n m0Control = glyph0.controlVectors\n m1Control = glyph1.controlVectors\n (\n matching_control,\n matching_cost_control,\n identity_cost_control,\n ) = matching_for_vectors(m0Control, m1Control)\n done = matching_cost_control == identity_cost_control\n if not done:\n m0Green = glyph0.greenVectors\n m1Green = glyph1.greenVectors\n (\n matching_green,\n matching_cost_green,\n identity_cost_green,\n ) = matching_for_vectors(m0Green, m1Green)\n done = matching_cost_green == identity_cost_green\n\n if not done:\n # See if reversing contours in one master helps.\n # That's a common problem. Then the wrong_start_point\n # test will fix them.\n #\n # Reverse the sign of the area (0); the rest stay the same.\n if not done:\n m1ControlReversed = [(-m[0],) + m[1:] for m in m1Control]\n (\n matching_control_reversed,\n matching_cost_control_reversed,\n identity_cost_control_reversed,\n ) = matching_for_vectors(m0Control, m1ControlReversed)\n done = matching_cost_control_reversed == identity_cost_control_reversed\n if not done:\n m1GreenReversed = [(-m[0],) + m[1:] for m in m1Green]\n (\n matching_control_reversed,\n matching_cost_green_reversed,\n identity_cost_green_reversed,\n ) = matching_for_vectors(m0Green, m1GreenReversed)\n done = matching_cost_green_reversed == identity_cost_green_reversed\n\n if not done:\n # Otherwise, use the worst of the two matchings.\n if (\n matching_cost_control / identity_cost_control\n < matching_cost_green / identity_cost_green\n ):\n matching = matching_control\n matching_cost = matching_cost_control\n identity_cost = identity_cost_control\n else:\n matching = matching_green\n matching_cost = matching_cost_green\n identity_cost = identity_cost_green\n\n this_tolerance = matching_cost / identity_cost if identity_cost else 1\n log.debug(\n "test-contour-order: tolerance %g",\n this_tolerance,\n )\n return this_tolerance, matching\n
.venv\Lib\site-packages\fontTools\varLib\interpolatableTestContourOrder.py
interpolatableTestContourOrder.py
Python
3,103
0.95
0.170732
0.173333
python-kit
553
2024-02-14T22:28:44.806829
BSD-3-Clause
true
5c6a79f9d1dbb48fd142e95982ed6752
from .interpolatableHelpers import *\n\n\ndef test_starting_point(glyph0, glyph1, ix, tolerance, matching):\n if matching is None:\n matching = list(range(len(glyph0.isomorphisms)))\n contour0 = glyph0.isomorphisms[ix]\n contour1 = glyph1.isomorphisms[matching[ix]]\n m0Vectors = glyph0.greenVectors\n m1Vectors = [glyph1.greenVectors[i] for i in matching]\n\n c0 = contour0[0]\n # Next few lines duplicated below.\n costs = [vdiff_hypot2_complex(c0[0], c1[0]) for c1 in contour1]\n min_cost_idx, min_cost = min(enumerate(costs), key=lambda x: x[1])\n first_cost = costs[0]\n proposed_point = contour1[min_cost_idx][1]\n reverse = contour1[min_cost_idx][2]\n\n if min_cost < first_cost * tolerance:\n # c0 is the first isomorphism of the m0 master\n # contour1 is list of all isomorphisms of the m1 master\n #\n # If the two shapes are both circle-ish and slightly\n # rotated, we detect wrong start point. This is for\n # example the case hundreds of times in\n # RobotoSerif-Italic[GRAD,opsz,wdth,wght].ttf\n #\n # If the proposed point is only one off from the first\n # point (and not reversed), try harder:\n #\n # Find the major eigenvector of the covariance matrix,\n # and rotate the contours by that angle. Then find the\n # closest point again. If it matches this time, let it\n # pass.\n\n num_points = len(glyph1.points[ix])\n leeway = 3\n if not reverse and (\n proposed_point <= leeway or proposed_point >= num_points - leeway\n ):\n # Try harder\n\n # Recover the covariance matrix from the GreenVectors.\n # This is a 2x2 matrix.\n transforms = []\n for vector in (m0Vectors[ix], m1Vectors[ix]):\n meanX = vector[1]\n meanY = vector[2]\n stddevX = vector[3] * 0.5\n stddevY = vector[4] * 0.5\n correlation = vector[5]\n if correlation:\n correlation /= abs(vector[0])\n\n # https://cookierobotics.com/007/\n a = stddevX * stddevX # VarianceX\n c = stddevY * stddevY # VarianceY\n b = correlation * stddevX * stddevY # Covariance\n\n delta = (((a - c) * 0.5) ** 2 + b * b) ** 0.5\n lambda1 = (a + c) * 0.5 + delta # Major eigenvalue\n lambda2 = (a + c) * 0.5 - delta # Minor eigenvalue\n theta = atan2(lambda1 - a, b) if b != 0 else (pi * 0.5 if a < c else 0)\n trans = Transform()\n # Don't translate here. We are working on the complex-vector\n # that includes more than just the points. It's horrible what\n # we are doing anyway...\n # trans = trans.translate(meanX, meanY)\n trans = trans.rotate(theta)\n trans = trans.scale(sqrt(lambda1), sqrt(lambda2))\n transforms.append(trans)\n\n trans = transforms[0]\n new_c0 = (\n [complex(*trans.transformPoint((pt.real, pt.imag))) for pt in c0[0]],\n ) + c0[1:]\n trans = transforms[1]\n new_contour1 = []\n for c1 in contour1:\n new_c1 = (\n [\n complex(*trans.transformPoint((pt.real, pt.imag)))\n for pt in c1[0]\n ],\n ) + c1[1:]\n new_contour1.append(new_c1)\n\n # Next few lines duplicate from above.\n costs = [\n vdiff_hypot2_complex(new_c0[0], new_c1[0]) for new_c1 in new_contour1\n ]\n min_cost_idx, min_cost = min(enumerate(costs), key=lambda x: x[1])\n first_cost = costs[0]\n if min_cost < first_cost * tolerance:\n # Don't report this\n # min_cost = first_cost\n # reverse = False\n # proposed_point = 0 # new_contour1[min_cost_idx][1]\n pass\n\n this_tolerance = min_cost / first_cost if first_cost else 1\n log.debug(\n "test-starting-point: tolerance %g",\n this_tolerance,\n )\n return this_tolerance, proposed_point, reverse\n
.venv\Lib\site-packages\fontTools\varLib\interpolatableTestStartingPoint.py
interpolatableTestStartingPoint.py
Python
4,403
0.95
0.168224
0.302083
react-lib
468
2024-10-30T06:59:09.451822
Apache-2.0
true
5b17d1187243e101efac9b52cdac017a
"""\nInterpolate OpenType Layout tables (GDEF / GPOS / GSUB).\n"""\n\nfrom fontTools.ttLib import TTFont\nfrom fontTools.varLib import models, VarLibError, load_designspace, load_masters\nfrom fontTools.varLib.merger import InstancerMerger\nimport os.path\nimport logging\nfrom copy import deepcopy\nfrom pprint import pformat\n\nlog = logging.getLogger("fontTools.varLib.interpolate_layout")\n\n\ndef interpolate_layout(designspace, loc, master_finder=lambda s: s, mapped=False):\n """\n Interpolate GPOS from a designspace file and location.\n\n If master_finder is set, it should be a callable that takes master\n filename as found in designspace file and map it to master font\n binary as to be opened (eg. .ttf or .otf).\n\n If mapped is False (default), then location is mapped using the\n map element of the axes in designspace file. If mapped is True,\n it is assumed that location is in designspace's internal space and\n no mapping is performed.\n """\n if hasattr(designspace, "sources"): # Assume a DesignspaceDocument\n pass\n else: # Assume a file path\n from fontTools.designspaceLib import DesignSpaceDocument\n\n designspace = DesignSpaceDocument.fromfile(designspace)\n\n ds = load_designspace(designspace)\n log.info("Building interpolated font")\n\n log.info("Loading master fonts")\n master_fonts = load_masters(designspace, master_finder)\n font = deepcopy(master_fonts[ds.base_idx])\n\n log.info("Location: %s", pformat(loc))\n if not mapped:\n loc = {name: ds.axes[name].map_forward(v) for name, v in loc.items()}\n log.info("Internal location: %s", pformat(loc))\n loc = models.normalizeLocation(loc, ds.internal_axis_supports)\n log.info("Normalized location: %s", pformat(loc))\n\n # Assume single-model for now.\n model = models.VariationModel(ds.normalized_master_locs)\n assert 0 == model.mapping[ds.base_idx]\n\n merger = InstancerMerger(font, model, loc)\n\n log.info("Building interpolated tables")\n # TODO GSUB/GDEF\n merger.mergeTables(font, master_fonts, ["GPOS"])\n return font\n\n\ndef main(args=None):\n """Interpolate GDEF/GPOS/GSUB tables for a point on a designspace"""\n from fontTools import configLogger\n import argparse\n import sys\n\n parser = argparse.ArgumentParser(\n "fonttools varLib.interpolate_layout",\n description=main.__doc__,\n )\n parser.add_argument(\n "designspace_filename", metavar="DESIGNSPACE", help="Input TTF files"\n )\n parser.add_argument(\n "locations",\n metavar="LOCATION",\n type=str,\n nargs="+",\n help="Axis locations (e.g. wdth=120",\n )\n parser.add_argument(\n "-o",\n "--output",\n metavar="OUTPUT",\n help="Output font file (defaults to <designspacename>-instance.ttf)",\n )\n parser.add_argument(\n "-l",\n "--loglevel",\n metavar="LEVEL",\n default="INFO",\n help="Logging level (defaults to INFO)",\n )\n\n args = parser.parse_args(args)\n\n if not args.output:\n args.output = os.path.splitext(args.designspace_filename)[0] + "-instance.ttf"\n\n configLogger(level=args.loglevel)\n\n finder = lambda s: s.replace("master_ufo", "master_ttf_interpolatable").replace(\n ".ufo", ".ttf"\n )\n\n loc = {}\n for arg in args.locations:\n tag, val = arg.split("=")\n loc[tag] = float(val)\n\n font = interpolate_layout(args.designspace_filename, loc, finder)\n log.info("Saving font %s", args.output)\n font.save(args.output)\n\n\nif __name__ == "__main__":\n import sys\n\n if len(sys.argv) > 1:\n sys.exit(main())\n import doctest\n\n sys.exit(doctest.testmod().failed)\n
.venv\Lib\site-packages\fontTools\varLib\interpolate_layout.py
interpolate_layout.py
Python
3,813
0.95
0.08871
0.020408
python-kit
261
2024-02-17T11:30:10.746931
GPL-3.0
false
efe9bdf70ede77fdad735089cf907289
MZ
.venv\Lib\site-packages\fontTools\varLib\iup.cp313-win_amd64.pyd
iup.cp313-win_amd64.pyd
Other
130,560
0.75
0.014909
0.012061
awesome-app
419
2023-10-11T18:39:57.015973
BSD-3-Clause
false
8fb1758ceeb0012d477156a1f37c7db0
try:\n import cython\nexcept (AttributeError, ImportError):\n # if cython not installed, use mock module with no-op decorators and types\n from fontTools.misc import cython\nCOMPILED = cython.compiled\n\nfrom typing import (\n Sequence,\n Tuple,\n Union,\n)\nfrom numbers import Integral, Real\n\n\n_Point = Tuple[Real, Real]\n_Delta = Tuple[Real, Real]\n_PointSegment = Sequence[_Point]\n_DeltaSegment = Sequence[_Delta]\n_DeltaOrNone = Union[_Delta, None]\n_DeltaOrNoneSegment = Sequence[_DeltaOrNone]\n_Endpoints = Sequence[Integral]\n\n\nMAX_LOOKBACK = 8\n\n\n@cython.cfunc\n@cython.locals(\n j=cython.int,\n n=cython.int,\n x1=cython.double,\n x2=cython.double,\n d1=cython.double,\n d2=cython.double,\n scale=cython.double,\n x=cython.double,\n d=cython.double,\n)\ndef iup_segment(\n coords: _PointSegment, rc1: _Point, rd1: _Delta, rc2: _Point, rd2: _Delta\n): # -> _DeltaSegment:\n """Given two reference coordinates `rc1` & `rc2` and their respective\n delta vectors `rd1` & `rd2`, returns interpolated deltas for the set of\n coordinates `coords`."""\n\n # rc1 = reference coord 1\n # rd1 = reference delta 1\n out_arrays = [None, None]\n for j in 0, 1:\n out_arrays[j] = out = []\n x1, x2, d1, d2 = rc1[j], rc2[j], rd1[j], rd2[j]\n\n if x1 == x2:\n n = len(coords)\n if d1 == d2:\n out.extend([d1] * n)\n else:\n out.extend([0] * n)\n continue\n\n if x1 > x2:\n x1, x2 = x2, x1\n d1, d2 = d2, d1\n\n # x1 < x2\n scale = (d2 - d1) / (x2 - x1)\n for pair in coords:\n x = pair[j]\n\n if x <= x1:\n d = d1\n elif x >= x2:\n d = d2\n else:\n # Interpolate\n #\n # NOTE: we assign an explicit intermediate variable here in\n # order to disable a fused mul-add optimization. See:\n #\n # - https://godbolt.org/z/YsP4T3TqK,\n # - https://github.com/fonttools/fonttools/issues/3703\n nudge = (x - x1) * scale\n d = d1 + nudge\n\n out.append(d)\n\n return zip(*out_arrays)\n\n\ndef iup_contour(deltas: _DeltaOrNoneSegment, coords: _PointSegment) -> _DeltaSegment:\n """For the contour given in `coords`, interpolate any missing\n delta values in delta vector `deltas`.\n\n Returns fully filled-out delta vector."""\n\n assert len(deltas) == len(coords)\n if None not in deltas:\n return deltas\n\n n = len(deltas)\n # indices of points with explicit deltas\n indices = [i for i, v in enumerate(deltas) if v is not None]\n if not indices:\n # All deltas are None. Return 0,0 for all.\n return [(0, 0)] * n\n\n out = []\n it = iter(indices)\n start = next(it)\n if start != 0:\n # Initial segment that wraps around\n i1, i2, ri1, ri2 = 0, start, start, indices[-1]\n out.extend(\n iup_segment(\n coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2]\n )\n )\n out.append(deltas[start])\n for end in it:\n if end - start > 1:\n i1, i2, ri1, ri2 = start + 1, end, start, end\n out.extend(\n iup_segment(\n coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2]\n )\n )\n out.append(deltas[end])\n start = end\n if start != n - 1:\n # Final segment that wraps around\n i1, i2, ri1, ri2 = start + 1, n, start, indices[0]\n out.extend(\n iup_segment(\n coords[i1:i2], coords[ri1], deltas[ri1], coords[ri2], deltas[ri2]\n )\n )\n\n assert len(deltas) == len(out), (len(deltas), len(out))\n return out\n\n\ndef iup_delta(\n deltas: _DeltaOrNoneSegment, coords: _PointSegment, ends: _Endpoints\n) -> _DeltaSegment:\n """For the outline given in `coords`, with contour endpoints given\n in sorted increasing order in `ends`, interpolate any missing\n delta values in delta vector `deltas`.\n\n Returns fully filled-out delta vector."""\n\n assert sorted(ends) == ends and len(coords) == (ends[-1] + 1 if ends else 0) + 4\n n = len(coords)\n ends = ends + [n - 4, n - 3, n - 2, n - 1]\n out = []\n start = 0\n for end in ends:\n end += 1\n contour = iup_contour(deltas[start:end], coords[start:end])\n out.extend(contour)\n start = end\n\n return out\n\n\n# Optimizer\n\n\n@cython.cfunc\n@cython.inline\n@cython.locals(\n i=cython.int,\n j=cython.int,\n # tolerance=cython.double, # https://github.com/fonttools/fonttools/issues/3282\n x=cython.double,\n y=cython.double,\n p=cython.double,\n q=cython.double,\n)\n@cython.returns(int)\ndef can_iup_in_between(\n deltas: _DeltaSegment,\n coords: _PointSegment,\n i: Integral,\n j: Integral,\n tolerance: Real,\n): # -> bool:\n """Return true if the deltas for points at `i` and `j` (`i < j`) can be\n successfully used to interpolate deltas for points in between them within\n provided error tolerance."""\n\n assert j - i >= 2\n interp = iup_segment(coords[i + 1 : j], coords[i], deltas[i], coords[j], deltas[j])\n deltas = deltas[i + 1 : j]\n\n return all(\n abs(complex(x - p, y - q)) <= tolerance\n for (x, y), (p, q) in zip(deltas, interp)\n )\n\n\n@cython.locals(\n cj=cython.double,\n dj=cython.double,\n lcj=cython.double,\n ldj=cython.double,\n ncj=cython.double,\n ndj=cython.double,\n force=cython.int,\n forced=set,\n)\ndef _iup_contour_bound_forced_set(\n deltas: _DeltaSegment, coords: _PointSegment, tolerance: Real = 0\n) -> set:\n """The forced set is a conservative set of points on the contour that must be encoded\n explicitly (ie. cannot be interpolated). Calculating this set allows for significantly\n speeding up the dynamic-programming, as well as resolve circularity in DP.\n\n The set is precise; that is, if an index is in the returned set, then there is no way\n that IUP can generate delta for that point, given `coords` and `deltas`.\n """\n assert len(deltas) == len(coords)\n\n n = len(deltas)\n forced = set()\n # Track "last" and "next" points on the contour as we sweep.\n for i in range(len(deltas) - 1, -1, -1):\n ld, lc = deltas[i - 1], coords[i - 1]\n d, c = deltas[i], coords[i]\n nd, nc = deltas[i - n + 1], coords[i - n + 1]\n\n for j in (0, 1): # For X and for Y\n cj = c[j]\n dj = d[j]\n lcj = lc[j]\n ldj = ld[j]\n ncj = nc[j]\n ndj = nd[j]\n\n if lcj <= ncj:\n c1, c2 = lcj, ncj\n d1, d2 = ldj, ndj\n else:\n c1, c2 = ncj, lcj\n d1, d2 = ndj, ldj\n\n force = False\n\n # If the two coordinates are the same, then the interpolation\n # algorithm produces the same delta if both deltas are equal,\n # and zero if they differ.\n #\n # This test has to be before the next one.\n if c1 == c2:\n if abs(d1 - d2) > tolerance and abs(dj) > tolerance:\n force = True\n\n # If coordinate for current point is between coordinate of adjacent\n # points on the two sides, but the delta for current point is NOT\n # between delta for those adjacent points (considering tolerance\n # allowance), then there is no way that current point can be IUP-ed.\n # Mark it forced.\n elif c1 <= cj <= c2: # and c1 != c2\n if not (min(d1, d2) - tolerance <= dj <= max(d1, d2) + tolerance):\n force = True\n\n # Otherwise, the delta should either match the closest, or have the\n # same sign as the interpolation of the two deltas.\n else: # cj < c1 or c2 < cj\n if d1 != d2:\n if cj < c1:\n if (\n abs(dj) > tolerance\n and abs(dj - d1) > tolerance\n and ((dj - tolerance < d1) != (d1 < d2))\n ):\n force = True\n else: # c2 < cj\n if (\n abs(dj) > tolerance\n and abs(dj - d2) > tolerance\n and ((d2 < dj + tolerance) != (d1 < d2))\n ):\n force = True\n\n if force:\n forced.add(i)\n break\n\n return forced\n\n\n@cython.locals(\n i=cython.int,\n j=cython.int,\n best_cost=cython.double,\n best_j=cython.int,\n cost=cython.double,\n forced=set,\n tolerance=cython.double,\n)\ndef _iup_contour_optimize_dp(\n deltas: _DeltaSegment,\n coords: _PointSegment,\n forced=set(),\n tolerance: Real = 0,\n lookback: Integral = None,\n):\n """Straightforward Dynamic-Programming. For each index i, find least-costly encoding of\n points 0 to i where i is explicitly encoded. We find this by considering all previous\n explicit points j and check whether interpolation can fill points between j and i.\n\n Note that solution always encodes last point explicitly. Higher-level is responsible\n for removing that restriction.\n\n As major speedup, we stop looking further whenever we see a "forced" point."""\n\n n = len(deltas)\n if lookback is None:\n lookback = n\n lookback = min(lookback, MAX_LOOKBACK)\n costs = {-1: 0}\n chain = {-1: None}\n for i in range(0, n):\n best_cost = costs[i - 1] + 1\n\n costs[i] = best_cost\n chain[i] = i - 1\n\n if i - 1 in forced:\n continue\n\n for j in range(i - 2, max(i - lookback, -2), -1):\n cost = costs[j] + 1\n\n if cost < best_cost and can_iup_in_between(deltas, coords, j, i, tolerance):\n costs[i] = best_cost = cost\n chain[i] = j\n\n if j in forced:\n break\n\n return chain, costs\n\n\ndef _rot_list(l: list, k: int):\n """Rotate list by k items forward. Ie. item at position 0 will be\n at position k in returned list. Negative k is allowed."""\n n = len(l)\n k %= n\n if not k:\n return l\n return l[n - k :] + l[: n - k]\n\n\ndef _rot_set(s: set, k: int, n: int):\n k %= n\n if not k:\n return s\n return {(v + k) % n for v in s}\n\n\ndef iup_contour_optimize(\n deltas: _DeltaSegment, coords: _PointSegment, tolerance: Real = 0.0\n) -> _DeltaOrNoneSegment:\n """For contour with coordinates `coords`, optimize a set of delta\n values `deltas` within error `tolerance`.\n\n Returns delta vector that has most number of None items instead of\n the input delta.\n """\n\n n = len(deltas)\n\n # Get the easy cases out of the way:\n\n # If all are within tolerance distance of 0, encode nothing:\n if all(abs(complex(*p)) <= tolerance for p in deltas):\n return [None] * n\n\n # If there's exactly one point, return it:\n if n == 1:\n return deltas\n\n # If all deltas are exactly the same, return just one (the first one):\n d0 = deltas[0]\n if all(d0 == d for d in deltas):\n return [d0] + [None] * (n - 1)\n\n # Else, solve the general problem using Dynamic Programming.\n\n forced = _iup_contour_bound_forced_set(deltas, coords, tolerance)\n # The _iup_contour_optimize_dp() routine returns the optimal encoding\n # solution given the constraint that the last point is always encoded.\n # To remove this constraint, we use two different methods, depending on\n # whether forced set is non-empty or not:\n\n # Debugging: Make the next if always take the second branch and observe\n # if the font size changes (reduced); that would mean the forced-set\n # has members it should not have.\n if forced:\n # Forced set is non-empty: rotate the contour start point\n # such that the last point in the list is a forced point.\n k = (n - 1) - max(forced)\n assert k >= 0\n\n deltas = _rot_list(deltas, k)\n coords = _rot_list(coords, k)\n forced = _rot_set(forced, k, n)\n\n # Debugging: Pass a set() instead of forced variable to the next call\n # to exercise forced-set computation for under-counting.\n chain, costs = _iup_contour_optimize_dp(deltas, coords, forced, tolerance)\n\n # Assemble solution.\n solution = set()\n i = n - 1\n while i is not None:\n solution.add(i)\n i = chain[i]\n solution.remove(-1)\n\n # if not forced <= solution:\n # print("coord", coords)\n # print("deltas", deltas)\n # print("len", len(deltas))\n assert forced <= solution, (forced, solution)\n\n deltas = [deltas[i] if i in solution else None for i in range(n)]\n\n deltas = _rot_list(deltas, -k)\n else:\n # Repeat the contour an extra time, solve the new case, then look for solutions of the\n # circular n-length problem in the solution for new linear case. I cannot prove that\n # this always produces the optimal solution...\n chain, costs = _iup_contour_optimize_dp(\n deltas + deltas, coords + coords, forced, tolerance, n\n )\n best_sol, best_cost = None, n + 1\n\n for start in range(n - 1, len(costs) - 1):\n # Assemble solution.\n solution = set()\n i = start\n while i > start - n:\n solution.add(i % n)\n i = chain[i]\n if i == start - n:\n cost = costs[start] - costs[start - n]\n if cost <= best_cost:\n best_sol, best_cost = solution, cost\n\n # if not forced <= best_sol:\n # print("coord", coords)\n # print("deltas", deltas)\n # print("len", len(deltas))\n assert forced <= best_sol, (forced, best_sol)\n\n deltas = [deltas[i] if i in best_sol else None for i in range(n)]\n\n return deltas\n\n\ndef iup_delta_optimize(\n deltas: _DeltaSegment,\n coords: _PointSegment,\n ends: _Endpoints,\n tolerance: Real = 0.0,\n) -> _DeltaOrNoneSegment:\n """For the outline given in `coords`, with contour endpoints given\n in sorted increasing order in `ends`, optimize a set of delta\n values `deltas` within error `tolerance`.\n\n Returns delta vector that has most number of None items instead of\n the input delta.\n """\n assert sorted(ends) == ends and len(coords) == (ends[-1] + 1 if ends else 0) + 4\n n = len(coords)\n ends = ends + [n - 4, n - 3, n - 2, n - 1]\n out = []\n start = 0\n for end in ends:\n contour = iup_contour_optimize(\n deltas[start : end + 1], coords[start : end + 1], tolerance\n )\n assert len(contour) == end - start + 1\n out.extend(contour)\n start = end + 1\n\n return out\n
.venv\Lib\site-packages\fontTools\varLib\iup.py
iup.py
Python
15,474
0.95
0.179592
0.14532
python-kit
823
2025-05-01T04:11:20.194199
BSD-3-Clause
false
7353c0a72861e7cca8db61856733550a
"""\nMerge OpenType Layout tables (GDEF / GPOS / GSUB).\n"""\n\nimport os\nimport copy\nimport enum\nfrom operator import ior\nimport logging\nfrom fontTools.colorLib.builder import MAX_PAINT_COLR_LAYER_COUNT, LayerReuseCache\nfrom fontTools.misc import classifyTools\nfrom fontTools.misc.roundTools import otRound\nfrom fontTools.misc.treeTools import build_n_ary_tree\nfrom fontTools.ttLib.tables import otTables as ot\nfrom fontTools.ttLib.tables import otBase as otBase\nfrom fontTools.ttLib.tables.otConverters import BaseFixedValue\nfrom fontTools.ttLib.tables.otTraverse import dfs_base_table\nfrom fontTools.ttLib.tables.DefaultTable import DefaultTable\nfrom fontTools.varLib import builder, models, varStore\nfrom fontTools.varLib.models import nonNone, allNone, allEqual, allEqualTo, subList\nfrom fontTools.varLib.varStore import VarStoreInstancer\nfrom functools import reduce\nfrom fontTools.otlLib.builder import buildSinglePos\nfrom fontTools.otlLib.optimize.gpos import (\n _compression_level_from_env,\n compact_pair_pos,\n)\n\nlog = logging.getLogger("fontTools.varLib.merger")\n\nfrom .errors import (\n ShouldBeConstant,\n FoundANone,\n MismatchedTypes,\n NotANone,\n LengthsDiffer,\n KeysDiffer,\n InconsistentGlyphOrder,\n InconsistentExtensions,\n InconsistentFormats,\n UnsupportedFormat,\n VarLibMergeError,\n)\n\n\nclass Merger(object):\n def __init__(self, font=None):\n self.font = font\n # mergeTables populates this from the parent's master ttfs\n self.ttfs = None\n\n @classmethod\n def merger(celf, clazzes, attrs=(None,)):\n assert celf != Merger, "Subclass Merger instead."\n if "mergers" not in celf.__dict__:\n celf.mergers = {}\n if type(clazzes) in (type, enum.EnumMeta):\n clazzes = (clazzes,)\n if type(attrs) == str:\n attrs = (attrs,)\n\n def wrapper(method):\n assert method.__name__ == "merge"\n done = []\n for clazz in clazzes:\n if clazz in done:\n continue # Support multiple names of a clazz\n done.append(clazz)\n mergers = celf.mergers.setdefault(clazz, {})\n for attr in attrs:\n assert attr not in mergers, (\n "Oops, class '%s' has merge function for '%s' defined already."\n % (clazz.__name__, attr)\n )\n mergers[attr] = method\n return None\n\n return wrapper\n\n @classmethod\n def mergersFor(celf, thing, _default={}):\n typ = type(thing)\n\n for celf in celf.mro():\n mergers = getattr(celf, "mergers", None)\n if mergers is None:\n break\n\n m = celf.mergers.get(typ, None)\n if m is not None:\n return m\n\n return _default\n\n def mergeObjects(self, out, lst, exclude=()):\n if hasattr(out, "ensureDecompiled"):\n out.ensureDecompiled(recurse=False)\n for item in lst:\n if hasattr(item, "ensureDecompiled"):\n item.ensureDecompiled(recurse=False)\n keys = sorted(vars(out).keys())\n if not all(keys == sorted(vars(v).keys()) for v in lst):\n raise KeysDiffer(\n self, expected=keys, got=[sorted(vars(v).keys()) for v in lst]\n )\n mergers = self.mergersFor(out)\n defaultMerger = mergers.get("*", self.__class__.mergeThings)\n try:\n for key in keys:\n if key in exclude:\n continue\n value = getattr(out, key)\n values = [getattr(table, key) for table in lst]\n mergerFunc = mergers.get(key, defaultMerger)\n mergerFunc(self, value, values)\n except VarLibMergeError as e:\n e.stack.append("." + key)\n raise\n\n def mergeLists(self, out, lst):\n if not allEqualTo(out, lst, len):\n raise LengthsDiffer(self, expected=len(out), got=[len(x) for x in lst])\n for i, (value, values) in enumerate(zip(out, zip(*lst))):\n try:\n self.mergeThings(value, values)\n except VarLibMergeError as e:\n e.stack.append("[%d]" % i)\n raise\n\n def mergeThings(self, out, lst):\n if not allEqualTo(out, lst, type):\n raise MismatchedTypes(\n self, expected=type(out).__name__, got=[type(x).__name__ for x in lst]\n )\n mergerFunc = self.mergersFor(out).get(None, None)\n if mergerFunc is not None:\n mergerFunc(self, out, lst)\n elif isinstance(out, enum.Enum):\n # need to special-case Enums as have __dict__ but are not regular 'objects',\n # otherwise mergeObjects/mergeThings get trapped in a RecursionError\n if not allEqualTo(out, lst):\n raise ShouldBeConstant(self, expected=out, got=lst)\n elif hasattr(out, "__dict__"):\n self.mergeObjects(out, lst)\n elif isinstance(out, list):\n self.mergeLists(out, lst)\n else:\n if not allEqualTo(out, lst):\n raise ShouldBeConstant(self, expected=out, got=lst)\n\n def mergeTables(self, font, master_ttfs, tableTags):\n for tag in tableTags:\n if tag not in font:\n continue\n try:\n self.ttfs = master_ttfs\n self.mergeThings(font[tag], [m.get(tag) for m in master_ttfs])\n except VarLibMergeError as e:\n e.stack.append(tag)\n raise\n\n\n#\n# Aligning merger\n#\nclass AligningMerger(Merger):\n pass\n\n\n@AligningMerger.merger(ot.GDEF, "GlyphClassDef")\ndef merge(merger, self, lst):\n if self is None:\n if not allNone(lst):\n raise NotANone(merger, expected=None, got=lst)\n return\n\n lst = [l.classDefs for l in lst]\n self.classDefs = {}\n # We only care about the .classDefs\n self = self.classDefs\n\n allKeys = set()\n allKeys.update(*[l.keys() for l in lst])\n for k in allKeys:\n allValues = nonNone(l.get(k) for l in lst)\n if not allEqual(allValues):\n raise ShouldBeConstant(\n merger, expected=allValues[0], got=lst, stack=["." + k]\n )\n if not allValues:\n self[k] = None\n else:\n self[k] = allValues[0]\n\n\ndef _SinglePosUpgradeToFormat2(self):\n if self.Format == 2:\n return self\n\n ret = ot.SinglePos()\n ret.Format = 2\n ret.Coverage = self.Coverage\n ret.ValueFormat = self.ValueFormat\n ret.Value = [self.Value for _ in ret.Coverage.glyphs]\n ret.ValueCount = len(ret.Value)\n\n return ret\n\n\ndef _merge_GlyphOrders(font, lst, values_lst=None, default=None):\n """Takes font and list of glyph lists (must be sorted by glyph id), and returns\n two things:\n - Combined glyph list,\n - If values_lst is None, return input glyph lists, but padded with None when a glyph\n was missing in a list. Otherwise, return values_lst list-of-list, padded with None\n to match combined glyph lists.\n """\n if values_lst is None:\n dict_sets = [set(l) for l in lst]\n else:\n dict_sets = [{g: v for g, v in zip(l, vs)} for l, vs in zip(lst, values_lst)]\n combined = set()\n combined.update(*dict_sets)\n\n sortKey = font.getReverseGlyphMap().__getitem__\n order = sorted(combined, key=sortKey)\n # Make sure all input glyphsets were in proper order\n if not all(sorted(vs, key=sortKey) == vs for vs in lst):\n raise InconsistentGlyphOrder()\n del combined\n\n paddedValues = None\n if values_lst is None:\n padded = [\n [glyph if glyph in dict_set else default for glyph in order]\n for dict_set in dict_sets\n ]\n else:\n assert len(lst) == len(values_lst)\n padded = [\n [dict_set[glyph] if glyph in dict_set else default for glyph in order]\n for dict_set in dict_sets\n ]\n return order, padded\n\n\n@AligningMerger.merger(otBase.ValueRecord)\ndef merge(merger, self, lst):\n # Code below sometimes calls us with self being\n # a new object. Copy it from lst and recurse.\n self.__dict__ = lst[0].__dict__.copy()\n merger.mergeObjects(self, lst)\n\n\n@AligningMerger.merger(ot.Anchor)\ndef merge(merger, self, lst):\n # Code below sometimes calls us with self being\n # a new object. Copy it from lst and recurse.\n self.__dict__ = lst[0].__dict__.copy()\n merger.mergeObjects(self, lst)\n\n\ndef _Lookup_SinglePos_get_effective_value(merger, subtables, glyph):\n for self in subtables:\n if (\n self is None\n or type(self) != ot.SinglePos\n or self.Coverage is None\n or glyph not in self.Coverage.glyphs\n ):\n continue\n if self.Format == 1:\n return self.Value\n elif self.Format == 2:\n return self.Value[self.Coverage.glyphs.index(glyph)]\n else:\n raise UnsupportedFormat(merger, subtable="single positioning lookup")\n return None\n\n\ndef _Lookup_PairPos_get_effective_value_pair(\n merger, subtables, firstGlyph, secondGlyph\n):\n for self in subtables:\n if (\n self is None\n or type(self) != ot.PairPos\n or self.Coverage is None\n or firstGlyph not in self.Coverage.glyphs\n ):\n continue\n if self.Format == 1:\n ps = self.PairSet[self.Coverage.glyphs.index(firstGlyph)]\n pvr = ps.PairValueRecord\n for rec in pvr: # TODO Speed up\n if rec.SecondGlyph == secondGlyph:\n return rec\n continue\n elif self.Format == 2:\n klass1 = self.ClassDef1.classDefs.get(firstGlyph, 0)\n klass2 = self.ClassDef2.classDefs.get(secondGlyph, 0)\n return self.Class1Record[klass1].Class2Record[klass2]\n else:\n raise UnsupportedFormat(merger, subtable="pair positioning lookup")\n return None\n\n\n@AligningMerger.merger(ot.SinglePos)\ndef merge(merger, self, lst):\n self.ValueFormat = valueFormat = reduce(int.__or__, [l.ValueFormat for l in lst], 0)\n if not (len(lst) == 1 or (valueFormat & ~0xF == 0)):\n raise UnsupportedFormat(merger, subtable="single positioning lookup")\n\n # If all have same coverage table and all are format 1,\n coverageGlyphs = self.Coverage.glyphs\n if all(v.Format == 1 for v in lst) and all(\n coverageGlyphs == v.Coverage.glyphs for v in lst\n ):\n self.Value = otBase.ValueRecord(valueFormat, self.Value)\n if valueFormat != 0:\n # If v.Value is None, it means a kerning of 0; we want\n # it to participate in the model still.\n # https://github.com/fonttools/fonttools/issues/3111\n merger.mergeThings(\n self.Value,\n [v.Value if v.Value is not None else otBase.ValueRecord() for v in lst],\n )\n self.ValueFormat = self.Value.getFormat()\n return\n\n # Upgrade everything to Format=2\n self.Format = 2\n lst = [_SinglePosUpgradeToFormat2(v) for v in lst]\n\n # Align them\n glyphs, padded = _merge_GlyphOrders(\n merger.font, [v.Coverage.glyphs for v in lst], [v.Value for v in lst]\n )\n\n self.Coverage.glyphs = glyphs\n self.Value = [otBase.ValueRecord(valueFormat) for _ in glyphs]\n self.ValueCount = len(self.Value)\n\n for i, values in enumerate(padded):\n for j, glyph in enumerate(glyphs):\n if values[j] is not None:\n continue\n # Fill in value from other subtables\n # Note!!! This *might* result in behavior change if ValueFormat2-zeroedness\n # is different between used subtable and current subtable!\n # TODO(behdad) Check and warn if that happens?\n v = _Lookup_SinglePos_get_effective_value(\n merger, merger.lookup_subtables[i], glyph\n )\n if v is None:\n v = otBase.ValueRecord(valueFormat)\n values[j] = v\n\n merger.mergeLists(self.Value, padded)\n\n # Merge everything else; though, there shouldn't be anything else. :)\n merger.mergeObjects(\n self, lst, exclude=("Format", "Coverage", "Value", "ValueCount", "ValueFormat")\n )\n self.ValueFormat = reduce(\n int.__or__, [v.getEffectiveFormat() for v in self.Value], 0\n )\n\n\n@AligningMerger.merger(ot.PairSet)\ndef merge(merger, self, lst):\n # Align them\n glyphs, padded = _merge_GlyphOrders(\n merger.font,\n [[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst],\n [vs.PairValueRecord for vs in lst],\n )\n\n self.PairValueRecord = pvrs = []\n for glyph in glyphs:\n pvr = ot.PairValueRecord()\n pvr.SecondGlyph = glyph\n pvr.Value1 = (\n otBase.ValueRecord(merger.valueFormat1) if merger.valueFormat1 else None\n )\n pvr.Value2 = (\n otBase.ValueRecord(merger.valueFormat2) if merger.valueFormat2 else None\n )\n pvrs.append(pvr)\n self.PairValueCount = len(self.PairValueRecord)\n\n for i, values in enumerate(padded):\n for j, glyph in enumerate(glyphs):\n # Fill in value from other subtables\n v = ot.PairValueRecord()\n v.SecondGlyph = glyph\n if values[j] is not None:\n vpair = values[j]\n else:\n vpair = _Lookup_PairPos_get_effective_value_pair(\n merger, merger.lookup_subtables[i], self._firstGlyph, glyph\n )\n if vpair is None:\n v1, v2 = None, None\n else:\n v1 = getattr(vpair, "Value1", None)\n v2 = getattr(vpair, "Value2", None)\n v.Value1 = (\n otBase.ValueRecord(merger.valueFormat1, src=v1)\n if merger.valueFormat1\n else None\n )\n v.Value2 = (\n otBase.ValueRecord(merger.valueFormat2, src=v2)\n if merger.valueFormat2\n else None\n )\n values[j] = v\n del self._firstGlyph\n\n merger.mergeLists(self.PairValueRecord, padded)\n\n\ndef _PairPosFormat1_merge(self, lst, merger):\n assert allEqual(\n [l.ValueFormat2 == 0 for l in lst if l.PairSet]\n ), "Report bug against fonttools."\n\n # Merge everything else; makes sure Format is the same.\n merger.mergeObjects(\n self,\n lst,\n exclude=("Coverage", "PairSet", "PairSetCount", "ValueFormat1", "ValueFormat2"),\n )\n\n empty = ot.PairSet()\n empty.PairValueRecord = []\n empty.PairValueCount = 0\n\n # Align them\n glyphs, padded = _merge_GlyphOrders(\n merger.font,\n [v.Coverage.glyphs for v in lst],\n [v.PairSet for v in lst],\n default=empty,\n )\n\n self.Coverage.glyphs = glyphs\n self.PairSet = [ot.PairSet() for _ in glyphs]\n self.PairSetCount = len(self.PairSet)\n for glyph, ps in zip(glyphs, self.PairSet):\n ps._firstGlyph = glyph\n\n merger.mergeLists(self.PairSet, padded)\n\n\ndef _ClassDef_invert(self, allGlyphs=None):\n if isinstance(self, dict):\n classDefs = self\n else:\n classDefs = self.classDefs if self and self.classDefs else {}\n m = max(classDefs.values()) if classDefs else 0\n\n ret = []\n for _ in range(m + 1):\n ret.append(set())\n\n for k, v in classDefs.items():\n ret[v].add(k)\n\n # Class-0 is special. It's "everything else".\n if allGlyphs is None:\n ret[0] = None\n else:\n # Limit all classes to glyphs in allGlyphs.\n # Collect anything without a non-zero class into class=zero.\n ret[0] = class0 = set(allGlyphs)\n for s in ret[1:]:\n s.intersection_update(class0)\n class0.difference_update(s)\n\n return ret\n\n\ndef _ClassDef_merge_classify(lst, allGlyphses=None):\n self = ot.ClassDef()\n self.classDefs = classDefs = {}\n allGlyphsesWasNone = allGlyphses is None\n if allGlyphsesWasNone:\n allGlyphses = [None] * len(lst)\n\n classifier = classifyTools.Classifier()\n for classDef, allGlyphs in zip(lst, allGlyphses):\n sets = _ClassDef_invert(classDef, allGlyphs)\n if allGlyphs is None:\n sets = sets[1:]\n classifier.update(sets)\n classes = classifier.getClasses()\n\n if allGlyphsesWasNone:\n classes.insert(0, set())\n\n for i, classSet in enumerate(classes):\n if i == 0:\n continue\n for g in classSet:\n classDefs[g] = i\n\n return self, classes\n\n\ndef _PairPosFormat2_align_matrices(self, lst, font, transparent=False):\n matrices = [l.Class1Record for l in lst]\n\n # Align first classes\n self.ClassDef1, classes = _ClassDef_merge_classify(\n [l.ClassDef1 for l in lst], [l.Coverage.glyphs for l in lst]\n )\n self.Class1Count = len(classes)\n new_matrices = []\n for l, matrix in zip(lst, matrices):\n nullRow = None\n coverage = set(l.Coverage.glyphs)\n classDef1 = l.ClassDef1.classDefs\n class1Records = []\n for classSet in classes:\n exemplarGlyph = next(iter(classSet))\n if exemplarGlyph not in coverage:\n # Follow-up to e6125b353e1f54a0280ded5434b8e40d042de69f,\n # Fixes https://github.com/googlei18n/fontmake/issues/470\n # Again, revert 8d441779e5afc664960d848f62c7acdbfc71d7b9\n # when merger becomes selfless.\n nullRow = None\n if nullRow is None:\n nullRow = ot.Class1Record()\n class2records = nullRow.Class2Record = []\n # TODO: When merger becomes selfless, revert e6125b353e1f54a0280ded5434b8e40d042de69f\n for _ in range(l.Class2Count):\n if transparent:\n rec2 = None\n else:\n rec2 = ot.Class2Record()\n rec2.Value1 = (\n otBase.ValueRecord(self.ValueFormat1)\n if self.ValueFormat1\n else None\n )\n rec2.Value2 = (\n otBase.ValueRecord(self.ValueFormat2)\n if self.ValueFormat2\n else None\n )\n class2records.append(rec2)\n rec1 = nullRow\n else:\n klass = classDef1.get(exemplarGlyph, 0)\n rec1 = matrix[klass] # TODO handle out-of-range?\n class1Records.append(rec1)\n new_matrices.append(class1Records)\n matrices = new_matrices\n del new_matrices\n\n # Align second classes\n self.ClassDef2, classes = _ClassDef_merge_classify([l.ClassDef2 for l in lst])\n self.Class2Count = len(classes)\n new_matrices = []\n for l, matrix in zip(lst, matrices):\n classDef2 = l.ClassDef2.classDefs\n class1Records = []\n for rec1old in matrix:\n oldClass2Records = rec1old.Class2Record\n rec1new = ot.Class1Record()\n class2Records = rec1new.Class2Record = []\n for classSet in classes:\n if not classSet: # class=0\n rec2 = oldClass2Records[0]\n else:\n exemplarGlyph = next(iter(classSet))\n klass = classDef2.get(exemplarGlyph, 0)\n rec2 = oldClass2Records[klass]\n class2Records.append(copy.deepcopy(rec2))\n class1Records.append(rec1new)\n new_matrices.append(class1Records)\n matrices = new_matrices\n del new_matrices\n\n return matrices\n\n\ndef _PairPosFormat2_merge(self, lst, merger):\n assert allEqual(\n [l.ValueFormat2 == 0 for l in lst if l.Class1Record]\n ), "Report bug against fonttools."\n\n merger.mergeObjects(\n self,\n lst,\n exclude=(\n "Coverage",\n "ClassDef1",\n "Class1Count",\n "ClassDef2",\n "Class2Count",\n "Class1Record",\n "ValueFormat1",\n "ValueFormat2",\n ),\n )\n\n # Align coverages\n glyphs, _ = _merge_GlyphOrders(merger.font, [v.Coverage.glyphs for v in lst])\n self.Coverage.glyphs = glyphs\n\n # Currently, if the coverage of PairPosFormat2 subtables are different,\n # we do NOT bother walking down the subtable list when filling in new\n # rows for alignment. As such, this is only correct if current subtable\n # is the last subtable in the lookup. Ensure that.\n #\n # Note that our canonicalization process merges trailing PairPosFormat2's,\n # so in reality this is rare.\n for l, subtables in zip(lst, merger.lookup_subtables):\n if l.Coverage.glyphs != glyphs:\n assert l == subtables[-1]\n\n matrices = _PairPosFormat2_align_matrices(self, lst, merger.font)\n\n self.Class1Record = list(matrices[0]) # TODO move merger to be selfless\n merger.mergeLists(self.Class1Record, matrices)\n\n\n@AligningMerger.merger(ot.PairPos)\ndef merge(merger, self, lst):\n merger.valueFormat1 = self.ValueFormat1 = reduce(\n int.__or__, [l.ValueFormat1 for l in lst], 0\n )\n merger.valueFormat2 = self.ValueFormat2 = reduce(\n int.__or__, [l.ValueFormat2 for l in lst], 0\n )\n\n if self.Format == 1:\n _PairPosFormat1_merge(self, lst, merger)\n elif self.Format == 2:\n _PairPosFormat2_merge(self, lst, merger)\n else:\n raise UnsupportedFormat(merger, subtable="pair positioning lookup")\n\n del merger.valueFormat1, merger.valueFormat2\n\n # Now examine the list of value records, and update to the union of format values,\n # as merge might have created new values.\n vf1 = 0\n vf2 = 0\n if self.Format == 1:\n for pairSet in self.PairSet:\n for pairValueRecord in pairSet.PairValueRecord:\n pv1 = getattr(pairValueRecord, "Value1", None)\n if pv1 is not None:\n vf1 |= pv1.getFormat()\n pv2 = getattr(pairValueRecord, "Value2", None)\n if pv2 is not None:\n vf2 |= pv2.getFormat()\n elif self.Format == 2:\n for class1Record in self.Class1Record:\n for class2Record in class1Record.Class2Record:\n pv1 = getattr(class2Record, "Value1", None)\n if pv1 is not None:\n vf1 |= pv1.getFormat()\n pv2 = getattr(class2Record, "Value2", None)\n if pv2 is not None:\n vf2 |= pv2.getFormat()\n self.ValueFormat1 = vf1\n self.ValueFormat2 = vf2\n\n\ndef _MarkBasePosFormat1_merge(self, lst, merger, Mark="Mark", Base="Base"):\n self.ClassCount = max(l.ClassCount for l in lst)\n\n MarkCoverageGlyphs, MarkRecords = _merge_GlyphOrders(\n merger.font,\n [getattr(l, Mark + "Coverage").glyphs for l in lst],\n [getattr(l, Mark + "Array").MarkRecord for l in lst],\n )\n getattr(self, Mark + "Coverage").glyphs = MarkCoverageGlyphs\n\n BaseCoverageGlyphs, BaseRecords = _merge_GlyphOrders(\n merger.font,\n [getattr(l, Base + "Coverage").glyphs for l in lst],\n [getattr(getattr(l, Base + "Array"), Base + "Record") for l in lst],\n )\n getattr(self, Base + "Coverage").glyphs = BaseCoverageGlyphs\n\n # MarkArray\n records = []\n for g, glyphRecords in zip(MarkCoverageGlyphs, zip(*MarkRecords)):\n allClasses = [r.Class for r in glyphRecords if r is not None]\n\n # TODO Right now we require that all marks have same class in\n # all masters that cover them. This is not required.\n #\n # We can relax that by just requiring that all marks that have\n # the same class in a master, have the same class in every other\n # master. Indeed, if, say, a sparse master only covers one mark,\n # that mark probably will get class 0, which would possibly be\n # different from its class in other masters.\n #\n # We can even go further and reclassify marks to support any\n # input. But, since, it's unlikely that two marks being both,\n # say, "top" in one master, and one being "top" and other being\n # "top-right" in another master, we shouldn't do that, as any\n # failures in that case will probably signify mistakes in the\n # input masters.\n\n if not allEqual(allClasses):\n raise ShouldBeConstant(merger, expected=allClasses[0], got=allClasses)\n else:\n rec = ot.MarkRecord()\n rec.Class = allClasses[0]\n allAnchors = [None if r is None else r.MarkAnchor for r in glyphRecords]\n if allNone(allAnchors):\n anchor = None\n else:\n anchor = ot.Anchor()\n anchor.Format = 1\n merger.mergeThings(anchor, allAnchors)\n rec.MarkAnchor = anchor\n records.append(rec)\n array = ot.MarkArray()\n array.MarkRecord = records\n array.MarkCount = len(records)\n setattr(self, Mark + "Array", array)\n\n # BaseArray\n records = []\n for g, glyphRecords in zip(BaseCoverageGlyphs, zip(*BaseRecords)):\n if allNone(glyphRecords):\n rec = None\n else:\n rec = getattr(ot, Base + "Record")()\n anchors = []\n setattr(rec, Base + "Anchor", anchors)\n glyphAnchors = [\n [] if r is None else getattr(r, Base + "Anchor") for r in glyphRecords\n ]\n for l in glyphAnchors:\n l.extend([None] * (self.ClassCount - len(l)))\n for allAnchors in zip(*glyphAnchors):\n if allNone(allAnchors):\n anchor = None\n else:\n anchor = ot.Anchor()\n anchor.Format = 1\n merger.mergeThings(anchor, allAnchors)\n anchors.append(anchor)\n records.append(rec)\n array = getattr(ot, Base + "Array")()\n setattr(array, Base + "Record", records)\n setattr(array, Base + "Count", len(records))\n setattr(self, Base + "Array", array)\n\n\n@AligningMerger.merger(ot.MarkBasePos)\ndef merge(merger, self, lst):\n if not allEqualTo(self.Format, (l.Format for l in lst)):\n raise InconsistentFormats(\n merger,\n subtable="mark-to-base positioning lookup",\n expected=self.Format,\n got=[l.Format for l in lst],\n )\n if self.Format == 1:\n _MarkBasePosFormat1_merge(self, lst, merger)\n else:\n raise UnsupportedFormat(merger, subtable="mark-to-base positioning lookup")\n\n\n@AligningMerger.merger(ot.MarkMarkPos)\ndef merge(merger, self, lst):\n if not allEqualTo(self.Format, (l.Format for l in lst)):\n raise InconsistentFormats(\n merger,\n subtable="mark-to-mark positioning lookup",\n expected=self.Format,\n got=[l.Format for l in lst],\n )\n if self.Format == 1:\n _MarkBasePosFormat1_merge(self, lst, merger, "Mark1", "Mark2")\n else:\n raise UnsupportedFormat(merger, subtable="mark-to-mark positioning lookup")\n\n\ndef _PairSet_flatten(lst, font):\n self = ot.PairSet()\n self.Coverage = ot.Coverage()\n\n # Align them\n glyphs, padded = _merge_GlyphOrders(\n font,\n [[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst],\n [vs.PairValueRecord for vs in lst],\n )\n\n self.Coverage.glyphs = glyphs\n self.PairValueRecord = pvrs = []\n for values in zip(*padded):\n for v in values:\n if v is not None:\n pvrs.append(v)\n break\n else:\n assert False\n self.PairValueCount = len(self.PairValueRecord)\n\n return self\n\n\ndef _Lookup_PairPosFormat1_subtables_flatten(lst, font):\n assert allEqual(\n [l.ValueFormat2 == 0 for l in lst if l.PairSet]\n ), "Report bug against fonttools."\n\n self = ot.PairPos()\n self.Format = 1\n self.Coverage = ot.Coverage()\n self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0)\n self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0)\n\n # Align them\n glyphs, padded = _merge_GlyphOrders(\n font, [v.Coverage.glyphs for v in lst], [v.PairSet for v in lst]\n )\n\n self.Coverage.glyphs = glyphs\n self.PairSet = [\n _PairSet_flatten([v for v in values if v is not None], font)\n for values in zip(*padded)\n ]\n self.PairSetCount = len(self.PairSet)\n return self\n\n\ndef _Lookup_PairPosFormat2_subtables_flatten(lst, font):\n assert allEqual(\n [l.ValueFormat2 == 0 for l in lst if l.Class1Record]\n ), "Report bug against fonttools."\n\n self = ot.PairPos()\n self.Format = 2\n self.Coverage = ot.Coverage()\n self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0)\n self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0)\n\n # Align them\n glyphs, _ = _merge_GlyphOrders(font, [v.Coverage.glyphs for v in lst])\n self.Coverage.glyphs = glyphs\n\n matrices = _PairPosFormat2_align_matrices(self, lst, font, transparent=True)\n\n matrix = self.Class1Record = []\n for rows in zip(*matrices):\n row = ot.Class1Record()\n matrix.append(row)\n row.Class2Record = []\n row = row.Class2Record\n for cols in zip(*list(r.Class2Record for r in rows)):\n col = next(iter(c for c in cols if c is not None))\n row.append(col)\n\n return self\n\n\ndef _Lookup_PairPos_subtables_canonicalize(lst, font):\n """Merge multiple Format1 subtables at the beginning of lst,\n and merge multiple consecutive Format2 subtables that have the same\n Class2 (ie. were split because of offset overflows). Returns new list."""\n lst = list(lst)\n\n l = len(lst)\n i = 0\n while i < l and lst[i].Format == 1:\n i += 1\n lst[:i] = [_Lookup_PairPosFormat1_subtables_flatten(lst[:i], font)]\n\n l = len(lst)\n i = l\n while i > 0 and lst[i - 1].Format == 2:\n i -= 1\n lst[i:] = [_Lookup_PairPosFormat2_subtables_flatten(lst[i:], font)]\n\n return lst\n\n\ndef _Lookup_SinglePos_subtables_flatten(lst, font, min_inclusive_rec_format):\n glyphs, _ = _merge_GlyphOrders(font, [v.Coverage.glyphs for v in lst], None)\n num_glyphs = len(glyphs)\n new = ot.SinglePos()\n new.Format = 2\n new.ValueFormat = min_inclusive_rec_format\n new.Coverage = ot.Coverage()\n new.Coverage.glyphs = glyphs\n new.ValueCount = num_glyphs\n new.Value = [None] * num_glyphs\n for singlePos in lst:\n if singlePos.Format == 1:\n val_rec = singlePos.Value\n for gname in singlePos.Coverage.glyphs:\n i = glyphs.index(gname)\n new.Value[i] = copy.deepcopy(val_rec)\n elif singlePos.Format == 2:\n for j, gname in enumerate(singlePos.Coverage.glyphs):\n val_rec = singlePos.Value[j]\n i = glyphs.index(gname)\n new.Value[i] = copy.deepcopy(val_rec)\n return [new]\n\n\n@AligningMerger.merger(ot.CursivePos)\ndef merge(merger, self, lst):\n # Align them\n glyphs, padded = _merge_GlyphOrders(\n merger.font,\n [l.Coverage.glyphs for l in lst],\n [l.EntryExitRecord for l in lst],\n )\n\n self.Format = 1\n self.Coverage = ot.Coverage()\n self.Coverage.glyphs = glyphs\n self.EntryExitRecord = []\n for _ in glyphs:\n rec = ot.EntryExitRecord()\n rec.EntryAnchor = ot.Anchor()\n rec.EntryAnchor.Format = 1\n rec.ExitAnchor = ot.Anchor()\n rec.ExitAnchor.Format = 1\n self.EntryExitRecord.append(rec)\n merger.mergeLists(self.EntryExitRecord, padded)\n self.EntryExitCount = len(self.EntryExitRecord)\n\n\n@AligningMerger.merger(ot.EntryExitRecord)\ndef merge(merger, self, lst):\n if all(master.EntryAnchor is None for master in lst):\n self.EntryAnchor = None\n if all(master.ExitAnchor is None for master in lst):\n self.ExitAnchor = None\n merger.mergeObjects(self, lst)\n\n\n@AligningMerger.merger(ot.Lookup)\ndef merge(merger, self, lst):\n subtables = merger.lookup_subtables = [l.SubTable for l in lst]\n\n # Remove Extension subtables\n for l, sts in list(zip(lst, subtables)) + [(self, self.SubTable)]:\n if not sts:\n continue\n if sts[0].__class__.__name__.startswith("Extension"):\n if not allEqual([st.__class__ for st in sts]):\n raise InconsistentExtensions(\n merger,\n expected="Extension",\n got=[st.__class__.__name__ for st in sts],\n )\n if not allEqual([st.ExtensionLookupType for st in sts]):\n raise InconsistentExtensions(merger)\n l.LookupType = sts[0].ExtensionLookupType\n new_sts = [st.ExtSubTable for st in sts]\n del sts[:]\n sts.extend(new_sts)\n\n isPairPos = self.SubTable and isinstance(self.SubTable[0], ot.PairPos)\n\n if isPairPos:\n # AFDKO and feaLib sometimes generate two Format1 subtables instead of one.\n # Merge those before continuing.\n # https://github.com/fonttools/fonttools/issues/719\n self.SubTable = _Lookup_PairPos_subtables_canonicalize(\n self.SubTable, merger.font\n )\n subtables = merger.lookup_subtables = [\n _Lookup_PairPos_subtables_canonicalize(st, merger.font) for st in subtables\n ]\n else:\n isSinglePos = self.SubTable and isinstance(self.SubTable[0], ot.SinglePos)\n if isSinglePos:\n numSubtables = [len(st) for st in subtables]\n if not all([nums == numSubtables[0] for nums in numSubtables]):\n # Flatten list of SinglePos subtables to single Format 2 subtable,\n # with all value records set to the rec format type.\n # We use buildSinglePos() to optimize the lookup after merging.\n valueFormatList = [t.ValueFormat for st in subtables for t in st]\n # Find the minimum value record that can accomodate all the singlePos subtables.\n mirf = reduce(ior, valueFormatList)\n self.SubTable = _Lookup_SinglePos_subtables_flatten(\n self.SubTable, merger.font, mirf\n )\n subtables = merger.lookup_subtables = [\n _Lookup_SinglePos_subtables_flatten(st, merger.font, mirf)\n for st in subtables\n ]\n flattened = True\n else:\n flattened = False\n\n merger.mergeLists(self.SubTable, subtables)\n self.SubTableCount = len(self.SubTable)\n\n if isPairPos:\n # If format-1 subtable created during canonicalization is empty, remove it.\n assert len(self.SubTable) >= 1 and self.SubTable[0].Format == 1\n if not self.SubTable[0].Coverage.glyphs:\n self.SubTable.pop(0)\n self.SubTableCount -= 1\n\n # If format-2 subtable created during canonicalization is empty, remove it.\n assert len(self.SubTable) >= 1 and self.SubTable[-1].Format == 2\n if not self.SubTable[-1].Coverage.glyphs:\n self.SubTable.pop(-1)\n self.SubTableCount -= 1\n\n # Compact the merged subtables\n # This is a good moment to do it because the compaction should create\n # smaller subtables, which may prevent overflows from happening.\n # Keep reading the value from the ENV until ufo2ft switches to the config system\n level = merger.font.cfg.get(\n "fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL",\n default=_compression_level_from_env(),\n )\n if level != 0:\n log.info("Compacting GPOS...")\n self.SubTable = compact_pair_pos(merger.font, level, self.SubTable)\n self.SubTableCount = len(self.SubTable)\n\n elif isSinglePos and flattened:\n singlePosTable = self.SubTable[0]\n glyphs = singlePosTable.Coverage.glyphs\n # We know that singlePosTable is Format 2, as this is set\n # in _Lookup_SinglePos_subtables_flatten.\n singlePosMapping = {\n gname: valRecord for gname, valRecord in zip(glyphs, singlePosTable.Value)\n }\n self.SubTable = buildSinglePos(\n singlePosMapping, merger.font.getReverseGlyphMap()\n )\n merger.mergeObjects(self, lst, exclude=["SubTable", "SubTableCount"])\n\n del merger.lookup_subtables\n\n\n#\n# InstancerMerger\n#\n\n\nclass InstancerMerger(AligningMerger):\n """A merger that takes multiple master fonts, and instantiates\n an instance."""\n\n def __init__(self, font, model, location):\n Merger.__init__(self, font)\n self.model = model\n self.location = location\n self.masterScalars = model.getMasterScalars(location)\n\n\n@InstancerMerger.merger(ot.CaretValue)\ndef merge(merger, self, lst):\n assert self.Format == 1\n Coords = [a.Coordinate for a in lst]\n model = merger.model\n masterScalars = merger.masterScalars\n self.Coordinate = otRound(\n model.interpolateFromValuesAndScalars(Coords, masterScalars)\n )\n\n\n@InstancerMerger.merger(ot.Anchor)\ndef merge(merger, self, lst):\n assert self.Format == 1\n XCoords = [a.XCoordinate for a in lst]\n YCoords = [a.YCoordinate for a in lst]\n model = merger.model\n masterScalars = merger.masterScalars\n self.XCoordinate = otRound(\n model.interpolateFromValuesAndScalars(XCoords, masterScalars)\n )\n self.YCoordinate = otRound(\n model.interpolateFromValuesAndScalars(YCoords, masterScalars)\n )\n\n\n@InstancerMerger.merger(otBase.ValueRecord)\ndef merge(merger, self, lst):\n model = merger.model\n masterScalars = merger.masterScalars\n # TODO Handle differing valueformats\n for name, tableName in [\n ("XAdvance", "XAdvDevice"),\n ("YAdvance", "YAdvDevice"),\n ("XPlacement", "XPlaDevice"),\n ("YPlacement", "YPlaDevice"),\n ]:\n assert not hasattr(self, tableName)\n\n if hasattr(self, name):\n values = [getattr(a, name, 0) for a in lst]\n value = otRound(\n model.interpolateFromValuesAndScalars(values, masterScalars)\n )\n setattr(self, name, value)\n\n\n#\n# MutatorMerger\n#\n\n\nclass MutatorMerger(AligningMerger):\n """A merger that takes a variable font, and instantiates\n an instance. While there's no "merging" to be done per se,\n the operation can benefit from many operations that the\n aligning merger does."""\n\n def __init__(self, font, instancer, deleteVariations=True):\n Merger.__init__(self, font)\n self.instancer = instancer\n self.deleteVariations = deleteVariations\n\n\n@MutatorMerger.merger(ot.CaretValue)\ndef merge(merger, self, lst):\n # Hack till we become selfless.\n self.__dict__ = lst[0].__dict__.copy()\n\n if self.Format != 3:\n return\n\n instancer = merger.instancer\n dev = self.DeviceTable\n if merger.deleteVariations:\n del self.DeviceTable\n if dev:\n assert dev.DeltaFormat == 0x8000\n varidx = (dev.StartSize << 16) + dev.EndSize\n delta = otRound(instancer[varidx])\n self.Coordinate += delta\n\n if merger.deleteVariations:\n self.Format = 1\n\n\n@MutatorMerger.merger(ot.Anchor)\ndef merge(merger, self, lst):\n # Hack till we become selfless.\n self.__dict__ = lst[0].__dict__.copy()\n\n if self.Format != 3:\n return\n\n instancer = merger.instancer\n for v in "XY":\n tableName = v + "DeviceTable"\n if not hasattr(self, tableName):\n continue\n dev = getattr(self, tableName)\n if merger.deleteVariations:\n delattr(self, tableName)\n if dev is None:\n continue\n\n assert dev.DeltaFormat == 0x8000\n varidx = (dev.StartSize << 16) + dev.EndSize\n delta = otRound(instancer[varidx])\n\n attr = v + "Coordinate"\n setattr(self, attr, getattr(self, attr) + delta)\n\n if merger.deleteVariations:\n self.Format = 1\n\n\n@MutatorMerger.merger(otBase.ValueRecord)\ndef merge(merger, self, lst):\n # Hack till we become selfless.\n self.__dict__ = lst[0].__dict__.copy()\n\n instancer = merger.instancer\n for name, tableName in [\n ("XAdvance", "XAdvDevice"),\n ("YAdvance", "YAdvDevice"),\n ("XPlacement", "XPlaDevice"),\n ("YPlacement", "YPlaDevice"),\n ]:\n if not hasattr(self, tableName):\n continue\n dev = getattr(self, tableName)\n if merger.deleteVariations:\n delattr(self, tableName)\n if dev is None:\n continue\n\n assert dev.DeltaFormat == 0x8000\n varidx = (dev.StartSize << 16) + dev.EndSize\n delta = otRound(instancer[varidx])\n\n setattr(self, name, getattr(self, name, 0) + delta)\n\n\n#\n# VariationMerger\n#\n\n\nclass VariationMerger(AligningMerger):\n """A merger that takes multiple master fonts, and builds a\n variable font."""\n\n def __init__(self, model, axisTags, font):\n Merger.__init__(self, font)\n self.store_builder = varStore.OnlineVarStoreBuilder(axisTags)\n self.setModel(model)\n\n def setModel(self, model):\n self.model = model\n self.store_builder.setModel(model)\n\n def mergeThings(self, out, lst):\n masterModel = None\n origTTFs = None\n if None in lst:\n if allNone(lst):\n if out is not None:\n raise FoundANone(self, got=lst)\n return\n\n # temporarily subset the list of master ttfs to the ones for which\n # master values are not None\n origTTFs = self.ttfs\n if self.ttfs:\n self.ttfs = subList([v is not None for v in lst], self.ttfs)\n\n masterModel = self.model\n model, lst = masterModel.getSubModel(lst)\n self.setModel(model)\n\n super(VariationMerger, self).mergeThings(out, lst)\n\n if masterModel:\n self.setModel(masterModel)\n if origTTFs:\n self.ttfs = origTTFs\n\n\ndef buildVarDevTable(store_builder, master_values):\n if allEqual(master_values):\n return master_values[0], None\n base, varIdx = store_builder.storeMasters(master_values)\n return base, builder.buildVarDevTable(varIdx)\n\n\n@VariationMerger.merger(ot.BaseCoord)\ndef merge(merger, self, lst):\n if self.Format != 1:\n raise UnsupportedFormat(merger, subtable="a baseline coordinate")\n self.Coordinate, DeviceTable = buildVarDevTable(\n merger.store_builder, [a.Coordinate for a in lst]\n )\n if DeviceTable:\n self.Format = 3\n self.DeviceTable = DeviceTable\n\n\n@VariationMerger.merger(ot.CaretValue)\ndef merge(merger, self, lst):\n if self.Format != 1:\n raise UnsupportedFormat(merger, subtable="a caret")\n self.Coordinate, DeviceTable = buildVarDevTable(\n merger.store_builder, [a.Coordinate for a in lst]\n )\n if DeviceTable:\n self.Format = 3\n self.DeviceTable = DeviceTable\n\n\n@VariationMerger.merger(ot.Anchor)\ndef merge(merger, self, lst):\n if self.Format != 1:\n raise UnsupportedFormat(merger, subtable="an anchor")\n self.XCoordinate, XDeviceTable = buildVarDevTable(\n merger.store_builder, [a.XCoordinate for a in lst]\n )\n self.YCoordinate, YDeviceTable = buildVarDevTable(\n merger.store_builder, [a.YCoordinate for a in lst]\n )\n if XDeviceTable or YDeviceTable:\n self.Format = 3\n self.XDeviceTable = XDeviceTable\n self.YDeviceTable = YDeviceTable\n\n\n@VariationMerger.merger(otBase.ValueRecord)\ndef merge(merger, self, lst):\n for name, tableName in [\n ("XAdvance", "XAdvDevice"),\n ("YAdvance", "YAdvDevice"),\n ("XPlacement", "XPlaDevice"),\n ("YPlacement", "YPlaDevice"),\n ]:\n if hasattr(self, name):\n value, deviceTable = buildVarDevTable(\n merger.store_builder, [getattr(a, name, 0) for a in lst]\n )\n setattr(self, name, value)\n if deviceTable:\n setattr(self, tableName, deviceTable)\n\n\nclass COLRVariationMerger(VariationMerger):\n """A specialized VariationMerger that takes multiple master fonts containing\n COLRv1 tables, and builds a variable COLR font.\n\n COLR tables are special in that variable subtables can be associated with\n multiple delta-set indices (via VarIndexBase).\n They also contain tables that must change their type (not simply the Format)\n as they become variable (e.g. Affine2x3 -> VarAffine2x3) so this merger takes\n care of that too.\n """\n\n def __init__(self, model, axisTags, font, allowLayerReuse=True):\n VariationMerger.__init__(self, model, axisTags, font)\n # maps {tuple(varIdxes): VarIndexBase} to facilitate reuse of VarIndexBase\n # between variable tables with same varIdxes.\n self.varIndexCache = {}\n # flat list of all the varIdxes generated while merging\n self.varIdxes = []\n # set of id()s of the subtables that contain variations after merging\n # and need to be upgraded to the associated VarType.\n self.varTableIds = set()\n # we keep these around for rebuilding a LayerList while merging PaintColrLayers\n self.layers = []\n self.layerReuseCache = None\n if allowLayerReuse:\n self.layerReuseCache = LayerReuseCache()\n # flag to ensure BaseGlyphList is fully merged before LayerList gets processed\n self._doneBaseGlyphs = False\n\n def mergeTables(self, font, master_ttfs, tableTags=("COLR",)):\n if "COLR" in tableTags and "COLR" in font:\n # The merger modifies the destination COLR table in-place. If this contains\n # multiple PaintColrLayers referencing the same layers from LayerList, it's\n # a problem because we may risk modifying the same paint more than once, or\n # worse, fail while attempting to do that.\n # We don't know whether the master COLR table was built with layer reuse\n # disabled, thus to be safe we rebuild its LayerList so that it contains only\n # unique layers referenced from non-overlapping PaintColrLayers throughout\n # the base paint graphs.\n self.expandPaintColrLayers(font["COLR"].table)\n VariationMerger.mergeTables(self, font, master_ttfs, tableTags)\n\n def checkFormatEnum(self, out, lst, validate=lambda _: True):\n fmt = out.Format\n formatEnum = out.formatEnum\n ok = False\n try:\n fmt = formatEnum(fmt)\n except ValueError:\n pass\n else:\n ok = validate(fmt)\n if not ok:\n raise UnsupportedFormat(self, subtable=type(out).__name__, value=fmt)\n expected = fmt\n got = []\n for v in lst:\n fmt = getattr(v, "Format", None)\n try:\n fmt = formatEnum(fmt)\n except ValueError:\n pass\n got.append(fmt)\n if not allEqualTo(expected, got):\n raise InconsistentFormats(\n self,\n subtable=type(out).__name__,\n expected=expected,\n got=got,\n )\n return expected\n\n def mergeSparseDict(self, out, lst):\n for k in out.keys():\n try:\n self.mergeThings(out[k], [v.get(k) for v in lst])\n except VarLibMergeError as e:\n e.stack.append(f"[{k!r}]")\n raise\n\n def mergeAttrs(self, out, lst, attrs):\n for attr in attrs:\n value = getattr(out, attr)\n values = [getattr(item, attr) for item in lst]\n try:\n self.mergeThings(value, values)\n except VarLibMergeError as e:\n e.stack.append(f".{attr}")\n raise\n\n def storeMastersForAttr(self, out, lst, attr):\n master_values = [getattr(item, attr) for item in lst]\n\n # VarStore treats deltas for fixed-size floats as integers, so we\n # must convert master values to int before storing them in the builder\n # then back to float.\n is_fixed_size_float = False\n conv = out.getConverterByName(attr)\n if isinstance(conv, BaseFixedValue):\n is_fixed_size_float = True\n master_values = [conv.toInt(v) for v in master_values]\n\n baseValue = master_values[0]\n varIdx = ot.NO_VARIATION_INDEX\n if not allEqual(master_values):\n baseValue, varIdx = self.store_builder.storeMasters(master_values)\n\n if is_fixed_size_float:\n baseValue = conv.fromInt(baseValue)\n\n return baseValue, varIdx\n\n def storeVariationIndices(self, varIdxes) -> int:\n # try to reuse an existing VarIndexBase for the same varIdxes, or else\n # create a new one\n key = tuple(varIdxes)\n varIndexBase = self.varIndexCache.get(key)\n\n if varIndexBase is None:\n # scan for a full match anywhere in the self.varIdxes\n for i in range(len(self.varIdxes) - len(varIdxes) + 1):\n if self.varIdxes[i : i + len(varIdxes)] == varIdxes:\n self.varIndexCache[key] = varIndexBase = i\n break\n\n if varIndexBase is None:\n # try find a partial match at the end of the self.varIdxes\n for n in range(len(varIdxes) - 1, 0, -1):\n if self.varIdxes[-n:] == varIdxes[:n]:\n varIndexBase = len(self.varIdxes) - n\n self.varIndexCache[key] = varIndexBase\n self.varIdxes.extend(varIdxes[n:])\n break\n\n if varIndexBase is None:\n # no match found, append at the end\n self.varIndexCache[key] = varIndexBase = len(self.varIdxes)\n self.varIdxes.extend(varIdxes)\n\n return varIndexBase\n\n def mergeVariableAttrs(self, out, lst, attrs) -> int:\n varIndexBase = ot.NO_VARIATION_INDEX\n varIdxes = []\n for attr in attrs:\n baseValue, varIdx = self.storeMastersForAttr(out, lst, attr)\n setattr(out, attr, baseValue)\n varIdxes.append(varIdx)\n\n if any(v != ot.NO_VARIATION_INDEX for v in varIdxes):\n varIndexBase = self.storeVariationIndices(varIdxes)\n\n return varIndexBase\n\n @classmethod\n def convertSubTablesToVarType(cls, table):\n for path in dfs_base_table(\n table,\n skip_root=True,\n predicate=lambda path: (\n getattr(type(path[-1].value), "VarType", None) is not None\n ),\n ):\n st = path[-1]\n subTable = st.value\n varType = type(subTable).VarType\n newSubTable = varType()\n newSubTable.__dict__.update(subTable.__dict__)\n newSubTable.populateDefaults()\n parent = path[-2].value\n if st.index is not None:\n getattr(parent, st.name)[st.index] = newSubTable\n else:\n setattr(parent, st.name, newSubTable)\n\n @staticmethod\n def expandPaintColrLayers(colr):\n """Rebuild LayerList without PaintColrLayers reuse.\n\n Each base paint graph is fully DFS-traversed (with exception of PaintColrGlyph\n which are irrelevant for this); any layers referenced via PaintColrLayers are\n collected into a new LayerList and duplicated when reuse is detected, to ensure\n that all paints are distinct objects at the end of the process.\n PaintColrLayers's FirstLayerIndex/NumLayers are updated so that no overlap\n is left. Also, any consecutively nested PaintColrLayers are flattened.\n The COLR table's LayerList is replaced with the new unique layers.\n A side effect is also that any layer from the old LayerList which is not\n referenced by any PaintColrLayers is dropped.\n """\n if not colr.LayerList:\n # if no LayerList, there's nothing to expand\n return\n uniqueLayerIDs = set()\n newLayerList = []\n for rec in colr.BaseGlyphList.BaseGlyphPaintRecord:\n frontier = [rec.Paint]\n while frontier:\n paint = frontier.pop()\n if paint.Format == ot.PaintFormat.PaintColrGlyph:\n # don't traverse these, we treat them as constant for merging\n continue\n elif paint.Format == ot.PaintFormat.PaintColrLayers:\n # de-treeify any nested PaintColrLayers, append unique copies to\n # the new layer list and update PaintColrLayers index/count\n children = list(_flatten_layers(paint, colr))\n first_layer_index = len(newLayerList)\n for layer in children:\n if id(layer) in uniqueLayerIDs:\n layer = copy.deepcopy(layer)\n assert id(layer) not in uniqueLayerIDs\n newLayerList.append(layer)\n uniqueLayerIDs.add(id(layer))\n paint.FirstLayerIndex = first_layer_index\n paint.NumLayers = len(children)\n else:\n children = paint.getChildren(colr)\n frontier.extend(reversed(children))\n # sanity check all the new layers are distinct objects\n assert len(newLayerList) == len(uniqueLayerIDs)\n colr.LayerList.Paint = newLayerList\n colr.LayerList.LayerCount = len(newLayerList)\n\n\n@COLRVariationMerger.merger(ot.BaseGlyphList)\ndef merge(merger, self, lst):\n # ignore BaseGlyphCount, allow sparse glyph sets across masters\n out = {rec.BaseGlyph: rec for rec in self.BaseGlyphPaintRecord}\n masters = [{rec.BaseGlyph: rec for rec in m.BaseGlyphPaintRecord} for m in lst]\n\n for i, g in enumerate(out.keys()):\n try:\n # missing base glyphs don't participate in the merge\n merger.mergeThings(out[g], [v.get(g) for v in masters])\n except VarLibMergeError as e:\n e.stack.append(f".BaseGlyphPaintRecord[{i}]")\n e.cause["location"] = f"base glyph {g!r}"\n raise\n\n merger._doneBaseGlyphs = True\n\n\n@COLRVariationMerger.merger(ot.LayerList)\ndef merge(merger, self, lst):\n # nothing to merge for LayerList, assuming we have already merged all PaintColrLayers\n # found while traversing the paint graphs rooted at BaseGlyphPaintRecords.\n assert merger._doneBaseGlyphs, "BaseGlyphList must be merged before LayerList"\n # Simply flush the final list of layers and go home.\n self.LayerCount = len(merger.layers)\n self.Paint = merger.layers\n\n\ndef _flatten_layers(root, colr):\n assert root.Format == ot.PaintFormat.PaintColrLayers\n for paint in root.getChildren(colr):\n if paint.Format == ot.PaintFormat.PaintColrLayers:\n yield from _flatten_layers(paint, colr)\n else:\n yield paint\n\n\ndef _merge_PaintColrLayers(self, out, lst):\n # we only enforce that the (flat) number of layers is the same across all masters\n # but we allow FirstLayerIndex to differ to acommodate for sparse glyph sets.\n\n out_layers = list(_flatten_layers(out, self.font["COLR"].table))\n\n # sanity check ttfs are subset to current values (see VariationMerger.mergeThings)\n # before matching each master PaintColrLayers to its respective COLR by position\n assert len(self.ttfs) == len(lst)\n master_layerses = [\n list(_flatten_layers(lst[i], self.ttfs[i]["COLR"].table))\n for i in range(len(lst))\n ]\n\n try:\n self.mergeLists(out_layers, master_layerses)\n except VarLibMergeError as e:\n # NOTE: This attribute doesn't actually exist in PaintColrLayers but it's\n # handy to have it in the stack trace for debugging.\n e.stack.append(".Layers")\n raise\n\n # following block is very similar to LayerListBuilder._beforeBuildPaintColrLayers\n # but I couldn't find a nice way to share the code between the two...\n\n if self.layerReuseCache is not None:\n # successful reuse can make the list smaller\n out_layers = self.layerReuseCache.try_reuse(out_layers)\n\n # if the list is still too big we need to tree-fy it\n is_tree = len(out_layers) > MAX_PAINT_COLR_LAYER_COUNT\n out_layers = build_n_ary_tree(out_layers, n=MAX_PAINT_COLR_LAYER_COUNT)\n\n # We now have a tree of sequences with Paint leaves.\n # Convert the sequences into PaintColrLayers.\n def listToColrLayers(paint):\n if isinstance(paint, list):\n layers = [listToColrLayers(l) for l in paint]\n paint = ot.Paint()\n paint.Format = int(ot.PaintFormat.PaintColrLayers)\n paint.NumLayers = len(layers)\n paint.FirstLayerIndex = len(self.layers)\n self.layers.extend(layers)\n if self.layerReuseCache is not None:\n self.layerReuseCache.add(layers, paint.FirstLayerIndex)\n return paint\n\n out_layers = [listToColrLayers(l) for l in out_layers]\n\n if len(out_layers) == 1 and out_layers[0].Format == ot.PaintFormat.PaintColrLayers:\n # special case when the reuse cache finds a single perfect PaintColrLayers match\n # (it can only come from a successful reuse, _flatten_layers has gotten rid of\n # all nested PaintColrLayers already); we assign it directly and avoid creating\n # an extra table\n out.NumLayers = out_layers[0].NumLayers\n out.FirstLayerIndex = out_layers[0].FirstLayerIndex\n else:\n out.NumLayers = len(out_layers)\n out.FirstLayerIndex = len(self.layers)\n\n self.layers.extend(out_layers)\n\n # Register our parts for reuse provided we aren't a tree\n # If we are a tree the leaves registered for reuse and that will suffice\n if self.layerReuseCache is not None and not is_tree:\n self.layerReuseCache.add(out_layers, out.FirstLayerIndex)\n\n\n@COLRVariationMerger.merger((ot.Paint, ot.ClipBox))\ndef merge(merger, self, lst):\n fmt = merger.checkFormatEnum(self, lst, lambda fmt: not fmt.is_variable())\n\n if fmt is ot.PaintFormat.PaintColrLayers:\n _merge_PaintColrLayers(merger, self, lst)\n return\n\n varFormat = fmt.as_variable()\n\n varAttrs = ()\n if varFormat is not None:\n varAttrs = otBase.getVariableAttrs(type(self), varFormat)\n staticAttrs = (c.name for c in self.getConverters() if c.name not in varAttrs)\n\n merger.mergeAttrs(self, lst, staticAttrs)\n\n varIndexBase = merger.mergeVariableAttrs(self, lst, varAttrs)\n\n subTables = [st.value for st in self.iterSubTables()]\n\n # Convert table to variable if itself has variations or any subtables have\n isVariable = varIndexBase != ot.NO_VARIATION_INDEX or any(\n id(table) in merger.varTableIds for table in subTables\n )\n\n if isVariable:\n if varAttrs:\n # Some PaintVar* don't have any scalar attributes that can vary,\n # only indirect offsets to other variable subtables, thus have\n # no VarIndexBase of their own (e.g. PaintVarTransform)\n self.VarIndexBase = varIndexBase\n\n if subTables:\n # Convert Affine2x3 -> VarAffine2x3, ColorLine -> VarColorLine, etc.\n merger.convertSubTablesToVarType(self)\n\n assert varFormat is not None\n self.Format = int(varFormat)\n\n\n@COLRVariationMerger.merger((ot.Affine2x3, ot.ColorStop))\ndef merge(merger, self, lst):\n varType = type(self).VarType\n\n varAttrs = otBase.getVariableAttrs(varType)\n staticAttrs = (c.name for c in self.getConverters() if c.name not in varAttrs)\n\n merger.mergeAttrs(self, lst, staticAttrs)\n\n varIndexBase = merger.mergeVariableAttrs(self, lst, varAttrs)\n\n if varIndexBase != ot.NO_VARIATION_INDEX:\n self.VarIndexBase = varIndexBase\n # mark as having variations so the parent table will convert to Var{Type}\n merger.varTableIds.add(id(self))\n\n\n@COLRVariationMerger.merger(ot.ColorLine)\ndef merge(merger, self, lst):\n merger.mergeAttrs(self, lst, (c.name for c in self.getConverters()))\n\n if any(id(stop) in merger.varTableIds for stop in self.ColorStop):\n merger.convertSubTablesToVarType(self)\n merger.varTableIds.add(id(self))\n\n\n@COLRVariationMerger.merger(ot.ClipList, "clips")\ndef merge(merger, self, lst):\n # 'sparse' in that we allow non-default masters to omit ClipBox entries\n # for some/all glyphs (i.e. they don't participate)\n merger.mergeSparseDict(self, lst)\n
.venv\Lib\site-packages\fontTools\varLib\merger.py
merger.py
Python
62,519
0.75
0.267327
0.108666
awesome-app
32
2024-04-05T02:33:00.770681
MIT
false
86d9a282cc02be38144088a725f71422
"""Variation fonts interpolation models."""\n\n__all__ = [\n "normalizeValue",\n "normalizeLocation",\n "supportScalar",\n "piecewiseLinearMap",\n "VariationModel",\n]\n\nfrom fontTools.misc.roundTools import noRound\nfrom .errors import VariationModelError\n\n\ndef nonNone(lst):\n return [l for l in lst if l is not None]\n\n\ndef allNone(lst):\n return all(l is None for l in lst)\n\n\ndef allEqualTo(ref, lst, mapper=None):\n if mapper is None:\n return all(ref == item for item in lst)\n\n mapped = mapper(ref)\n return all(mapped == mapper(item) for item in lst)\n\n\ndef allEqual(lst, mapper=None):\n if not lst:\n return True\n it = iter(lst)\n try:\n first = next(it)\n except StopIteration:\n return True\n return allEqualTo(first, it, mapper=mapper)\n\n\ndef subList(truth, lst):\n assert len(truth) == len(lst)\n return [l for l, t in zip(lst, truth) if t]\n\n\ndef normalizeValue(v, triple, extrapolate=False):\n """Normalizes value based on a min/default/max triple.\n\n >>> normalizeValue(400, (100, 400, 900))\n 0.0\n >>> normalizeValue(100, (100, 400, 900))\n -1.0\n >>> normalizeValue(650, (100, 400, 900))\n 0.5\n """\n lower, default, upper = triple\n if not (lower <= default <= upper):\n raise ValueError(\n f"Invalid axis values, must be minimum, default, maximum: "\n f"{lower:3.3f}, {default:3.3f}, {upper:3.3f}"\n )\n if not extrapolate:\n v = max(min(v, upper), lower)\n\n if v == default or lower == upper:\n return 0.0\n\n if (v < default and lower != default) or (v > default and upper == default):\n return (v - default) / (default - lower)\n else:\n assert (v > default and upper != default) or (\n v < default and lower == default\n ), f"Ooops... v={v}, triple=({lower}, {default}, {upper})"\n return (v - default) / (upper - default)\n\n\ndef normalizeLocation(location, axes, extrapolate=False, *, validate=False):\n """Normalizes location based on axis min/default/max values from axes.\n\n >>> axes = {"wght": (100, 400, 900)}\n >>> normalizeLocation({"wght": 400}, axes)\n {'wght': 0.0}\n >>> normalizeLocation({"wght": 100}, axes)\n {'wght': -1.0}\n >>> normalizeLocation({"wght": 900}, axes)\n {'wght': 1.0}\n >>> normalizeLocation({"wght": 650}, axes)\n {'wght': 0.5}\n >>> normalizeLocation({"wght": 1000}, axes)\n {'wght': 1.0}\n >>> normalizeLocation({"wght": 0}, axes)\n {'wght': -1.0}\n >>> axes = {"wght": (0, 0, 1000)}\n >>> normalizeLocation({"wght": 0}, axes)\n {'wght': 0.0}\n >>> normalizeLocation({"wght": -1}, axes)\n {'wght': 0.0}\n >>> normalizeLocation({"wght": 1000}, axes)\n {'wght': 1.0}\n >>> normalizeLocation({"wght": 500}, axes)\n {'wght': 0.5}\n >>> normalizeLocation({"wght": 1001}, axes)\n {'wght': 1.0}\n >>> axes = {"wght": (0, 1000, 1000)}\n >>> normalizeLocation({"wght": 0}, axes)\n {'wght': -1.0}\n >>> normalizeLocation({"wght": -1}, axes)\n {'wght': -1.0}\n >>> normalizeLocation({"wght": 500}, axes)\n {'wght': -0.5}\n >>> normalizeLocation({"wght": 1000}, axes)\n {'wght': 0.0}\n >>> normalizeLocation({"wght": 1001}, axes)\n {'wght': 0.0}\n """\n if validate:\n assert set(location.keys()) <= set(axes.keys()), set(location.keys()) - set(\n axes.keys()\n )\n out = {}\n for tag, triple in axes.items():\n v = location.get(tag, triple[1])\n out[tag] = normalizeValue(v, triple, extrapolate=extrapolate)\n return out\n\n\ndef supportScalar(location, support, ot=True, extrapolate=False, axisRanges=None):\n """Returns the scalar multiplier at location, for a master\n with support. If ot is True, then a peak value of zero\n for support of an axis means "axis does not participate". That\n is how OpenType Variation Font technology works.\n\n If extrapolate is True, axisRanges must be a dict that maps axis\n names to (axisMin, axisMax) tuples.\n\n >>> supportScalar({}, {})\n 1.0\n >>> supportScalar({'wght':.2}, {})\n 1.0\n >>> supportScalar({'wght':.2}, {'wght':(0,2,3)})\n 0.1\n >>> supportScalar({'wght':2.5}, {'wght':(0,2,4)})\n 0.75\n >>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})\n 0.75\n >>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}, ot=False)\n 0.375\n >>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})\n 0.75\n >>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})\n 0.75\n >>> supportScalar({'wght':3}, {'wght':(0,1,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})\n -1.0\n >>> supportScalar({'wght':-1}, {'wght':(0,1,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})\n -1.0\n >>> supportScalar({'wght':3}, {'wght':(0,2,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})\n 1.5\n >>> supportScalar({'wght':-1}, {'wght':(0,2,2)}, extrapolate=True, axisRanges={'wght':(0, 2)})\n -0.5\n """\n if extrapolate and axisRanges is None:\n raise TypeError("axisRanges must be passed when extrapolate is True")\n scalar = 1.0\n for axis, (lower, peak, upper) in support.items():\n if ot:\n # OpenType-specific case handling\n if peak == 0.0:\n continue\n if lower > peak or peak > upper:\n continue\n if lower < 0.0 and upper > 0.0:\n continue\n v = location.get(axis, 0.0)\n else:\n assert axis in location\n v = location[axis]\n if v == peak:\n continue\n\n if extrapolate:\n axisMin, axisMax = axisRanges[axis]\n if v < axisMin and lower <= axisMin:\n if peak <= axisMin and peak < upper:\n scalar *= (v - upper) / (peak - upper)\n continue\n elif axisMin < peak:\n scalar *= (v - lower) / (peak - lower)\n continue\n elif axisMax < v and axisMax <= upper:\n if axisMax <= peak and lower < peak:\n scalar *= (v - lower) / (peak - lower)\n continue\n elif peak < axisMax:\n scalar *= (v - upper) / (peak - upper)\n continue\n\n if v <= lower or upper <= v:\n scalar = 0.0\n break\n\n if v < peak:\n scalar *= (v - lower) / (peak - lower)\n else: # v > peak\n scalar *= (v - upper) / (peak - upper)\n return scalar\n\n\nclass VariationModel(object):\n """Locations must have the base master at the origin (ie. 0).\n\n If axis-ranges are not provided, values are assumed to be normalized to\n the range [-1, 1].\n\n If the extrapolate argument is set to True, then values are extrapolated\n outside the axis range.\n\n >>> from pprint import pprint\n >>> axisRanges = {'wght': (-180, +180), 'wdth': (-1, +1)}\n >>> locations = [ \\n {'wght':100}, \\n {'wght':-100}, \\n {'wght':-180}, \\n {'wdth':+.3}, \\n {'wght':+120,'wdth':.3}, \\n {'wght':+120,'wdth':.2}, \\n {}, \\n {'wght':+180,'wdth':.3}, \\n {'wght':+180}, \\n ]\n >>> model = VariationModel(locations, axisOrder=['wght'], axisRanges=axisRanges)\n >>> pprint(model.locations)\n [{},\n {'wght': -100},\n {'wght': -180},\n {'wght': 100},\n {'wght': 180},\n {'wdth': 0.3},\n {'wdth': 0.3, 'wght': 180},\n {'wdth': 0.3, 'wght': 120},\n {'wdth': 0.2, 'wght': 120}]\n >>> pprint(model.deltaWeights)\n [{},\n {0: 1.0},\n {0: 1.0},\n {0: 1.0},\n {0: 1.0},\n {0: 1.0},\n {0: 1.0, 4: 1.0, 5: 1.0},\n {0: 1.0, 3: 0.75, 4: 0.25, 5: 1.0, 6: 0.6666666666666666},\n {0: 1.0,\n 3: 0.75,\n 4: 0.25,\n 5: 0.6666666666666667,\n 6: 0.4444444444444445,\n 7: 0.6666666666666667}]\n """\n\n def __init__(\n self, locations, axisOrder=None, extrapolate=False, *, axisRanges=None\n ):\n if len(set(tuple(sorted(l.items())) for l in locations)) != len(locations):\n raise VariationModelError("Locations must be unique.")\n\n self.origLocations = locations\n self.axisOrder = axisOrder if axisOrder is not None else []\n self.extrapolate = extrapolate\n if axisRanges is None:\n if extrapolate:\n axisRanges = self.computeAxisRanges(locations)\n else:\n allAxes = {axis for loc in locations for axis in loc.keys()}\n axisRanges = {axis: (-1, 1) for axis in allAxes}\n self.axisRanges = axisRanges\n\n locations = [{k: v for k, v in loc.items() if v != 0.0} for loc in locations]\n keyFunc = self.getMasterLocationsSortKeyFunc(\n locations, axisOrder=self.axisOrder\n )\n self.locations = sorted(locations, key=keyFunc)\n\n # Mapping from user's master order to our master order\n self.mapping = [self.locations.index(l) for l in locations]\n self.reverseMapping = [locations.index(l) for l in self.locations]\n\n self._computeMasterSupports()\n self._subModels = {}\n\n def getSubModel(self, items):\n """Return a sub-model and the items that are not None.\n\n The sub-model is necessary for working with the subset\n of items when some are None.\n\n The sub-model is cached."""\n if None not in items:\n return self, items\n key = tuple(v is not None for v in items)\n subModel = self._subModels.get(key)\n if subModel is None:\n subModel = VariationModel(subList(key, self.origLocations), self.axisOrder)\n self._subModels[key] = subModel\n return subModel, subList(key, items)\n\n @staticmethod\n def computeAxisRanges(locations):\n axisRanges = {}\n allAxes = {axis for loc in locations for axis in loc.keys()}\n for loc in locations:\n for axis in allAxes:\n value = loc.get(axis, 0)\n axisMin, axisMax = axisRanges.get(axis, (value, value))\n axisRanges[axis] = min(value, axisMin), max(value, axisMax)\n return axisRanges\n\n @staticmethod\n def getMasterLocationsSortKeyFunc(locations, axisOrder=[]):\n if {} not in locations:\n raise VariationModelError("Base master not found.")\n axisPoints = {}\n for loc in locations:\n if len(loc) != 1:\n continue\n axis = next(iter(loc))\n value = loc[axis]\n if axis not in axisPoints:\n axisPoints[axis] = {0.0}\n assert (\n value not in axisPoints[axis]\n ), 'Value "%s" in axisPoints["%s"] --> %s' % (value, axis, axisPoints)\n axisPoints[axis].add(value)\n\n def getKey(axisPoints, axisOrder):\n def sign(v):\n return -1 if v < 0 else +1 if v > 0 else 0\n\n def key(loc):\n rank = len(loc)\n onPointAxes = [\n axis\n for axis, value in loc.items()\n if axis in axisPoints and value in axisPoints[axis]\n ]\n orderedAxes = [axis for axis in axisOrder if axis in loc]\n orderedAxes.extend(\n [axis for axis in sorted(loc.keys()) if axis not in axisOrder]\n )\n return (\n rank, # First, order by increasing rank\n -len(onPointAxes), # Next, by decreasing number of onPoint axes\n tuple(\n axisOrder.index(axis) if axis in axisOrder else 0x10000\n for axis in orderedAxes\n ), # Next, by known axes\n tuple(orderedAxes), # Next, by all axes\n tuple(\n sign(loc[axis]) for axis in orderedAxes\n ), # Next, by signs of axis values\n tuple(\n abs(loc[axis]) for axis in orderedAxes\n ), # Next, by absolute value of axis values\n )\n\n return key\n\n ret = getKey(axisPoints, axisOrder)\n return ret\n\n def reorderMasters(self, master_list, mapping):\n # For changing the master data order without\n # recomputing supports and deltaWeights.\n new_list = [master_list[idx] for idx in mapping]\n self.origLocations = [self.origLocations[idx] for idx in mapping]\n locations = [\n {k: v for k, v in loc.items() if v != 0.0} for loc in self.origLocations\n ]\n self.mapping = [self.locations.index(l) for l in locations]\n self.reverseMapping = [locations.index(l) for l in self.locations]\n self._subModels = {}\n return new_list\n\n def _computeMasterSupports(self):\n self.supports = []\n regions = self._locationsToRegions()\n for i, region in enumerate(regions):\n locAxes = set(region.keys())\n # Walk over previous masters now\n for prev_region in regions[:i]:\n # Master with different axes do not participte\n if set(prev_region.keys()) != locAxes:\n continue\n # If it's NOT in the current box, it does not participate\n relevant = True\n for axis, (lower, peak, upper) in region.items():\n if not (\n prev_region[axis][1] == peak\n or lower < prev_region[axis][1] < upper\n ):\n relevant = False\n break\n if not relevant:\n continue\n\n # Split the box for new master; split in whatever direction\n # that has largest range ratio.\n #\n # For symmetry, we actually cut across multiple axes\n # if they have the largest, equal, ratio.\n # https://github.com/fonttools/fonttools/commit/7ee81c8821671157968b097f3e55309a1faa511e#commitcomment-31054804\n\n bestAxes = {}\n bestRatio = -1\n for axis in prev_region.keys():\n val = prev_region[axis][1]\n assert axis in region\n lower, locV, upper = region[axis]\n newLower, newUpper = lower, upper\n if val < locV:\n newLower = val\n ratio = (val - locV) / (lower - locV)\n elif locV < val:\n newUpper = val\n ratio = (val - locV) / (upper - locV)\n else: # val == locV\n # Can't split box in this direction.\n continue\n if ratio > bestRatio:\n bestAxes = {}\n bestRatio = ratio\n if ratio == bestRatio:\n bestAxes[axis] = (newLower, locV, newUpper)\n\n for axis, triple in bestAxes.items():\n region[axis] = triple\n self.supports.append(region)\n self._computeDeltaWeights()\n\n def _locationsToRegions(self):\n locations = self.locations\n axisRanges = self.axisRanges\n\n regions = []\n for loc in locations:\n region = {}\n for axis, locV in loc.items():\n if locV > 0:\n region[axis] = (0, locV, axisRanges[axis][1])\n else:\n region[axis] = (axisRanges[axis][0], locV, 0)\n regions.append(region)\n return regions\n\n def _computeDeltaWeights(self):\n self.deltaWeights = []\n for i, loc in enumerate(self.locations):\n deltaWeight = {}\n # Walk over previous masters now, populate deltaWeight\n for j, support in enumerate(self.supports[:i]):\n scalar = supportScalar(loc, support)\n if scalar:\n deltaWeight[j] = scalar\n self.deltaWeights.append(deltaWeight)\n\n def getDeltas(self, masterValues, *, round=noRound):\n assert len(masterValues) == len(self.deltaWeights), (\n len(masterValues),\n len(self.deltaWeights),\n )\n mapping = self.reverseMapping\n out = []\n for i, weights in enumerate(self.deltaWeights):\n delta = masterValues[mapping[i]]\n for j, weight in weights.items():\n if weight == 1:\n delta -= out[j]\n else:\n delta -= out[j] * weight\n out.append(round(delta))\n return out\n\n def getDeltasAndSupports(self, items, *, round=noRound):\n model, items = self.getSubModel(items)\n return model.getDeltas(items, round=round), model.supports\n\n def getScalars(self, loc):\n """Return scalars for each delta, for the given location.\n If interpolating many master-values at the same location,\n this function allows speed up by fetching the scalars once\n and using them with interpolateFromMastersAndScalars()."""\n return [\n supportScalar(\n loc, support, extrapolate=self.extrapolate, axisRanges=self.axisRanges\n )\n for support in self.supports\n ]\n\n def getMasterScalars(self, targetLocation):\n """Return multipliers for each master, for the given location.\n If interpolating many master-values at the same location,\n this function allows speed up by fetching the scalars once\n and using them with interpolateFromValuesAndScalars().\n\n Note that the scalars used in interpolateFromMastersAndScalars(),\n are *not* the same as the ones returned here. They are the result\n of getScalars()."""\n out = self.getScalars(targetLocation)\n for i, weights in reversed(list(enumerate(self.deltaWeights))):\n for j, weight in weights.items():\n out[j] -= out[i] * weight\n\n out = [out[self.mapping[i]] for i in range(len(out))]\n return out\n\n @staticmethod\n def interpolateFromValuesAndScalars(values, scalars):\n """Interpolate from values and scalars coefficients.\n\n If the values are master-values, then the scalars should be\n fetched from getMasterScalars().\n\n If the values are deltas, then the scalars should be fetched\n from getScalars(); in which case this is the same as\n interpolateFromDeltasAndScalars().\n """\n v = None\n assert len(values) == len(scalars)\n for value, scalar in zip(values, scalars):\n if not scalar:\n continue\n contribution = value * scalar\n if v is None:\n v = contribution\n else:\n v += contribution\n return v\n\n @staticmethod\n def interpolateFromDeltasAndScalars(deltas, scalars):\n """Interpolate from deltas and scalars fetched from getScalars()."""\n return VariationModel.interpolateFromValuesAndScalars(deltas, scalars)\n\n def interpolateFromDeltas(self, loc, deltas):\n """Interpolate from deltas, at location loc."""\n scalars = self.getScalars(loc)\n return self.interpolateFromDeltasAndScalars(deltas, scalars)\n\n def interpolateFromMasters(self, loc, masterValues, *, round=noRound):\n """Interpolate from master-values, at location loc."""\n scalars = self.getMasterScalars(loc)\n return self.interpolateFromValuesAndScalars(masterValues, scalars)\n\n def interpolateFromMastersAndScalars(self, masterValues, scalars, *, round=noRound):\n """Interpolate from master-values, and scalars fetched from\n getScalars(), which is useful when you want to interpolate\n multiple master-values with the same location."""\n deltas = self.getDeltas(masterValues, round=round)\n return self.interpolateFromDeltasAndScalars(deltas, scalars)\n\n\ndef piecewiseLinearMap(v, mapping):\n keys = mapping.keys()\n if not keys:\n return v\n if v in keys:\n return mapping[v]\n k = min(keys)\n if v < k:\n return v + mapping[k] - k\n k = max(keys)\n if v > k:\n return v + mapping[k] - k\n # Interpolate\n a = max(k for k in keys if k < v)\n b = min(k for k in keys if k > v)\n va = mapping[a]\n vb = mapping[b]\n return va + (vb - va) * (v - a) / (b - a)\n\n\ndef main(args=None):\n """Normalize locations on a given designspace"""\n from fontTools import configLogger\n import argparse\n\n parser = argparse.ArgumentParser(\n "fonttools varLib.models",\n description=main.__doc__,\n )\n parser.add_argument(\n "--loglevel",\n metavar="LEVEL",\n default="INFO",\n help="Logging level (defaults to INFO)",\n )\n\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument("-d", "--designspace", metavar="DESIGNSPACE", type=str)\n group.add_argument(\n "-l",\n "--locations",\n metavar="LOCATION",\n nargs="+",\n help="Master locations as comma-separate coordinates. One must be all zeros.",\n )\n\n args = parser.parse_args(args)\n\n configLogger(level=args.loglevel)\n from pprint import pprint\n\n if args.designspace:\n from fontTools.designspaceLib import DesignSpaceDocument\n\n doc = DesignSpaceDocument()\n doc.read(args.designspace)\n locs = [s.location for s in doc.sources]\n print("Original locations:")\n pprint(locs)\n doc.normalize()\n print("Normalized locations:")\n locs = [s.location for s in doc.sources]\n pprint(locs)\n else:\n axes = [chr(c) for c in range(ord("A"), ord("Z") + 1)]\n locs = [\n dict(zip(axes, (float(v) for v in s.split(",")))) for s in args.locations\n ]\n\n model = VariationModel(locs)\n print("Sorted locations:")\n pprint(model.locations)\n print("Supports:")\n pprint(model.supports)\n\n\nif __name__ == "__main__":\n import doctest, sys\n\n if len(sys.argv) > 1:\n sys.exit(main())\n\n sys.exit(doctest.testmod().failed)\n
.venv\Lib\site-packages\fontTools\varLib\models.py
models.py
Python
23,040
0.95
0.244548
0.028623
vue-tools
63
2024-01-05T06:01:24.222438
GPL-3.0
false
78e7fe8581cedc7ac74f4a37bd30ce57
from fontTools.misc.roundTools import noRound, otRound\nfrom fontTools.misc.intTools import bit_count\nfrom fontTools.misc.vector import Vector\nfrom fontTools.ttLib.tables import otTables as ot\nfrom fontTools.varLib.models import supportScalar\nimport fontTools.varLib.varStore # For monkey-patching\nfrom fontTools.varLib.builder import (\n buildVarRegionList,\n buildSparseVarRegionList,\n buildSparseVarRegion,\n buildMultiVarStore,\n buildMultiVarData,\n)\nfrom fontTools.misc.iterTools import batched\nfrom functools import partial\nfrom collections import defaultdict\nfrom heapq import heappush, heappop\n\n\nNO_VARIATION_INDEX = ot.NO_VARIATION_INDEX\not.MultiVarStore.NO_VARIATION_INDEX = NO_VARIATION_INDEX\n\n\ndef _getLocationKey(loc):\n return tuple(sorted(loc.items(), key=lambda kv: kv[0]))\n\n\nclass OnlineMultiVarStoreBuilder(object):\n def __init__(self, axisTags):\n self._axisTags = axisTags\n self._regionMap = {}\n self._regionList = buildSparseVarRegionList([], axisTags)\n self._store = buildMultiVarStore(self._regionList, [])\n self._data = None\n self._model = None\n self._supports = None\n self._varDataIndices = {}\n self._varDataCaches = {}\n self._cache = None\n\n def setModel(self, model):\n self.setSupports(model.supports)\n self._model = model\n\n def setSupports(self, supports):\n self._model = None\n self._supports = list(supports)\n if not self._supports[0]:\n del self._supports[0] # Drop base master support\n self._cache = None\n self._data = None\n\n def finish(self):\n self._regionList.RegionCount = len(self._regionList.Region)\n self._store.MultiVarDataCount = len(self._store.MultiVarData)\n return self._store\n\n def _add_MultiVarData(self):\n regionMap = self._regionMap\n regionList = self._regionList\n\n regions = self._supports\n regionIndices = []\n for region in regions:\n key = _getLocationKey(region)\n idx = regionMap.get(key)\n if idx is None:\n varRegion = buildSparseVarRegion(region, self._axisTags)\n idx = regionMap[key] = len(regionList.Region)\n regionList.Region.append(varRegion)\n regionIndices.append(idx)\n\n # Check if we have one already...\n key = tuple(regionIndices)\n varDataIdx = self._varDataIndices.get(key)\n if varDataIdx is not None:\n self._outer = varDataIdx\n self._data = self._store.MultiVarData[varDataIdx]\n self._cache = self._varDataCaches[key]\n if len(self._data.Item) == 0xFFFF:\n # This is full. Need new one.\n varDataIdx = None\n\n if varDataIdx is None:\n self._data = buildMultiVarData(regionIndices, [])\n self._outer = len(self._store.MultiVarData)\n self._store.MultiVarData.append(self._data)\n self._varDataIndices[key] = self._outer\n if key not in self._varDataCaches:\n self._varDataCaches[key] = {}\n self._cache = self._varDataCaches[key]\n\n def storeMasters(self, master_values, *, round=round):\n deltas = self._model.getDeltas(master_values, round=round)\n base = deltas.pop(0)\n return base, self.storeDeltas(deltas, round=noRound)\n\n def storeDeltas(self, deltas, *, round=round):\n deltas = tuple(round(d) for d in deltas)\n\n if not any(deltas):\n return NO_VARIATION_INDEX\n\n deltas_tuple = tuple(tuple(d) for d in deltas)\n\n if not self._data:\n self._add_MultiVarData()\n\n varIdx = self._cache.get(deltas_tuple)\n if varIdx is not None:\n return varIdx\n\n inner = len(self._data.Item)\n if inner == 0xFFFF:\n # Full array. Start new one.\n self._add_MultiVarData()\n return self.storeDeltas(deltas, round=noRound)\n self._data.addItem(deltas, round=noRound)\n\n varIdx = (self._outer << 16) + inner\n self._cache[deltas_tuple] = varIdx\n return varIdx\n\n\ndef MultiVarData_addItem(self, deltas, *, round=round):\n deltas = tuple(round(d) for d in deltas)\n\n assert len(deltas) == self.VarRegionCount\n\n values = []\n for d in deltas:\n values.extend(d)\n\n self.Item.append(values)\n self.ItemCount = len(self.Item)\n\n\not.MultiVarData.addItem = MultiVarData_addItem\n\n\ndef SparseVarRegion_get_support(self, fvar_axes):\n return {\n fvar_axes[reg.AxisIndex].axisTag: (reg.StartCoord, reg.PeakCoord, reg.EndCoord)\n for reg in self.SparseVarRegionAxis\n }\n\n\not.SparseVarRegion.get_support = SparseVarRegion_get_support\n\n\ndef MultiVarStore___bool__(self):\n return bool(self.MultiVarData)\n\n\not.MultiVarStore.__bool__ = MultiVarStore___bool__\n\n\nclass MultiVarStoreInstancer(object):\n def __init__(self, multivarstore, fvar_axes, location={}):\n self.fvar_axes = fvar_axes\n assert multivarstore is None or multivarstore.Format == 1\n self._varData = multivarstore.MultiVarData if multivarstore else []\n self._regions = (\n multivarstore.SparseVarRegionList.Region if multivarstore else []\n )\n self.setLocation(location)\n\n def setLocation(self, location):\n self.location = dict(location)\n self._clearCaches()\n\n def _clearCaches(self):\n self._scalars = {}\n\n def _getScalar(self, regionIdx):\n scalar = self._scalars.get(regionIdx)\n if scalar is None:\n support = self._regions[regionIdx].get_support(self.fvar_axes)\n scalar = supportScalar(self.location, support)\n self._scalars[regionIdx] = scalar\n return scalar\n\n @staticmethod\n def interpolateFromDeltasAndScalars(deltas, scalars):\n if not deltas:\n return Vector([])\n assert len(deltas) % len(scalars) == 0, (len(deltas), len(scalars))\n m = len(deltas) // len(scalars)\n delta = Vector([0] * m)\n for d, s in zip(batched(deltas, m), scalars):\n if not s:\n continue\n delta += Vector(d) * s\n return delta\n\n def __getitem__(self, varidx):\n major, minor = varidx >> 16, varidx & 0xFFFF\n if varidx == NO_VARIATION_INDEX:\n return Vector([])\n varData = self._varData\n scalars = [self._getScalar(ri) for ri in varData[major].VarRegionIndex]\n deltas = varData[major].Item[minor]\n return self.interpolateFromDeltasAndScalars(deltas, scalars)\n\n def interpolateFromDeltas(self, varDataIndex, deltas):\n varData = self._varData\n scalars = [self._getScalar(ri) for ri in varData[varDataIndex].VarRegionIndex]\n return self.interpolateFromDeltasAndScalars(deltas, scalars)\n\n\ndef MultiVarStore_subset_varidxes(self, varIdxes):\n return ot.VarStore.subset_varidxes(self, varIdxes, VarData="MultiVarData")\n\n\ndef MultiVarStore_prune_regions(self):\n return ot.VarStore.prune_regions(\n self, VarData="MultiVarData", VarRegionList="SparseVarRegionList"\n )\n\n\not.MultiVarStore.prune_regions = MultiVarStore_prune_regions\not.MultiVarStore.subset_varidxes = MultiVarStore_subset_varidxes\n\n\ndef MultiVarStore_get_supports(self, major, fvarAxes):\n supports = []\n varData = self.MultiVarData[major]\n for regionIdx in varData.VarRegionIndex:\n region = self.SparseVarRegionList.Region[regionIdx]\n support = region.get_support(fvarAxes)\n supports.append(support)\n return supports\n\n\not.MultiVarStore.get_supports = MultiVarStore_get_supports\n\n\ndef VARC_collect_varidxes(self, varidxes):\n for glyph in self.VarCompositeGlyphs.VarCompositeGlyph:\n for component in glyph.components:\n varidxes.add(component.axisValuesVarIndex)\n varidxes.add(component.transformVarIndex)\n\n\ndef VARC_remap_varidxes(self, varidxes_map):\n for glyph in self.VarCompositeGlyphs.VarCompositeGlyph:\n for component in glyph.components:\n component.axisValuesVarIndex = varidxes_map[component.axisValuesVarIndex]\n component.transformVarIndex = varidxes_map[component.transformVarIndex]\n\n\not.VARC.collect_varidxes = VARC_collect_varidxes\not.VARC.remap_varidxes = VARC_remap_varidxes\n
.venv\Lib\site-packages\fontTools\varLib\multiVarStore.py
multiVarStore.py
Python
8,558
0.95
0.221344
0.015544
awesome-app
536
2025-01-12T23:48:07.374178
BSD-3-Clause
false
50d41d2c68816885c7786280b8e84711
MVAR_ENTRIES = {\n "hasc": ("OS/2", "sTypoAscender"), # horizontal ascender\n "hdsc": ("OS/2", "sTypoDescender"), # horizontal descender\n "hlgp": ("OS/2", "sTypoLineGap"), # horizontal line gap\n "hcla": ("OS/2", "usWinAscent"), # horizontal clipping ascent\n "hcld": ("OS/2", "usWinDescent"), # horizontal clipping descent\n "vasc": ("vhea", "ascent"), # vertical ascender\n "vdsc": ("vhea", "descent"), # vertical descender\n "vlgp": ("vhea", "lineGap"), # vertical line gap\n "hcrs": ("hhea", "caretSlopeRise"), # horizontal caret rise\n "hcrn": ("hhea", "caretSlopeRun"), # horizontal caret run\n "hcof": ("hhea", "caretOffset"), # horizontal caret offset\n "vcrs": ("vhea", "caretSlopeRise"), # vertical caret rise\n "vcrn": ("vhea", "caretSlopeRun"), # vertical caret run\n "vcof": ("vhea", "caretOffset"), # vertical caret offset\n "xhgt": ("OS/2", "sxHeight"), # x height\n "cpht": ("OS/2", "sCapHeight"), # cap height\n "sbxs": ("OS/2", "ySubscriptXSize"), # subscript em x size\n "sbys": ("OS/2", "ySubscriptYSize"), # subscript em y size\n "sbxo": ("OS/2", "ySubscriptXOffset"), # subscript em x offset\n "sbyo": ("OS/2", "ySubscriptYOffset"), # subscript em y offset\n "spxs": ("OS/2", "ySuperscriptXSize"), # superscript em x size\n "spys": ("OS/2", "ySuperscriptYSize"), # superscript em y size\n "spxo": ("OS/2", "ySuperscriptXOffset"), # superscript em x offset\n "spyo": ("OS/2", "ySuperscriptYOffset"), # superscript em y offset\n "strs": ("OS/2", "yStrikeoutSize"), # strikeout size\n "stro": ("OS/2", "yStrikeoutPosition"), # strikeout offset\n "unds": ("post", "underlineThickness"), # underline size\n "undo": ("post", "underlinePosition"), # underline offset\n #'gsp0': ('gasp', 'gaspRange[0].rangeMaxPPEM'), # gaspRange[0]\n #'gsp1': ('gasp', 'gaspRange[1].rangeMaxPPEM'), # gaspRange[1]\n #'gsp2': ('gasp', 'gaspRange[2].rangeMaxPPEM'), # gaspRange[2]\n #'gsp3': ('gasp', 'gaspRange[3].rangeMaxPPEM'), # gaspRange[3]\n #'gsp4': ('gasp', 'gaspRange[4].rangeMaxPPEM'), # gaspRange[4]\n #'gsp5': ('gasp', 'gaspRange[5].rangeMaxPPEM'), # gaspRange[5]\n #'gsp6': ('gasp', 'gaspRange[6].rangeMaxPPEM'), # gaspRange[6]\n #'gsp7': ('gasp', 'gaspRange[7].rangeMaxPPEM'), # gaspRange[7]\n #'gsp8': ('gasp', 'gaspRange[8].rangeMaxPPEM'), # gaspRange[8]\n #'gsp9': ('gasp', 'gaspRange[9].rangeMaxPPEM'), # gaspRange[9]\n}\n
.venv\Lib\site-packages\fontTools\varLib\mvar.py
mvar.py
Python
2,489
0.8
0
0.25
python-kit
604
2024-01-27T13:03:25.169289
GPL-3.0
false
62f4e1670d7452fc8f86321ab8549579
"""Visualize DesignSpaceDocument and resulting VariationModel."""\n\nfrom fontTools.varLib.models import VariationModel, supportScalar\nfrom fontTools.designspaceLib import DesignSpaceDocument\nfrom matplotlib import pyplot\nfrom mpl_toolkits.mplot3d import axes3d\nfrom itertools import cycle\nimport math\nimport logging\nimport sys\n\nlog = logging.getLogger(__name__)\n\n\ndef stops(support, count=10):\n a, b, c = support\n\n return (\n [a + (b - a) * i / count for i in range(count)]\n + [b + (c - b) * i / count for i in range(count)]\n + [c]\n )\n\n\ndef _plotLocationsDots(locations, axes, subplot, **kwargs):\n for loc, color in zip(locations, cycle(pyplot.cm.Set1.colors)):\n if len(axes) == 1:\n subplot.plot([loc.get(axes[0], 0)], [1.0], "o", color=color, **kwargs)\n elif len(axes) == 2:\n subplot.plot(\n [loc.get(axes[0], 0)],\n [loc.get(axes[1], 0)],\n [1.0],\n "o",\n color=color,\n **kwargs,\n )\n else:\n raise AssertionError(len(axes))\n\n\ndef plotLocations(locations, fig, names=None, **kwargs):\n n = len(locations)\n cols = math.ceil(n**0.5)\n rows = math.ceil(n / cols)\n\n if names is None:\n names = [None] * len(locations)\n\n model = VariationModel(locations)\n names = [names[model.reverseMapping[i]] for i in range(len(names))]\n\n axes = sorted(locations[0].keys())\n if len(axes) == 1:\n _plotLocations2D(model, axes[0], fig, cols, rows, names=names, **kwargs)\n elif len(axes) == 2:\n _plotLocations3D(model, axes, fig, cols, rows, names=names, **kwargs)\n else:\n raise ValueError("Only 1 or 2 axes are supported")\n\n\ndef _plotLocations2D(model, axis, fig, cols, rows, names, **kwargs):\n subplot = fig.add_subplot(111)\n for i, (support, color, name) in enumerate(\n zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names))\n ):\n if name is not None:\n subplot.set_title(name)\n subplot.set_xlabel(axis)\n pyplot.xlim(-1.0, +1.0)\n\n Xs = support.get(axis, (-1.0, 0.0, +1.0))\n X, Y = [], []\n for x in stops(Xs):\n y = supportScalar({axis: x}, support)\n X.append(x)\n Y.append(y)\n subplot.plot(X, Y, color=color, **kwargs)\n\n _plotLocationsDots(model.locations, [axis], subplot)\n\n\ndef _plotLocations3D(model, axes, fig, rows, cols, names, **kwargs):\n ax1, ax2 = axes\n\n axis3D = fig.add_subplot(111, projection="3d")\n for i, (support, color, name) in enumerate(\n zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names))\n ):\n if name is not None:\n axis3D.set_title(name)\n axis3D.set_xlabel(ax1)\n axis3D.set_ylabel(ax2)\n pyplot.xlim(-1.0, +1.0)\n pyplot.ylim(-1.0, +1.0)\n\n Xs = support.get(ax1, (-1.0, 0.0, +1.0))\n Ys = support.get(ax2, (-1.0, 0.0, +1.0))\n for x in stops(Xs):\n X, Y, Z = [], [], []\n for y in Ys:\n z = supportScalar({ax1: x, ax2: y}, support)\n X.append(x)\n Y.append(y)\n Z.append(z)\n axis3D.plot(X, Y, Z, color=color, **kwargs)\n for y in stops(Ys):\n X, Y, Z = [], [], []\n for x in Xs:\n z = supportScalar({ax1: x, ax2: y}, support)\n X.append(x)\n Y.append(y)\n Z.append(z)\n axis3D.plot(X, Y, Z, color=color, **kwargs)\n\n _plotLocationsDots(model.locations, [ax1, ax2], axis3D)\n\n\ndef plotDocument(doc, fig, **kwargs):\n doc.normalize()\n locations = [s.location for s in doc.sources]\n names = [s.name for s in doc.sources]\n plotLocations(locations, fig, names, **kwargs)\n\n\ndef _plotModelFromMasters2D(model, masterValues, fig, **kwargs):\n assert len(model.axisOrder) == 1\n axis = model.axisOrder[0]\n\n axis_min = min(loc.get(axis, 0) for loc in model.locations)\n axis_max = max(loc.get(axis, 0) for loc in model.locations)\n\n import numpy as np\n\n X = np.arange(axis_min, axis_max, (axis_max - axis_min) / 100)\n Y = []\n\n for x in X:\n loc = {axis: x}\n v = model.interpolateFromMasters(loc, masterValues)\n Y.append(v)\n\n subplot = fig.add_subplot(111)\n subplot.plot(X, Y, "-", **kwargs)\n\n\ndef _plotModelFromMasters3D(model, masterValues, fig, **kwargs):\n assert len(model.axisOrder) == 2\n axis1, axis2 = model.axisOrder[0], model.axisOrder[1]\n\n axis1_min = min(loc.get(axis1, 0) for loc in model.locations)\n axis1_max = max(loc.get(axis1, 0) for loc in model.locations)\n axis2_min = min(loc.get(axis2, 0) for loc in model.locations)\n axis2_max = max(loc.get(axis2, 0) for loc in model.locations)\n\n import numpy as np\n\n X = np.arange(axis1_min, axis1_max, (axis1_max - axis1_min) / 100)\n Y = np.arange(axis2_min, axis2_max, (axis2_max - axis2_min) / 100)\n X, Y = np.meshgrid(X, Y)\n Z = []\n\n for row_x, row_y in zip(X, Y):\n z_row = []\n Z.append(z_row)\n for x, y in zip(row_x, row_y):\n loc = {axis1: x, axis2: y}\n v = model.interpolateFromMasters(loc, masterValues)\n z_row.append(v)\n Z = np.array(Z)\n\n axis3D = fig.add_subplot(111, projection="3d")\n axis3D.plot_surface(X, Y, Z, **kwargs)\n\n\ndef plotModelFromMasters(model, masterValues, fig, **kwargs):\n """Plot a variation model and set of master values corresponding\n to the locations to the model into a pyplot figure. Variation\n model must have axisOrder of size 1 or 2."""\n if len(model.axisOrder) == 1:\n _plotModelFromMasters2D(model, masterValues, fig, **kwargs)\n elif len(model.axisOrder) == 2:\n _plotModelFromMasters3D(model, masterValues, fig, **kwargs)\n else:\n raise ValueError("Only 1 or 2 axes are supported")\n\n\ndef main(args=None):\n from fontTools import configLogger\n\n if args is None:\n args = sys.argv[1:]\n\n # configure the library logger (for >= WARNING)\n configLogger()\n # comment this out to enable debug messages from logger\n # log.setLevel(logging.DEBUG)\n\n if len(args) < 1:\n print("usage: fonttools varLib.plot source.designspace", file=sys.stderr)\n print(" or")\n print("usage: fonttools varLib.plot location1 location2 ...", file=sys.stderr)\n print(" or")\n print(\n "usage: fonttools varLib.plot location1=value1 location2=value2 ...",\n file=sys.stderr,\n )\n sys.exit(1)\n\n fig = pyplot.figure()\n fig.set_tight_layout(True)\n\n if len(args) == 1 and args[0].endswith(".designspace"):\n doc = DesignSpaceDocument()\n doc.read(args[0])\n plotDocument(doc, fig)\n else:\n axes = [chr(c) for c in range(ord("A"), ord("Z") + 1)]\n if "=" not in args[0]:\n locs = [dict(zip(axes, (float(v) for v in s.split(",")))) for s in args]\n plotLocations(locs, fig)\n else:\n locations = []\n masterValues = []\n for arg in args:\n loc, v = arg.split("=")\n locations.append(dict(zip(axes, (float(v) for v in loc.split(",")))))\n masterValues.append(float(v))\n model = VariationModel(locations, axes[: len(locations[0])])\n plotModelFromMasters(model, masterValues, fig)\n\n pyplot.show()\n\n\nif __name__ == "__main__":\n import sys\n\n sys.exit(main())\n
.venv\Lib\site-packages\fontTools\varLib\plot.py
plot.py
Python
7,732
0.95
0.205882
0.021277
react-lib
232
2024-09-26T15:05:43.162127
GPL-3.0
false
4863b017393a760ec39c65e99213db00
"""Extra methods for DesignSpaceDocument to generate its STAT table data."""\n\nfrom __future__ import annotations\n\nfrom typing import Dict, List, Union\n\nimport fontTools.otlLib.builder\nfrom fontTools.designspaceLib import (\n AxisLabelDescriptor,\n DesignSpaceDocument,\n DesignSpaceDocumentError,\n LocationLabelDescriptor,\n)\nfrom fontTools.designspaceLib.types import Region, getVFUserRegion, locationInRegion\nfrom fontTools.ttLib import TTFont\n\n\ndef buildVFStatTable(ttFont: TTFont, doc: DesignSpaceDocument, vfName: str) -> None:\n """Build the STAT table for the variable font identified by its name in\n the given document.\n\n Knowing which variable we're building STAT data for is needed to subset\n the STAT locations to only include what the variable font actually ships.\n\n .. versionadded:: 5.0\n\n .. seealso::\n - :func:`getStatAxes()`\n - :func:`getStatLocations()`\n - :func:`fontTools.otlLib.builder.buildStatTable()`\n """\n for vf in doc.getVariableFonts():\n if vf.name == vfName:\n break\n else:\n raise DesignSpaceDocumentError(\n f"Cannot find the variable font by name {vfName}"\n )\n\n region = getVFUserRegion(doc, vf)\n\n # if there are not currently any mac names don't add them here, that's inconsistent\n # https://github.com/fonttools/fonttools/issues/683\n macNames = any(\n nr.platformID == 1 for nr in getattr(ttFont.get("name"), "names", ())\n )\n\n return fontTools.otlLib.builder.buildStatTable(\n ttFont,\n getStatAxes(doc, region),\n getStatLocations(doc, region),\n doc.elidedFallbackName if doc.elidedFallbackName is not None else 2,\n macNames=macNames,\n )\n\n\ndef getStatAxes(doc: DesignSpaceDocument, userRegion: Region) -> List[Dict]:\n """Return a list of axis dicts suitable for use as the ``axes``\n argument to :func:`fontTools.otlLib.builder.buildStatTable()`.\n\n .. versionadded:: 5.0\n """\n # First, get the axis labels with explicit ordering\n # then append the others in the order they appear.\n maxOrdering = max(\n (axis.axisOrdering for axis in doc.axes if axis.axisOrdering is not None),\n default=-1,\n )\n axisOrderings = []\n for axis in doc.axes:\n if axis.axisOrdering is not None:\n axisOrderings.append(axis.axisOrdering)\n else:\n maxOrdering += 1\n axisOrderings.append(maxOrdering)\n return [\n dict(\n tag=axis.tag,\n name={"en": axis.name, **axis.labelNames},\n ordering=ordering,\n values=[\n _axisLabelToStatLocation(label)\n for label in axis.axisLabels\n if locationInRegion({axis.name: label.userValue}, userRegion)\n ],\n )\n for axis, ordering in zip(doc.axes, axisOrderings)\n ]\n\n\ndef getStatLocations(doc: DesignSpaceDocument, userRegion: Region) -> List[Dict]:\n """Return a list of location dicts suitable for use as the ``locations``\n argument to :func:`fontTools.otlLib.builder.buildStatTable()`.\n\n .. versionadded:: 5.0\n """\n axesByName = {axis.name: axis for axis in doc.axes}\n return [\n dict(\n name={"en": label.name, **label.labelNames},\n # Location in the designspace is keyed by axis name\n # Location in buildStatTable by axis tag\n location={\n axesByName[name].tag: value\n for name, value in label.getFullUserLocation(doc).items()\n },\n flags=_labelToFlags(label),\n )\n for label in doc.locationLabels\n if locationInRegion(label.getFullUserLocation(doc), userRegion)\n ]\n\n\ndef _labelToFlags(label: Union[AxisLabelDescriptor, LocationLabelDescriptor]) -> int:\n flags = 0\n if label.olderSibling:\n flags |= 1\n if label.elidable:\n flags |= 2\n return flags\n\n\ndef _axisLabelToStatLocation(\n label: AxisLabelDescriptor,\n) -> Dict:\n label_format = label.getFormat()\n name = {"en": label.name, **label.labelNames}\n flags = _labelToFlags(label)\n if label_format == 1:\n return dict(name=name, value=label.userValue, flags=flags)\n if label_format == 3:\n return dict(\n name=name,\n value=label.userValue,\n linkedValue=label.linkedUserValue,\n flags=flags,\n )\n if label_format == 2:\n res = dict(\n name=name,\n nominalValue=label.userValue,\n flags=flags,\n )\n if label.userMinimum is not None:\n res["rangeMinValue"] = label.userMinimum\n if label.userMaximum is not None:\n res["rangeMaxValue"] = label.userMaximum\n return res\n raise NotImplementedError("Unknown STAT label format")\n
.venv\Lib\site-packages\fontTools\varLib\stat.py
stat.py
Python
4,960
0.95
0.221477
0.046875
python-kit
8
2023-08-19T21:55:58.112545
MIT
false
e92c80015028db8bf745ee653399c93b
from fontTools.misc.roundTools import noRound, otRound\nfrom fontTools.misc.intTools import bit_count\nfrom fontTools.ttLib.tables import otTables as ot\nfrom fontTools.varLib.models import supportScalar\nfrom fontTools.varLib.builder import (\n buildVarRegionList,\n buildVarStore,\n buildVarRegion,\n buildVarData,\n)\nfrom functools import partial\nfrom collections import defaultdict\nfrom heapq import heappush, heappop\n\n\nNO_VARIATION_INDEX = ot.NO_VARIATION_INDEX\not.VarStore.NO_VARIATION_INDEX = NO_VARIATION_INDEX\n\n\ndef _getLocationKey(loc):\n return tuple(sorted(loc.items(), key=lambda kv: kv[0]))\n\n\nclass OnlineVarStoreBuilder(object):\n def __init__(self, axisTags):\n self._axisTags = axisTags\n self._regionMap = {}\n self._regionList = buildVarRegionList([], axisTags)\n self._store = buildVarStore(self._regionList, [])\n self._data = None\n self._model = None\n self._supports = None\n self._varDataIndices = {}\n self._varDataCaches = {}\n self._cache = None\n\n def setModel(self, model):\n self.setSupports(model.supports)\n self._model = model\n\n def setSupports(self, supports):\n self._model = None\n self._supports = list(supports)\n if self._supports and not self._supports[0]:\n del self._supports[0] # Drop base master support\n self._cache = None\n self._data = None\n\n def finish(self, optimize=True):\n self._regionList.RegionCount = len(self._regionList.Region)\n self._store.VarDataCount = len(self._store.VarData)\n for data in self._store.VarData:\n data.ItemCount = len(data.Item)\n data.calculateNumShorts(optimize=optimize)\n return self._store\n\n def _add_VarData(self, num_items=1):\n regionMap = self._regionMap\n regionList = self._regionList\n\n regions = self._supports\n regionIndices = []\n for region in regions:\n key = _getLocationKey(region)\n idx = regionMap.get(key)\n if idx is None:\n varRegion = buildVarRegion(region, self._axisTags)\n idx = regionMap[key] = len(regionList.Region)\n regionList.Region.append(varRegion)\n regionIndices.append(idx)\n\n # Check if we have one already...\n key = tuple(regionIndices)\n varDataIdx = self._varDataIndices.get(key)\n if varDataIdx is not None:\n self._outer = varDataIdx\n self._data = self._store.VarData[varDataIdx]\n self._cache = self._varDataCaches[key]\n if len(self._data.Item) + num_items > 0xFFFF:\n # This is full. Need new one.\n varDataIdx = None\n\n if varDataIdx is None:\n self._data = buildVarData(regionIndices, [], optimize=False)\n self._outer = len(self._store.VarData)\n self._store.VarData.append(self._data)\n self._varDataIndices[key] = self._outer\n if key not in self._varDataCaches:\n self._varDataCaches[key] = {}\n self._cache = self._varDataCaches[key]\n\n def storeMasters(self, master_values, *, round=round):\n deltas = self._model.getDeltas(master_values, round=round)\n base = deltas.pop(0)\n return base, self.storeDeltas(deltas, round=noRound)\n\n def storeMastersMany(self, master_values_list, *, round=round):\n deltas_list = [\n self._model.getDeltas(master_values, round=round)\n for master_values in master_values_list\n ]\n base_list = [deltas.pop(0) for deltas in deltas_list]\n return base_list, self.storeDeltasMany(deltas_list, round=noRound)\n\n def storeDeltas(self, deltas, *, round=round):\n deltas = [round(d) for d in deltas]\n if len(deltas) == len(self._supports) + 1:\n deltas = tuple(deltas[1:])\n else:\n assert len(deltas) == len(self._supports)\n deltas = tuple(deltas)\n\n if not self._data:\n self._add_VarData()\n\n varIdx = self._cache.get(deltas)\n if varIdx is not None:\n return varIdx\n\n inner = len(self._data.Item)\n if inner == 0xFFFF:\n # Full array. Start new one.\n self._add_VarData()\n return self.storeDeltas(deltas, round=noRound)\n self._data.addItem(deltas, round=noRound)\n\n varIdx = (self._outer << 16) + inner\n self._cache[deltas] = varIdx\n return varIdx\n\n def storeDeltasMany(self, deltas_list, *, round=round):\n deltas_list = [[round(d) for d in deltas] for deltas in deltas_list]\n deltas_list = tuple(tuple(deltas) for deltas in deltas_list)\n\n if not self._data:\n self._add_VarData(len(deltas_list))\n\n varIdx = self._cache.get(deltas_list)\n if varIdx is not None:\n return varIdx\n\n inner = len(self._data.Item)\n if inner + len(deltas_list) > 0xFFFF:\n # Full array. Start new one.\n self._add_VarData(len(deltas_list))\n return self.storeDeltasMany(deltas_list, round=noRound)\n for i, deltas in enumerate(deltas_list):\n self._data.addItem(deltas, round=noRound)\n\n varIdx = (self._outer << 16) + inner + i\n self._cache[deltas] = varIdx\n\n varIdx = (self._outer << 16) + inner\n self._cache[deltas_list] = varIdx\n\n return varIdx\n\n\ndef VarData_addItem(self, deltas, *, round=round):\n deltas = [round(d) for d in deltas]\n\n countUs = self.VarRegionCount\n countThem = len(deltas)\n if countUs + 1 == countThem:\n deltas = list(deltas[1:])\n else:\n assert countUs == countThem, (countUs, countThem)\n deltas = list(deltas)\n self.Item.append(deltas)\n self.ItemCount = len(self.Item)\n\n\not.VarData.addItem = VarData_addItem\n\n\ndef VarRegion_get_support(self, fvar_axes):\n return {\n fvar_axes[i].axisTag: (reg.StartCoord, reg.PeakCoord, reg.EndCoord)\n for i, reg in enumerate(self.VarRegionAxis)\n if reg.PeakCoord != 0\n }\n\n\not.VarRegion.get_support = VarRegion_get_support\n\n\ndef VarStore___bool__(self):\n return bool(self.VarData)\n\n\not.VarStore.__bool__ = VarStore___bool__\n\n\nclass VarStoreInstancer(object):\n def __init__(self, varstore, fvar_axes, location={}):\n self.fvar_axes = fvar_axes\n assert varstore is None or varstore.Format == 1\n self._varData = varstore.VarData if varstore else []\n self._regions = varstore.VarRegionList.Region if varstore else []\n self.setLocation(location)\n\n def setLocation(self, location):\n self.location = dict(location)\n self._clearCaches()\n\n def _clearCaches(self):\n self._scalars = {}\n\n def _getScalar(self, regionIdx):\n scalar = self._scalars.get(regionIdx)\n if scalar is None:\n support = self._regions[regionIdx].get_support(self.fvar_axes)\n scalar = supportScalar(self.location, support)\n self._scalars[regionIdx] = scalar\n return scalar\n\n @staticmethod\n def interpolateFromDeltasAndScalars(deltas, scalars):\n delta = 0.0\n for d, s in zip(deltas, scalars):\n if not s:\n continue\n delta += d * s\n return delta\n\n def __getitem__(self, varidx):\n major, minor = varidx >> 16, varidx & 0xFFFF\n if varidx == NO_VARIATION_INDEX:\n return 0.0\n varData = self._varData\n scalars = [self._getScalar(ri) for ri in varData[major].VarRegionIndex]\n deltas = varData[major].Item[minor]\n return self.interpolateFromDeltasAndScalars(deltas, scalars)\n\n def interpolateFromDeltas(self, varDataIndex, deltas):\n varData = self._varData\n scalars = [self._getScalar(ri) for ri in varData[varDataIndex].VarRegionIndex]\n return self.interpolateFromDeltasAndScalars(deltas, scalars)\n\n\n#\n# Optimizations\n#\n# retainFirstMap - If true, major 0 mappings are retained. Deltas for unused indices are zeroed\n# advIdxes - Set of major 0 indices for advance deltas to be listed first. Other major 0 indices follow.\n\n\ndef VarStore_subset_varidxes(\n self,\n varIdxes,\n optimize=True,\n retainFirstMap=False,\n advIdxes=set(),\n *,\n VarData="VarData",\n):\n # Sort out used varIdxes by major/minor.\n used = defaultdict(set)\n for varIdx in varIdxes:\n if varIdx == NO_VARIATION_INDEX:\n continue\n major = varIdx >> 16\n minor = varIdx & 0xFFFF\n used[major].add(minor)\n del varIdxes\n\n #\n # Subset VarData\n #\n\n varData = getattr(self, VarData)\n newVarData = []\n varDataMap = {NO_VARIATION_INDEX: NO_VARIATION_INDEX}\n for major, data in enumerate(varData):\n usedMinors = used.get(major)\n if usedMinors is None:\n continue\n newMajor = len(newVarData)\n newVarData.append(data)\n\n items = data.Item\n newItems = []\n if major == 0 and retainFirstMap:\n for minor in range(len(items)):\n newItems.append(\n items[minor] if minor in usedMinors else [0] * len(items[minor])\n )\n varDataMap[minor] = minor\n else:\n if major == 0:\n minors = sorted(advIdxes) + sorted(usedMinors - advIdxes)\n else:\n minors = sorted(usedMinors)\n for minor in minors:\n newMinor = len(newItems)\n newItems.append(items[minor])\n varDataMap[(major << 16) + minor] = (newMajor << 16) + newMinor\n\n data.Item = newItems\n data.ItemCount = len(data.Item)\n\n if VarData == "VarData":\n data.calculateNumShorts(optimize=optimize)\n\n setattr(self, VarData, newVarData)\n setattr(self, VarData + "Count", len(newVarData))\n\n self.prune_regions()\n\n return varDataMap\n\n\not.VarStore.subset_varidxes = VarStore_subset_varidxes\n\n\ndef VarStore_prune_regions(self, *, VarData="VarData", VarRegionList="VarRegionList"):\n """Remove unused VarRegions."""\n #\n # Subset VarRegionList\n #\n\n # Collect.\n usedRegions = set()\n for data in getattr(self, VarData):\n usedRegions.update(data.VarRegionIndex)\n # Subset.\n regionList = getattr(self, VarRegionList)\n regions = regionList.Region\n newRegions = []\n regionMap = {}\n for i in sorted(usedRegions):\n regionMap[i] = len(newRegions)\n newRegions.append(regions[i])\n regionList.Region = newRegions\n regionList.RegionCount = len(regionList.Region)\n # Map.\n for data in getattr(self, VarData):\n data.VarRegionIndex = [regionMap[i] for i in data.VarRegionIndex]\n\n\not.VarStore.prune_regions = VarStore_prune_regions\n\n\ndef _visit(self, func):\n """Recurse down from self, if type of an object is ot.Device,\n call func() on it. Works on otData-style classes."""\n\n if type(self) == ot.Device:\n func(self)\n\n elif isinstance(self, list):\n for that in self:\n _visit(that, func)\n\n elif hasattr(self, "getConverters") and not hasattr(self, "postRead"):\n for conv in self.getConverters():\n that = getattr(self, conv.name, None)\n if that is not None:\n _visit(that, func)\n\n elif isinstance(self, ot.ValueRecord):\n for that in self.__dict__.values():\n _visit(that, func)\n\n\ndef _Device_recordVarIdx(self, s):\n """Add VarIdx in this Device table (if any) to the set s."""\n if self.DeltaFormat == 0x8000:\n s.add((self.StartSize << 16) + self.EndSize)\n\n\ndef Object_collect_device_varidxes(self, varidxes):\n adder = partial(_Device_recordVarIdx, s=varidxes)\n _visit(self, adder)\n\n\not.GDEF.collect_device_varidxes = Object_collect_device_varidxes\not.GPOS.collect_device_varidxes = Object_collect_device_varidxes\n\n\ndef _Device_mapVarIdx(self, mapping, done):\n """Map VarIdx in this Device table (if any) through mapping."""\n if id(self) in done:\n return\n done.add(id(self))\n if self.DeltaFormat == 0x8000:\n varIdx = mapping[(self.StartSize << 16) + self.EndSize]\n self.StartSize = varIdx >> 16\n self.EndSize = varIdx & 0xFFFF\n\n\ndef Object_remap_device_varidxes(self, varidxes_map):\n mapper = partial(_Device_mapVarIdx, mapping=varidxes_map, done=set())\n _visit(self, mapper)\n\n\not.GDEF.remap_device_varidxes = Object_remap_device_varidxes\not.GPOS.remap_device_varidxes = Object_remap_device_varidxes\n\n\nclass _Encoding(object):\n def __init__(self, chars):\n self.chars = chars\n self.width = bit_count(chars)\n self.columns = self._columns(chars)\n self.overhead = self._characteristic_overhead(self.columns)\n self.items = set()\n\n def append(self, row):\n self.items.add(row)\n\n def extend(self, lst):\n self.items.update(lst)\n\n def width_sort_key(self):\n return self.width, self.chars\n\n @staticmethod\n def _characteristic_overhead(columns):\n """Returns overhead in bytes of encoding this characteristic\n as a VarData."""\n c = 4 + 6 # 4 bytes for LOffset, 6 bytes for VarData header\n c += bit_count(columns) * 2\n return c\n\n @staticmethod\n def _columns(chars):\n cols = 0\n i = 1\n while chars:\n if chars & 0b1111:\n cols |= i\n chars >>= 4\n i <<= 1\n return cols\n\n def gain_from_merging(self, other_encoding):\n combined_chars = other_encoding.chars | self.chars\n combined_width = bit_count(combined_chars)\n combined_columns = self.columns | other_encoding.columns\n combined_overhead = _Encoding._characteristic_overhead(combined_columns)\n combined_gain = (\n +self.overhead\n + other_encoding.overhead\n - combined_overhead\n - (combined_width - self.width) * len(self.items)\n - (combined_width - other_encoding.width) * len(other_encoding.items)\n )\n return combined_gain\n\n\nclass _EncodingDict(dict):\n def __missing__(self, chars):\n r = self[chars] = _Encoding(chars)\n return r\n\n def add_row(self, row):\n chars = self._row_characteristics(row)\n self[chars].append(row)\n\n @staticmethod\n def _row_characteristics(row):\n """Returns encoding characteristics for a row."""\n longWords = False\n\n chars = 0\n i = 1\n for v in row:\n if v:\n chars += i\n if not (-128 <= v <= 127):\n chars += i * 0b0010\n if not (-32768 <= v <= 32767):\n longWords = True\n break\n i <<= 4\n\n if longWords:\n # Redo; only allow 2byte/4byte encoding\n chars = 0\n i = 1\n for v in row:\n if v:\n chars += i * 0b0011\n if not (-32768 <= v <= 32767):\n chars += i * 0b1100\n i <<= 4\n\n return chars\n\n\ndef VarStore_optimize(self, use_NO_VARIATION_INDEX=True, quantization=1):\n """Optimize storage. Returns mapping from old VarIdxes to new ones."""\n\n # Overview:\n #\n # For each VarData row, we first extend it with zeroes to have\n # one column per region in VarRegionList. We then group the\n # rows into _Encoding objects, by their "characteristic" bitmap.\n # The characteristic bitmap is a binary number representing how\n # many bytes each column of the data takes up to encode. Each\n # column is encoded in four bits. For example, if a column has\n # only values in the range -128..127, it would only have a single\n # bit set in the characteristic bitmap for that column. If it has\n # values in the range -32768..32767, it would have two bits set.\n # The number of ones in the characteristic bitmap is the "width"\n # of the encoding.\n #\n # Each encoding as such has a number of "active" (ie. non-zero)\n # columns. The overhead of encoding the characteristic bitmap\n # is 10 bytes, plus 2 bytes per active column.\n #\n # When an encoding is merged into another one, if the characteristic\n # of the old encoding is a subset of the new one, then the overhead\n # of the old encoding is completely eliminated. However, each row\n # now would require more bytes to encode, to the tune of one byte\n # per characteristic bit that is active in the new encoding but not\n # in the old one.\n #\n # The "gain" of merging two encodings is how many bytes we save by doing so.\n #\n # High-level algorithm:\n #\n # - Each encoding has a minimal way to encode it. However, because\n # of the overhead of encoding the characteristic bitmap, it may\n # be beneficial to merge two encodings together, if there is\n # gain in doing so. As such, we need to search for the best\n # such successive merges.\n #\n # Algorithm:\n #\n # - Put all encodings into a "todo" list.\n #\n # - Sort todo list (for stability) by width_sort_key(), which is a tuple\n # of the following items:\n # * The "width" of the encoding.\n # * The characteristic bitmap of the encoding, with higher-numbered\n # columns compared first.\n #\n # - Make a priority-queue of the gain from combining each two\n # encodings in the todo list. The priority queue is sorted by\n # decreasing gain. Only positive gains are included.\n #\n # - While priority queue is not empty:\n # - Pop the first item from the priority queue,\n # - Merge the two encodings it represents,\n # - Remove the two encodings from the todo list,\n # - Insert positive gains from combining the new encoding with\n # all existing todo list items into the priority queue,\n # - If a todo list item with the same characteristic bitmap as\n # the new encoding exists, remove it from the todo list and\n # merge it into the new encoding.\n # - Insert the new encoding into the todo list,\n #\n # - Encode all remaining items in the todo list.\n #\n # The output is then sorted for stability, in the following way:\n # - The VarRegionList of the input is kept intact.\n # - The VarData is sorted by the same width_sort_key() used at the beginning.\n # - Within each VarData, the items are sorted as vectors of numbers.\n #\n # Finally, each VarData is optimized to remove the empty columns and\n # reorder columns as needed.\n\n # TODO\n # Check that no two VarRegions are the same; if they are, fold them.\n\n n = len(self.VarRegionList.Region) # Number of columns\n zeroes = [0] * n\n\n front_mapping = {} # Map from old VarIdxes to full row tuples\n\n encodings = _EncodingDict()\n\n # Collect all items into a set of full rows (with lots of zeroes.)\n for major, data in enumerate(self.VarData):\n regionIndices = data.VarRegionIndex\n\n for minor, item in enumerate(data.Item):\n row = list(zeroes)\n\n if quantization == 1:\n for regionIdx, v in zip(regionIndices, item):\n row[regionIdx] += v\n else:\n for regionIdx, v in zip(regionIndices, item):\n row[regionIdx] += (\n round(v / quantization) * quantization\n ) # TODO https://github.com/fonttools/fonttools/pull/3126#discussion_r1205439785\n\n row = tuple(row)\n\n if use_NO_VARIATION_INDEX and not any(row):\n front_mapping[(major << 16) + minor] = None\n continue\n\n encodings.add_row(row)\n front_mapping[(major << 16) + minor] = row\n\n # Prepare for the main algorithm.\n todo = sorted(encodings.values(), key=_Encoding.width_sort_key)\n del encodings\n\n # Repeatedly pick two best encodings to combine, and combine them.\n\n heap = []\n for i, encoding in enumerate(todo):\n for j in range(i + 1, len(todo)):\n other_encoding = todo[j]\n combining_gain = encoding.gain_from_merging(other_encoding)\n if combining_gain > 0:\n heappush(heap, (-combining_gain, i, j))\n\n while heap:\n _, i, j = heappop(heap)\n if todo[i] is None or todo[j] is None:\n continue\n\n encoding, other_encoding = todo[i], todo[j]\n todo[i], todo[j] = None, None\n\n # Combine the two encodings\n combined_chars = other_encoding.chars | encoding.chars\n combined_encoding = _Encoding(combined_chars)\n combined_encoding.extend(encoding.items)\n combined_encoding.extend(other_encoding.items)\n\n for k, enc in enumerate(todo):\n if enc is None:\n continue\n\n # In the unlikely event that the same encoding exists already,\n # combine it.\n if enc.chars == combined_chars:\n combined_encoding.extend(enc.items)\n todo[k] = None\n continue\n\n combining_gain = combined_encoding.gain_from_merging(enc)\n if combining_gain > 0:\n heappush(heap, (-combining_gain, k, len(todo)))\n\n todo.append(combined_encoding)\n\n encodings = [encoding for encoding in todo if encoding is not None]\n\n # Assemble final store.\n back_mapping = {} # Mapping from full rows to new VarIdxes\n encodings.sort(key=_Encoding.width_sort_key)\n self.VarData = []\n for encoding in encodings:\n items = sorted(encoding.items)\n\n while items:\n major = len(self.VarData)\n data = ot.VarData()\n self.VarData.append(data)\n data.VarRegionIndex = range(n)\n data.VarRegionCount = len(data.VarRegionIndex)\n\n # Each major can only encode up to 0xFFFF entries.\n data.Item, items = items[:0xFFFF], items[0xFFFF:]\n\n for minor, item in enumerate(data.Item):\n back_mapping[item] = (major << 16) + minor\n\n # Compile final mapping.\n varidx_map = {NO_VARIATION_INDEX: NO_VARIATION_INDEX}\n for k, v in front_mapping.items():\n varidx_map[k] = back_mapping[v] if v is not None else NO_VARIATION_INDEX\n\n # Recalculate things and go home.\n self.VarRegionList.RegionCount = len(self.VarRegionList.Region)\n self.VarDataCount = len(self.VarData)\n for data in self.VarData:\n data.ItemCount = len(data.Item)\n data.optimize()\n\n # Remove unused regions.\n self.prune_regions()\n\n return varidx_map\n\n\not.VarStore.optimize = VarStore_optimize\n\n\ndef main(args=None):\n """Optimize a font's GDEF variation store"""\n from argparse import ArgumentParser\n from fontTools import configLogger\n from fontTools.ttLib import TTFont\n from fontTools.ttLib.tables.otBase import OTTableWriter\n\n parser = ArgumentParser(prog="varLib.varStore", description=main.__doc__)\n parser.add_argument("--quantization", type=int, default=1)\n parser.add_argument("fontfile")\n parser.add_argument("outfile", nargs="?")\n options = parser.parse_args(args)\n\n # TODO: allow user to configure logging via command-line options\n configLogger(level="INFO")\n\n quantization = options.quantization\n fontfile = options.fontfile\n outfile = options.outfile\n\n font = TTFont(fontfile)\n gdef = font["GDEF"]\n store = gdef.table.VarStore\n\n writer = OTTableWriter()\n store.compile(writer, font)\n size = len(writer.getAllData())\n print("Before: %7d bytes" % size)\n\n varidx_map = store.optimize(quantization=quantization)\n\n writer = OTTableWriter()\n store.compile(writer, font)\n size = len(writer.getAllData())\n print("After: %7d bytes" % size)\n\n if outfile is not None:\n gdef.table.remap_device_varidxes(varidx_map)\n if "GPOS" in font:\n font["GPOS"].table.remap_device_varidxes(varidx_map)\n\n font.save(outfile)\n\n\nif __name__ == "__main__":\n import sys\n\n if len(sys.argv) > 1:\n sys.exit(main())\n import doctest\n\n sys.exit(doctest.testmod().failed)\n
.venv\Lib\site-packages\fontTools\varLib\varStore.py
varStore.py
Python
24,808
0.95
0.20839
0.176271
react-lib
7
2024-05-22T05:13:18.985515
MIT
false
ecf8f7e9cdfccbf9ed30d1b7a8aa361b
import sys\nfrom fontTools.varLib import main\n\n\nif __name__ == "__main__":\n sys.exit(main())\n
.venv\Lib\site-packages\fontTools\varLib\__main__.py
__main__.py
Python
101
0.65
0.166667
0
node-utils
581
2024-06-17T12:59:46.761876
Apache-2.0
false
0bfe5d508158ebe793952136703efb63
from fontTools.ttLib.tables import otTables as ot\nfrom copy import deepcopy\nimport logging\n\n\nlog = logging.getLogger("fontTools.varLib.instancer")\n\n\ndef _featureVariationRecordIsUnique(rec, seen):\n conditionSet = []\n conditionSets = (\n rec.ConditionSet.ConditionTable if rec.ConditionSet is not None else []\n )\n for cond in conditionSets:\n if cond.Format != 1:\n # can't tell whether this is duplicate, assume is unique\n return True\n conditionSet.append(\n (cond.AxisIndex, cond.FilterRangeMinValue, cond.FilterRangeMaxValue)\n )\n # besides the set of conditions, we also include the FeatureTableSubstitution\n # version to identify unique FeatureVariationRecords, even though only one\n # version is currently defined. It's theoretically possible that multiple\n # records with same conditions but different substitution table version be\n # present in the same font for backward compatibility.\n recordKey = frozenset([rec.FeatureTableSubstitution.Version] + conditionSet)\n if recordKey in seen:\n return False\n else:\n seen.add(recordKey) # side effect\n return True\n\n\ndef _limitFeatureVariationConditionRange(condition, axisLimit):\n minValue = condition.FilterRangeMinValue\n maxValue = condition.FilterRangeMaxValue\n\n if (\n minValue > maxValue\n or minValue > axisLimit.maximum\n or maxValue < axisLimit.minimum\n ):\n # condition invalid or out of range\n return\n\n return tuple(\n axisLimit.renormalizeValue(v, extrapolate=False) for v in (minValue, maxValue)\n )\n\n\ndef _instantiateFeatureVariationRecord(\n record, recIdx, axisLimits, fvarAxes, axisIndexMap\n):\n applies = True\n shouldKeep = False\n newConditions = []\n from fontTools.varLib.instancer import NormalizedAxisTripleAndDistances\n\n default_triple = NormalizedAxisTripleAndDistances(-1, 0, +1)\n if record.ConditionSet is None:\n record.ConditionSet = ot.ConditionSet()\n record.ConditionSet.ConditionTable = []\n record.ConditionSet.ConditionCount = 0\n for i, condition in enumerate(record.ConditionSet.ConditionTable):\n if condition.Format == 1:\n axisIdx = condition.AxisIndex\n axisTag = fvarAxes[axisIdx].axisTag\n\n minValue = condition.FilterRangeMinValue\n maxValue = condition.FilterRangeMaxValue\n triple = axisLimits.get(axisTag, default_triple)\n\n if not (minValue <= triple.default <= maxValue):\n applies = False\n\n # if condition not met, remove entire record\n if triple.minimum > maxValue or triple.maximum < minValue:\n newConditions = None\n break\n\n if axisTag in axisIndexMap:\n # remap axis index\n condition.AxisIndex = axisIndexMap[axisTag]\n\n # remap condition limits\n newRange = _limitFeatureVariationConditionRange(condition, triple)\n if newRange:\n # keep condition with updated limits\n minimum, maximum = newRange\n condition.FilterRangeMinValue = minimum\n condition.FilterRangeMaxValue = maximum\n shouldKeep = True\n if minimum != -1 or maximum != +1:\n newConditions.append(condition)\n else:\n # condition out of range, remove entire record\n newConditions = None\n break\n\n else:\n log.warning(\n "Condition table {0} of FeatureVariationRecord {1} has "\n "unsupported format ({2}); ignored".format(i, recIdx, condition.Format)\n )\n applies = False\n newConditions.append(condition)\n\n if newConditions is not None and shouldKeep:\n record.ConditionSet.ConditionTable = newConditions\n if not newConditions:\n record.ConditionSet = None\n shouldKeep = True\n else:\n shouldKeep = False\n\n # Does this *always* apply?\n universal = shouldKeep and not newConditions\n\n return applies, shouldKeep, universal\n\n\ndef _instantiateFeatureVariations(table, fvarAxes, axisLimits):\n pinnedAxes = set(axisLimits.pinnedLocation())\n axisOrder = [axis.axisTag for axis in fvarAxes if axis.axisTag not in pinnedAxes]\n axisIndexMap = {axisTag: axisOrder.index(axisTag) for axisTag in axisOrder}\n\n featureVariationApplied = False\n uniqueRecords = set()\n newRecords = []\n defaultsSubsts = None\n\n for i, record in enumerate(table.FeatureVariations.FeatureVariationRecord):\n applies, shouldKeep, universal = _instantiateFeatureVariationRecord(\n record, i, axisLimits, fvarAxes, axisIndexMap\n )\n\n if shouldKeep and _featureVariationRecordIsUnique(record, uniqueRecords):\n newRecords.append(record)\n\n if applies and not featureVariationApplied:\n assert record.FeatureTableSubstitution.Version == 0x00010000\n defaultsSubsts = deepcopy(record.FeatureTableSubstitution)\n for default, rec in zip(\n defaultsSubsts.SubstitutionRecord,\n record.FeatureTableSubstitution.SubstitutionRecord,\n ):\n default.Feature = deepcopy(\n table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature\n )\n table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature = deepcopy(\n rec.Feature\n )\n # Set variations only once\n featureVariationApplied = True\n\n # Further records don't have a chance to apply after a universal record\n if universal:\n break\n\n # Insert a catch-all record to reinstate the old features if necessary\n if featureVariationApplied and newRecords and not universal:\n defaultRecord = ot.FeatureVariationRecord()\n defaultRecord.ConditionSet = ot.ConditionSet()\n defaultRecord.ConditionSet.ConditionTable = []\n defaultRecord.ConditionSet.ConditionCount = 0\n defaultRecord.FeatureTableSubstitution = defaultsSubsts\n\n newRecords.append(defaultRecord)\n\n if newRecords:\n table.FeatureVariations.FeatureVariationRecord = newRecords\n table.FeatureVariations.FeatureVariationCount = len(newRecords)\n else:\n del table.FeatureVariations\n # downgrade table version if there are no FeatureVariations left\n table.Version = 0x00010000\n\n\ndef instantiateFeatureVariations(varfont, axisLimits):\n for tableTag in ("GPOS", "GSUB"):\n if tableTag not in varfont or not getattr(\n varfont[tableTag].table, "FeatureVariations", None\n ):\n continue\n log.info("Instantiating FeatureVariations of %s table", tableTag)\n _instantiateFeatureVariations(\n varfont[tableTag].table, varfont["fvar"].axes, axisLimits\n )\n # remove unreferenced lookups\n varfont[tableTag].prune_lookups()\n
.venv\Lib\site-packages\fontTools\varLib\instancer\featureVars.py
featureVars.py
Python
7,300
0.95
0.2
0.113924
python-kit
519
2024-05-19T12:56:39.708337
Apache-2.0
false
9bdaaffb274aa30e939ae35375cd2b34
from fontTools.varLib.models import supportScalar\nfrom fontTools.misc.fixedTools import MAX_F2DOT14\nfrom functools import lru_cache\n\n__all__ = ["rebaseTent"]\n\nEPSILON = 1 / (1 << 14)\n\n\ndef _reverse_negate(v):\n return (-v[2], -v[1], -v[0])\n\n\ndef _solve(tent, axisLimit, negative=False):\n axisMin, axisDef, axisMax, _distanceNegative, _distancePositive = axisLimit\n lower, peak, upper = tent\n\n # Mirror the problem such that axisDef <= peak\n if axisDef > peak:\n return [\n (scalar, _reverse_negate(t) if t is not None else None)\n for scalar, t in _solve(\n _reverse_negate(tent),\n axisLimit.reverse_negate(),\n not negative,\n )\n ]\n # axisDef <= peak\n\n # case 1: The whole deltaset falls outside the new limit; we can drop it\n #\n # peak\n # 1.........................................o..........\n # / \\n # / \\n # / \\n # / \\n # 0---|-----------|----------|-------- o o----1\n # axisMin axisDef axisMax lower upper\n #\n if axisMax <= lower and axisMax < peak:\n return [] # No overlap\n\n # case 2: Only the peak and outermost bound fall outside the new limit;\n # we keep the deltaset, update peak and outermost bound and and scale deltas\n # by the scalar value for the restricted axis at the new limit, and solve\n # recursively.\n #\n # |peak\n # 1...............................|.o..........\n # |/ \\n # / \\n # /| \\n # / | \\n # 0--------------------------- o | o----1\n # lower | upper\n # |\n # axisMax\n #\n # Convert to:\n #\n # 1............................................\n # |\n # o peak\n # /|\n # /x|\n # 0--------------------------- o o upper ----1\n # lower |\n # |\n # axisMax\n if axisMax < peak:\n mult = supportScalar({"tag": axisMax}, {"tag": tent})\n tent = (lower, axisMax, axisMax)\n return [(scalar * mult, t) for scalar, t in _solve(tent, axisLimit)]\n\n # lower <= axisDef <= peak <= axisMax\n\n gain = supportScalar({"tag": axisDef}, {"tag": tent})\n out = [(gain, None)]\n\n # First, the positive side\n\n # outGain is the scalar of axisMax at the tent.\n outGain = supportScalar({"tag": axisMax}, {"tag": tent})\n\n # Case 3a: Gain is more than outGain. The tent down-slope crosses\n # the axis into negative. We have to split it into multiples.\n #\n # | peak |\n # 1...................|.o.....|..............\n # |/x\_ |\n # gain................+....+_.|..............\n # /| |y\|\n # ................../.|....|..+_......outGain\n # / | | | \\n # 0---|-----------o | | | o----------1\n # axisMin lower | | | upper\n # | | |\n # axisDef | axisMax\n # |\n # crossing\n if gain >= outGain:\n # Note that this is the branch taken if both gain and outGain are 0.\n\n # Crossing point on the axis.\n crossing = peak + (1 - gain) * (upper - peak)\n\n loc = (max(lower, axisDef), peak, crossing)\n scalar = 1\n\n # The part before the crossing point.\n out.append((scalar - gain, loc))\n\n # The part after the crossing point may use one or two tents,\n # depending on whether upper is before axisMax or not, in one\n # case we need to keep it down to eternity.\n\n # Case 3a1, similar to case 1neg; just one tent needed, as in\n # the drawing above.\n if upper >= axisMax:\n loc = (crossing, axisMax, axisMax)\n scalar = outGain\n\n out.append((scalar - gain, loc))\n\n # Case 3a2: Similar to case 2neg; two tents needed, to keep\n # down to eternity.\n #\n # | peak |\n # 1...................|.o................|...\n # |/ \_ |\n # gain................+....+_............|...\n # /| | \xxxxxxxxxxy|\n # / | | \_xxxxxyyyy|\n # / | | \xxyyyyyy|\n # 0---|-----------o | | o-------|--1\n # axisMin lower | | upper |\n # | | |\n # axisDef | axisMax\n # |\n # crossing\n else:\n # A tent's peak cannot fall on axis default. Nudge it.\n if upper == axisDef:\n upper += EPSILON\n\n # Downslope.\n loc1 = (crossing, upper, axisMax)\n scalar1 = 0\n\n # Eternity justify.\n loc2 = (upper, axisMax, axisMax)\n scalar2 = 0\n\n out.append((scalar1 - gain, loc1))\n out.append((scalar2 - gain, loc2))\n\n else:\n # Special-case if peak is at axisMax.\n if axisMax == peak:\n upper = peak\n\n # Case 3:\n # We keep delta as is and only scale the axis upper to achieve\n # the desired new tent if feasible.\n #\n # peak\n # 1.....................o....................\n # / \_|\n # ..................../....+_.........outGain\n # / | \\n # gain..............+......|..+_.............\n # /| | | \\n # 0---|-----------o | | | o----------1\n # axisMin lower| | | upper\n # | | newUpper\n # axisDef axisMax\n #\n newUpper = peak + (1 - gain) * (upper - peak)\n assert axisMax <= newUpper # Because outGain > gain\n # Disabled because ots doesn't like us:\n # https://github.com/fonttools/fonttools/issues/3350\n if False and newUpper <= axisDef + (axisMax - axisDef) * 2:\n upper = newUpper\n if not negative and axisDef + (axisMax - axisDef) * MAX_F2DOT14 < upper:\n # we clamp +2.0 to the max F2Dot14 (~1.99994) for convenience\n upper = axisDef + (axisMax - axisDef) * MAX_F2DOT14\n assert peak < upper\n\n loc = (max(axisDef, lower), peak, upper)\n scalar = 1\n\n out.append((scalar - gain, loc))\n\n # Case 4: New limit doesn't fit; we need to chop into two tents,\n # because the shape of a triangle with part of one side cut off\n # cannot be represented as a triangle itself.\n #\n # | peak |\n # 1.........|......o.|....................\n # ..........|...../x\|.............outGain\n # | |xxy|\_\n # | /xxxy| \_\n # | |xxxxy| \_\n # | /xxxxy| \_\n # 0---|-----|-oxxxxxx| o----------1\n # axisMin | lower | upper\n # | |\n # axisDef axisMax\n #\n else:\n loc1 = (max(axisDef, lower), peak, axisMax)\n scalar1 = 1\n\n loc2 = (peak, axisMax, axisMax)\n scalar2 = outGain\n\n out.append((scalar1 - gain, loc1))\n # Don't add a dirac delta!\n if peak < axisMax:\n out.append((scalar2 - gain, loc2))\n\n # Now, the negative side\n\n # Case 1neg: Lower extends beyond axisMin: we chop. Simple.\n #\n # | |peak\n # 1..................|...|.o.................\n # | |/ \\n # gain...............|...+...\...............\n # |x_/| \\n # |/ | \\n # _/| | \\n # 0---------------o | | o----------1\n # lower | | upper\n # | |\n # axisMin axisDef\n #\n if lower <= axisMin:\n loc = (axisMin, axisMin, axisDef)\n scalar = supportScalar({"tag": axisMin}, {"tag": tent})\n\n out.append((scalar - gain, loc))\n\n # Case 2neg: Lower is betwen axisMin and axisDef: we add two\n # tents to keep it down all the way to eternity.\n #\n # | |peak\n # 1...|...............|.o.................\n # | |/ \\n # gain|...............+...\...............\n # |yxxxxxxxxxxxxx/| \\n # |yyyyyyxxxxxxx/ | \\n # |yyyyyyyyyyyx/ | \\n # 0---|-----------o | o----------1\n # axisMin lower | upper\n # |\n # axisDef\n #\n else:\n # A tent's peak cannot fall on axis default. Nudge it.\n if lower == axisDef:\n lower -= EPSILON\n\n # Downslope.\n loc1 = (axisMin, lower, axisDef)\n scalar1 = 0\n\n # Eternity justify.\n loc2 = (axisMin, axisMin, lower)\n scalar2 = 0\n\n out.append((scalar1 - gain, loc1))\n out.append((scalar2 - gain, loc2))\n\n return out\n\n\n@lru_cache(128)\ndef rebaseTent(tent, axisLimit):\n """Given a tuple (lower,peak,upper) "tent" and new axis limits\n (axisMin,axisDefault,axisMax), solves how to represent the tent\n under the new axis configuration. All values are in normalized\n -1,0,+1 coordinate system. Tent values can be outside this range.\n\n Return value is a list of tuples. Each tuple is of the form\n (scalar,tent), where scalar is a multipler to multiply any\n delta-sets by, and tent is a new tent for that output delta-set.\n If tent value is None, that is a special deltaset that should\n be always-enabled (called "gain")."""\n\n axisMin, axisDef, axisMax, _distanceNegative, _distancePositive = axisLimit\n assert -1 <= axisMin <= axisDef <= axisMax <= +1\n\n lower, peak, upper = tent\n assert -2 <= lower <= peak <= upper <= +2\n\n assert peak != 0\n\n sols = _solve(tent, axisLimit)\n\n n = lambda v: axisLimit.renormalizeValue(v)\n sols = [\n (scalar, (n(v[0]), n(v[1]), n(v[2])) if v is not None else None)\n for scalar, v in sols\n if scalar\n ]\n\n return sols\n
.venv\Lib\site-packages\fontTools\varLib\instancer\solver.py
solver.py
Python
11,311
0.95
0.087379
0.597701
node-utils
925
2024-01-23T11:37:11.986956
MIT
false
d398650182ec247de738d95d22c5248c
import sys\nfrom fontTools.varLib.instancer import main\n\nif __name__ == "__main__":\n sys.exit(main())\n
.venv\Lib\site-packages\fontTools\varLib\instancer\__main__.py
__main__.py
Python
109
0.85
0.2
0
python-kit
515
2023-12-07T08:02:13.442379
MIT
false
6ab36a7fac4b351cf14b5bfe015efcfb
\n\n
.venv\Lib\site-packages\fontTools\varLib\instancer\__pycache__\featureVars.cpython-313.pyc
featureVars.cpython-313.pyc
Other
7,193
0.8
0
0
react-lib
229
2025-04-09T03:51:19.984767
MIT
false
46294c93bcd42f5789d495e64fd27299
\n\n
.venv\Lib\site-packages\fontTools\varLib\instancer\__pycache__\names.cpython-313.pyc
names.cpython-313.pyc
Other
18,380
0.95
0.026178
0
awesome-app
499
2024-03-03T02:43:52.273286
Apache-2.0
false
9e42cfdfe8909433735153873cfb12cd
\n\n
.venv\Lib\site-packages\fontTools\varLib\instancer\__pycache__\solver.cpython-313.pyc
solver.cpython-313.pyc
Other
4,555
0.8
0.013333
0.014706
python-kit
608
2024-10-26T22:56:10.234699
MIT
false
aaf20f9c150105f83c29e73b487c7ec8
\n\n
.venv\Lib\site-packages\fontTools\varLib\instancer\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
81,730
0.75
0.045812
0.009887
react-lib
357
2023-11-05T06:22:21.607844
Apache-2.0
false
bcc96a01fbfcb815b00535a05c4fb1e0
\n\n
.venv\Lib\site-packages\fontTools\varLib\instancer\__pycache__\__main__.cpython-313.pyc
__main__.cpython-313.pyc
Other
388
0.7
0
0
vue-tools
850
2023-12-01T23:24:13.125225
Apache-2.0
false
b71dec23e8dc7bcfec1d94a8620725dd
\n\n
.venv\Lib\site-packages\fontTools\varLib\__pycache__\avar.cpython-313.pyc
avar.cpython-313.pyc
Other
10,858
0.95
0
0
node-utils
822
2025-01-22T22:44:48.018063
BSD-3-Clause
false
3d50e96498deecf55943a1bf70c0328a
\n\n
.venv\Lib\site-packages\fontTools\varLib\__pycache__\avarPlanner.cpython-313.pyc
avarPlanner.cpython-313.pyc
Other
29,727
0.95
0.08867
0.008065
node-utils
624
2025-07-08T02:02:44.673647
MIT
false
ddb6a4c82659b93ccdd6781ea339003c
\n\n
.venv\Lib\site-packages\fontTools\varLib\__pycache__\builder.cpython-313.pyc
builder.cpython-313.pyc
Other
11,133
0.8
0.011364
0
vue-tools
420
2023-07-21T22:47:31.621199
MIT
false
911f8a60782c5a5252e13313302c4254
\n\n
.venv\Lib\site-packages\fontTools\varLib\__pycache__\cff.cpython-313.pyc
cff.cpython-313.pyc
Other
25,329
0.95
0.004739
0.015707
vue-tools
836
2025-03-03T09:21:33.937166
MIT
false
bc94f216050a739a3d04fceaaaaa921a
\n\n
.venv\Lib\site-packages\fontTools\varLib\__pycache__\errors.cpython-313.pyc
errors.cpython-313.pyc
Other
12,980
0.8
0.017544
0
awesome-app
138
2024-03-14T07:14:30.332356
BSD-3-Clause
false
8b48a4bf6789dda7e76574c01229d84a
\n\n
.venv\Lib\site-packages\fontTools\varLib\__pycache__\featureVars.cpython-313.pyc
featureVars.cpython-313.pyc
Other
27,893
0.95
0.039286
0.048
python-kit
245
2025-03-02T02:13:36.572416
BSD-3-Clause
false
6916c88f79e95a0b623be370cb1bb334
\n\n
.venv\Lib\site-packages\fontTools\varLib\__pycache__\hvar.cpython-313.pyc
hvar.cpython-313.pyc
Other
4,599
0.8
0
0
node-utils
169
2024-05-20T07:17:55.612854
BSD-3-Clause
false
b3625c0c0bb37e27e46066f7fec2906d
\n\n
.venv\Lib\site-packages\fontTools\varLib\__pycache__\interpolatable.cpython-313.pyc
interpolatable.cpython-313.pyc
Other
41,002
0.8
0.009036
0.009524
awesome-app
172
2023-07-17T08:53:49.132950
GPL-3.0
false
87dad3e1a7f61f519795f05fb93a96d6
\n\n
.venv\Lib\site-packages\fontTools\varLib\__pycache__\interpolatableHelpers.cpython-313.pyc
interpolatableHelpers.cpython-313.pyc
Other
18,645
0.8
0
0.00565
node-utils
811
2024-03-19T22:42:16.351835
GPL-3.0
false
913aff7899aba8d0c652b793cc4beaa1
\n\n
.venv\Lib\site-packages\fontTools\varLib\__pycache__\interpolatablePlot.cpython-313.pyc
interpolatablePlot.cpython-313.pyc
Other
52,834
0.8
0.00159
0.003472
awesome-app
615
2025-01-28T01:13:52.926411
MIT
false
5e5bf205c092e15b42a21a658e2a7be8