content
stringlengths 1
103k
⌀ | path
stringlengths 8
216
| filename
stringlengths 2
179
| language
stringclasses 15
values | size_bytes
int64 2
189k
| quality_score
float64 0.5
0.95
| complexity
float64 0
1
| documentation_ratio
float64 0
1
| repository
stringclasses 5
values | stars
int64 0
1k
| created_date
stringdate 2023-07-10 19:21:08
2025-07-09 19:11:45
| license
stringclasses 4
values | is_test
bool 2
classes | file_hash
stringlengths 32
32
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
from fontTools.ttLib import TTFont
from fontTools.feaLib.builder import addOpenTypeFeatures, Builder
from fontTools.feaLib.error import FeatureLibError
from fontTools import configLogger
from fontTools.misc.cliTools import makeOutputFileName
import sys
import argparse
import logging


log = logging.getLogger("fontTools.feaLib")


def main(args=None):
    """Add features from a feature file (.fea) into an OTF font"""
    arg_parser = argparse.ArgumentParser(
        description="Use fontTools to compile OpenType feature files (*.fea)."
    )
    arg_parser.add_argument(
        "input_fea", metavar="FEATURES", help="Path to the feature file"
    )
    arg_parser.add_argument(
        "input_font", metavar="INPUT_FONT", help="Path to the input font"
    )
    arg_parser.add_argument(
        "-o",
        "--output",
        dest="output_font",
        metavar="OUTPUT_FONT",
        help="Path to the output font.",
    )
    arg_parser.add_argument(
        "-t",
        "--tables",
        metavar="TABLE_TAG",
        choices=Builder.supportedTables,
        nargs="+",
        help="Specify the table(s) to be built.",
    )
    arg_parser.add_argument(
        "-d",
        "--debug",
        action="store_true",
        help="Add source-level debugging information to font.",
    )
    arg_parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="Increase the logger verbosity. Multiple -v options are allowed.",
    )
    arg_parser.add_argument(
        "--traceback", help="show traceback for exceptions.", action="store_true"
    )
    parsed = arg_parser.parse_args(args)

    # Each -v raises verbosity one step, capped at DEBUG.
    verbosity_levels = ["WARNING", "INFO", "DEBUG"]
    configLogger(level=verbosity_levels[min(len(verbosity_levels) - 1, parsed.verbose)])

    # Derive a default output path from the input font when -o is absent.
    out_path = parsed.output_font or makeOutputFileName(parsed.input_font)
    log.info("Compiling features to '%s'" % (out_path))

    tt_font = TTFont(parsed.input_font)
    try:
        addOpenTypeFeatures(
            tt_font, parsed.input_fea, tables=parsed.tables, debug=parsed.debug
        )
    except FeatureLibError as err:
        if parsed.traceback:
            raise
        # Without --traceback, report the compile error and exit non-zero.
        log.error(err)
        sys.exit(1)
    tt_font.save(out_path)


if __name__ == "__main__":
    sys.exit(main())
|
.venv\Lib\site-packages\fontTools\feaLib\__main__.py
|
__main__.py
|
Python
| 2,318 | 0.85 | 0.064103 | 0 |
python-kit
| 388 |
2024-04-15T20:21:31.671675
|
GPL-3.0
| false |
92f9cbb320cd434f364661308cd8a6d5
|
\n\n
|
.venv\Lib\site-packages\fontTools\feaLib\__pycache__\builder.cpython-313.pyc
|
builder.cpython-313.pyc
|
Other
| 86,176 | 0.75 | 0.02443 | 0.005172 |
awesome-app
| 39 |
2023-10-04T09:37:41.561984
|
BSD-3-Clause
| false |
5d95a8c5af07d4a75ef896a9b132466c
|
\n\n
|
.venv\Lib\site-packages\fontTools\feaLib\__pycache__\error.cpython-313.pyc
|
error.cpython-313.pyc
|
Other
| 1,563 | 0.8 | 0 | 0 |
react-lib
| 547 |
2025-06-18T00:26:01.126367
|
MIT
| false |
0a8b3ea05a84a23d6ef747af935fe041
|
\n\n
|
.venv\Lib\site-packages\fontTools\feaLib\__pycache__\lexer.cpython-313.pyc
|
lexer.cpython-313.pyc
|
Other
| 15,349 | 0.95 | 0.023077 | 0 |
python-kit
| 989 |
2023-08-26T06:46:07.049277
|
MIT
| false |
c80416735f9b985b6e5ac13655ee1061
|
\n\n
|
.venv\Lib\site-packages\fontTools\feaLib\__pycache__\location.cpython-313.pyc
|
location.cpython-313.pyc
|
Other
| 871 | 0.7 | 0 | 0 |
python-kit
| 555 |
2025-02-16T09:38:25.281465
|
GPL-3.0
| false |
65944fa8306b5b973cf81442f8d25a3a
|
\n\n
|
.venv\Lib\site-packages\fontTools\feaLib\__pycache__\lookupDebugInfo.cpython-313.pyc
|
lookupDebugInfo.cpython-313.pyc
|
Other
| 789 | 0.7 | 0 | 0 |
node-utils
| 668 |
2025-06-18T16:12:15.845485
|
Apache-2.0
| false |
61d20a4d6636e41ec0acd02e810bb585
|
\n\n
|
.venv\Lib\site-packages\fontTools\feaLib\__pycache__\variableScalar.cpython-313.pyc
|
variableScalar.cpython-313.pyc
|
Other
| 7,665 | 0.8 | 0 | 0 |
vue-tools
| 355 |
2023-07-16T12:50:04.632913
|
BSD-3-Clause
| false |
2a7d3efc303a92331acbb1483c6276e6
|
\n\n
|
.venv\Lib\site-packages\fontTools\feaLib\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 276 | 0.7 | 0.5 | 0 |
awesome-app
| 631 |
2023-09-09T04:10:28.608712
|
MIT
| false |
dbc46b34cb2e1b993e07be7923444f9e
|
\n\n
|
.venv\Lib\site-packages\fontTools\feaLib\__pycache__\__main__.cpython-313.pyc
|
__main__.cpython-313.pyc
|
Other
| 3,265 | 0.8 | 0.030303 | 0 |
vue-tools
| 331 |
2023-11-17T10:07:37.423167
|
GPL-3.0
| false |
bf02f9854743dd1d7e3ed0982cd25e7f
|
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader

from fontTools.ttLib.tables.DefaultTable import DefaultTable
import logging


log = logging.getLogger("fontTools.merge")


def add_method(*clazzes, **kwargs):
    """Returns a decorator function that adds a new method to one or
    more classes."""
    # allowDefaultTable=True lets a method be installed directly on
    # DefaultTable itself (normally a sign the table class lookup failed).
    allowDefault = kwargs.get("allowDefaultTable", False)

    def wrapper(method):
        done = []
        for clazz in clazzes:
            if clazz in done:
                continue  # Support multiple names of a clazz
            done.append(clazz)
            assert allowDefault or clazz != DefaultTable, "Oops, table class not found."
            # Refuse to silently overwrite an existing method of the class.
            assert (
                method.__name__ not in clazz.__dict__
            ), "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
            setattr(clazz, method.__name__, method)
        # Returning None on purpose: the decorated name becomes unusable
        # at module level, so it can only be reached through the classes.
        return None

    return wrapper


def mergeObjects(lst):
    """Merge a list of same-class table objects attribute-by-attribute.

    Each attribute is merged by the callable found in the class's
    ``mergeMap`` (falling back to the ``"*"`` wildcard entry).  Items that
    are ``NotImplemented`` are dropped first; an all-``None`` list merges
    to ``None``.  Returns a freshly constructed object of the same class.
    """
    lst = [item for item in lst if item is not NotImplemented]
    if not lst:
        return NotImplemented
    lst = [item for item in lst if item is not None]
    if not lst:
        return None

    clazz = lst[0].__class__
    assert all(type(item) == clazz for item in lst), lst

    logic = clazz.mergeMap
    returnTable = clazz()
    returnDict = {}

    # Union of every attribute name present on any of the inputs.
    allKeys = set.union(set(), *(vars(table).keys() for table in lst))
    for key in allKeys:
        try:
            mergeLogic = logic[key]
        except KeyError:
            try:
                mergeLogic = logic["*"]
            except KeyError:
                raise Exception(
                    "Don't know how to merge key %s of class %s" % (key, clazz.__name__)
                )
        # A NotImplemented entry in mergeMap means "drop this attribute".
        if mergeLogic is NotImplemented:
            continue
        value = mergeLogic(getattr(table, key, NotImplemented) for table in lst)
        if value is not NotImplemented:
            returnDict[key] = value

    returnTable.__dict__ = returnDict

    return returnTable


@add_method(DefaultTable, allowDefaultTable=True)
def merge(self, m, tables):
    """Default merge entry point installed on every table class.

    Dispatches on the class's ``mergeMap``: a dict is merged field-wise
    via the merger's ``mergeObjects``; a callable handles the whole list.
    Returns ``NotImplemented`` when the table has no ``mergeMap`` at all.
    """
    if not hasattr(self, "mergeMap"):
        log.info("Don't know how to merge '%s'.", self.tableTag)
        return NotImplemented

    logic = self.mergeMap

    if isinstance(logic, dict):
        return m.mergeObjects(self, self.mergeMap, tables)
    else:
        return logic(tables)
|
.venv\Lib\site-packages\fontTools\merge\base.py
|
base.py
|
Python
| 2,470 | 0.95 | 0.320988 | 0.047619 |
react-lib
| 199 |
2025-05-18T23:27:48.010185
|
MIT
| false |
3915e1817eef237651a29451c5cd7656
|
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader

from fontTools import ttLib
from fontTools.ttLib.tables.DefaultTable import DefaultTable
from fontTools.ttLib.tables import otTables
from fontTools.merge.base import add_method, mergeObjects
from fontTools.merge.util import *
import logging


log = logging.getLogger("fontTools.merge")


def mergeLookupLists(lst):
    """Concatenate lookup-index lists from all fonts."""
    # TODO Do smarter merge.
    return sumLists(lst)


def mergeFeatures(lst):
    """Merge several otTables.Feature objects into one new Feature."""
    assert lst
    self = otTables.Feature()
    self.FeatureParams = None
    self.LookupListIndex = mergeLookupLists(
        [l.LookupListIndex for l in lst if l.LookupListIndex]
    )
    self.LookupCount = len(self.LookupListIndex)
    return self


def mergeFeatureLists(lst):
    """Group FeatureRecords by tag and merge each group, sorted by tag."""
    d = {}
    for l in lst:
        for f in l:
            tag = f.FeatureTag
            if tag not in d:
                d[tag] = []
            d[tag].append(f.Feature)
    ret = []
    for tag in sorted(d.keys()):
        rec = otTables.FeatureRecord()
        rec.FeatureTag = tag
        rec.Feature = mergeFeatures(d[tag])
        ret.append(rec)
    return ret


def mergeLangSyses(lst):
    """Merge LangSys records; only supports ReqFeatureIndex == 0xFFFF."""
    assert lst

    # TODO Support merging ReqFeatureIndex
    assert all(l.ReqFeatureIndex == 0xFFFF for l in lst)

    self = otTables.LangSys()
    self.LookupOrder = None
    self.ReqFeatureIndex = 0xFFFF
    self.FeatureIndex = mergeFeatureLists(
        [l.FeatureIndex for l in lst if l.FeatureIndex]
    )
    self.FeatureCount = len(self.FeatureIndex)
    return self


def mergeScripts(lst):
    """Merge Script records, combining LangSys entries per language tag."""
    assert lst

    if len(lst) == 1:
        return lst[0]
    langSyses = {}
    for sr in lst:
        for lsr in sr.LangSysRecord:
            if lsr.LangSysTag not in langSyses:
                langSyses[lsr.LangSysTag] = []
            langSyses[lsr.LangSysTag].append(lsr.LangSys)
    lsrecords = []
    for tag, langSys_list in sorted(langSyses.items()):
        lsr = otTables.LangSysRecord()
        lsr.LangSys = mergeLangSyses(langSys_list)
        lsr.LangSysTag = tag
        lsrecords.append(lsr)

    self = otTables.Script()
    self.LangSysRecord = lsrecords
    self.LangSysCount = len(lsrecords)
    dfltLangSyses = [s.DefaultLangSys for s in lst if s.DefaultLangSys]
    if dfltLangSyses:
        self.DefaultLangSys = mergeLangSyses(dfltLangSyses)
    else:
        self.DefaultLangSys = None
    return self


def mergeScriptRecords(lst):
    """Group ScriptRecords by tag and merge each group, sorted by tag."""
    d = {}
    for l in lst:
        for s in l:
            tag = s.ScriptTag
            if tag not in d:
                d[tag] = []
            d[tag].append(s.Script)
    ret = []
    for tag in sorted(d.keys()):
        rec = otTables.ScriptRecord()
        rec.ScriptTag = tag
        rec.Script = mergeScripts(d[tag])
        ret.append(rec)
    return ret


# Module-import side effect: install per-field merge strategies
# (mergeMap dicts) onto the otTables subtable classes.
otTables.ScriptList.mergeMap = {
    "ScriptCount": lambda lst: None,  # TODO
    "ScriptRecord": mergeScriptRecords,
}
otTables.BaseScriptList.mergeMap = {
    "BaseScriptCount": lambda lst: None,  # TODO
    # TODO: Merge duplicate entries
    "BaseScriptRecord": lambda lst: sorted(
        sumLists(lst), key=lambda s: s.BaseScriptTag
    ),
}

otTables.FeatureList.mergeMap = {
    "FeatureCount": sum,
    "FeatureRecord": lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
}

otTables.LookupList.mergeMap = {
    "LookupCount": sum,
    "Lookup": sumLists,
}

otTables.Coverage.mergeMap = {
    "Format": min,
    "glyphs": sumLists,
}

otTables.ClassDef.mergeMap = {
    "Format": min,
    "classDefs": sumDicts,
}

otTables.LigCaretList.mergeMap = {
    "Coverage": mergeObjects,
    "LigGlyphCount": sum,
    "LigGlyph": sumLists,
}

otTables.AttachList.mergeMap = {
    "Coverage": mergeObjects,
    "GlyphCount": sum,
    "AttachPoint": sumLists,
}

# XXX Renumber MarkFilterSets of lookups
otTables.MarkGlyphSetsDef.mergeMap = {
    "MarkSetTableFormat": equal,
    "MarkSetCount": sum,
    "Coverage": sumLists,
}

otTables.Axis.mergeMap = {
    "*": mergeObjects,
}

# XXX Fix BASE table merging
otTables.BaseTagList.mergeMap = {
    "BaseTagCount": sum,
    "BaselineTag": sumLists,
}

otTables.GDEF.mergeMap = otTables.GSUB.mergeMap = otTables.GPOS.mergeMap = (
    otTables.BASE.mergeMap
) = otTables.JSTF.mergeMap = otTables.MATH.mergeMap = {
    "*": mergeObjects,
    "Version": max,
}

ttLib.getTableClass("GDEF").mergeMap = ttLib.getTableClass("GSUB").mergeMap = (
    ttLib.getTableClass("GPOS").mergeMap
) = ttLib.getTableClass("BASE").mergeMap = ttLib.getTableClass(
    "JSTF"
).mergeMap = ttLib.getTableClass(
    "MATH"
).mergeMap = {
    "tableTag": onlyExisting(equal),  # XXX clean me up
    "table": mergeObjects,
}


@add_method(ttLib.getTableClass("GSUB"))
def merge(self, m, tables):
    """Merge GSUB tables, synthesizing 'locl' features for fonts that had
    non-identical duplicate glyphs (recorded in m.duplicateGlyphsPerFont)."""
    assert len(tables) == len(m.duplicateGlyphsPerFont)
    for i, (table, dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)):
        if not dups:
            continue
        if table is None or table is NotImplemented:
            log.warning(
                "Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s",
                m.fonts[i]._merger__name,
                dups,
            )
            continue

        synthFeature = None
        synthLookup = None
        for script in table.table.ScriptList.ScriptRecord:
            if script.ScriptTag == "DFLT":
                continue  # XXX
            for langsys in [script.Script.DefaultLangSys] + [
                l.LangSys for l in script.Script.LangSysRecord
            ]:
                if langsys is None:
                    continue  # XXX Create!
                feature = [v for v in langsys.FeatureIndex if v.FeatureTag == "locl"]
                assert len(feature) <= 1
                if feature:
                    feature = feature[0]
                else:
                    # No 'locl' feature yet: create one (once) and attach it.
                    if not synthFeature:
                        synthFeature = otTables.FeatureRecord()
                        synthFeature.FeatureTag = "locl"
                        f = synthFeature.Feature = otTables.Feature()
                        f.FeatureParams = None
                        f.LookupCount = 0
                        f.LookupListIndex = []
                        table.table.FeatureList.FeatureRecord.append(synthFeature)
                        table.table.FeatureList.FeatureCount += 1
                    feature = synthFeature
                    langsys.FeatureIndex.append(feature)
                    langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag)

                # Single-substitution lookup that maps the duplicated glyphs.
                if not synthLookup:
                    subtable = otTables.SingleSubst()
                    subtable.mapping = dups
                    synthLookup = otTables.Lookup()
                    synthLookup.LookupFlag = 0
                    synthLookup.LookupType = 1
                    synthLookup.SubTableCount = 1
                    synthLookup.SubTable = [subtable]
                    if table.table.LookupList is None:
                        # mtiLib uses None as default value for LookupList,
                        # while feaLib points to an empty array with count 0
                        # TODO: make them do the same
                        table.table.LookupList = otTables.LookupList()
                        table.table.LookupList.Lookup = []
                        table.table.LookupList.LookupCount = 0
                    table.table.LookupList.Lookup.append(synthLookup)
                    table.table.LookupList.LookupCount += 1

                if feature.Feature.LookupListIndex[:1] != [synthLookup]:
                    feature.Feature.LookupListIndex[:0] = [synthLookup]
                    feature.Feature.LookupCount += 1

    DefaultTable.merge(self, m, tables)
    return self


@add_method(
    otTables.SingleSubst,
    otTables.MultipleSubst,
    otTables.AlternateSubst,
    otTables.LigatureSubst,
    otTables.ReverseChainSingleSubst,
    otTables.SinglePos,
    otTables.PairPos,
    otTables.CursivePos,
    otTables.MarkBasePos,
    otTables.MarkLigPos,
    otTables.MarkMarkPos,
)
def mapLookups(self, lookupMap):
    # These subtable types reference no nested lookups; nothing to remap.
    pass


# Copied and trimmed down from subset.py
@add_method(
    otTables.ContextSubst,
    otTables.ChainContextSubst,
    otTables.ContextPos,
    otTables.ChainContextPos,
)
def __merge_classify_context(self):
    """Return a (cached per class+format) helper naming the attribute
    names used by this contextual subtable's format."""

    class ContextHelper(object):
        def __init__(self, klass, Format):
            if klass.__name__.endswith("Subst"):
                Typ = "Sub"
                Type = "Subst"
            else:
                Typ = "Pos"
                Type = "Pos"
            if klass.__name__.startswith("Chain"):
                Chain = "Chain"
            else:
                Chain = ""
            ChainTyp = Chain + Typ

            self.Typ = Typ
            self.Type = Type
            self.Chain = Chain
            self.ChainTyp = ChainTyp

            self.LookupRecord = Type + "LookupRecord"

            if Format == 1:
                self.Rule = ChainTyp + "Rule"
                self.RuleSet = ChainTyp + "RuleSet"
            elif Format == 2:
                self.Rule = ChainTyp + "ClassRule"
                self.RuleSet = ChainTyp + "ClassSet"

    if self.Format not in [1, 2, 3]:
        return None  # Don't shoot the messenger; let it go
    if not hasattr(self.__class__, "_merge__ContextHelpers"):
        self.__class__._merge__ContextHelpers = {}
    if self.Format not in self.__class__._merge__ContextHelpers:
        helper = ContextHelper(self.__class__, self.Format)
        self.__class__._merge__ContextHelpers[self.Format] = helper
    return self.__class__._merge__ContextHelpers[self.Format]


@add_method(
    otTables.ContextSubst,
    otTables.ChainContextSubst,
    otTables.ContextPos,
    otTables.ChainContextPos,
)
def mapLookups(self, lookupMap):
    """Remap nested lookup references inside a contextual subtable."""
    c = self.__merge_classify_context()

    if self.Format in [1, 2]:
        for rs in getattr(self, c.RuleSet):
            if not rs:
                continue
            for r in getattr(rs, c.Rule):
                if not r:
                    continue
                for ll in getattr(r, c.LookupRecord):
                    if not ll:
                        continue
                    ll.LookupListIndex = lookupMap[ll.LookupListIndex]
    elif self.Format == 3:
        for ll in getattr(self, c.LookupRecord):
            if not ll:
                continue
            ll.LookupListIndex = lookupMap[ll.LookupListIndex]
    else:
        assert 0, "unknown format: %s" % self.Format


@add_method(otTables.ExtensionSubst, otTables.ExtensionPos)
def mapLookups(self, lookupMap):
    # Extension subtables just wrap a single real subtable.
    if self.Format == 1:
        self.ExtSubTable.mapLookups(lookupMap)
    else:
        assert 0, "unknown format: %s" % self.Format


@add_method(otTables.Lookup)
def mapLookups(self, lookupMap):
    for st in self.SubTable:
        if not st:
            continue
        st.mapLookups(lookupMap)


@add_method(otTables.LookupList)
def mapLookups(self, lookupMap):
    for l in self.Lookup:
        if not l:
            continue
        l.mapLookups(lookupMap)


@add_method(otTables.Lookup)
def mapMarkFilteringSets(self, markFilteringSetMap):
    # 0x0010 == UseMarkFilteringSet lookup flag.
    if self.LookupFlag & 0x0010:
        self.MarkFilteringSet = markFilteringSetMap[self.MarkFilteringSet]


@add_method(otTables.LookupList)
def mapMarkFilteringSets(self, markFilteringSetMap):
    for l in self.Lookup:
        if not l:
            continue
        l.mapMarkFilteringSets(markFilteringSetMap)


@add_method(otTables.Feature)
def mapLookups(self, lookupMap):
    self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex]


@add_method(otTables.FeatureList)
def mapLookups(self, lookupMap):
    for f in self.FeatureRecord:
        if not f or not f.Feature:
            continue
        f.Feature.mapLookups(lookupMap)


@add_method(otTables.DefaultLangSys, otTables.LangSys)
def mapFeatures(self, featureMap):
    self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex]
    if self.ReqFeatureIndex != 65535:
        self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]


@add_method(otTables.Script)
def mapFeatures(self, featureMap):
    if self.DefaultLangSys:
        self.DefaultLangSys.mapFeatures(featureMap)
    for l in self.LangSysRecord:
        if not l or not l.LangSys:
            continue
        l.LangSys.mapFeatures(featureMap)


@add_method(otTables.ScriptList)
def mapFeatures(self, featureMap):
    for s in self.ScriptRecord:
        if not s or not s.Script:
            continue
        s.Script.mapFeatures(featureMap)


def layoutPreMerge(font):
    """Before merging: replace numeric lookup/feature indices with direct
    object references so tables can be combined without index collisions."""
    # Map indices to references

    GDEF = font.get("GDEF")
    GSUB = font.get("GSUB")
    GPOS = font.get("GPOS")

    for t in [GSUB, GPOS]:
        if not t:
            continue

        if t.table.LookupList:
            lookupMap = {i: v for i, v in enumerate(t.table.LookupList.Lookup)}
            t.table.LookupList.mapLookups(lookupMap)
            t.table.FeatureList.mapLookups(lookupMap)

            if (
                GDEF
                and GDEF.table.Version >= 0x00010002
                and GDEF.table.MarkGlyphSetsDef
            ):
                markFilteringSetMap = {
                    i: v for i, v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage)
                }
                t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)

        if t.table.FeatureList and t.table.ScriptList:
            featureMap = {i: v for i, v in enumerate(t.table.FeatureList.FeatureRecord)}
            t.table.ScriptList.mapFeatures(featureMap)

    # TODO FeatureParams nameIDs


def layoutPostMerge(font):
    """After merging: garbage-collect unused features/lookups and convert
    object references back into list indices."""
    # Map references back to indices

    GDEF = font.get("GDEF")
    GSUB = font.get("GSUB")
    GPOS = font.get("GPOS")

    for t in [GSUB, GPOS]:
        if not t:
            continue

        if t.table.FeatureList and t.table.ScriptList:
            # Collect unregistered (new) features.
            featureMap = GregariousIdentityDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)

            # Record used features.
            featureMap = AttendanceRecordingIdentityDict(
                t.table.FeatureList.FeatureRecord
            )
            t.table.ScriptList.mapFeatures(featureMap)
            usedIndices = featureMap.s

            # Remove unused features
            t.table.FeatureList.FeatureRecord = [
                f
                for i, f in enumerate(t.table.FeatureList.FeatureRecord)
                if i in usedIndices
            ]

            # Map back to indices.
            featureMap = NonhashableDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)

            t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord)

        if t.table.LookupList:
            # Collect unregistered (new) lookups.
            lookupMap = GregariousIdentityDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)

            # Record used lookups.
            lookupMap = AttendanceRecordingIdentityDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)
            usedIndices = lookupMap.s

            # Remove unused lookups
            t.table.LookupList.Lookup = [
                l for i, l in enumerate(t.table.LookupList.Lookup) if i in usedIndices
            ]

            # Map back to indices.
            lookupMap = NonhashableDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)

            t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup)

            if GDEF and GDEF.table.Version >= 0x00010002:
                markFilteringSetMap = NonhashableDict(
                    GDEF.table.MarkGlyphSetsDef.Coverage
                )
                t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)

    # TODO FeatureParams nameIDs
|
.venv\Lib\site-packages\fontTools\merge\layout.py
|
layout.py
|
Python
| 16,601 | 0.95 | 0.214829 | 0.055427 |
vue-tools
| 822 |
2024-09-09T06:45:26.719104
|
MIT
| false |
3890f22ce35094e4af79f2ad0d1ec8f5
|
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader


class Options(object):
    """Option container for the font merger, settable via keyword
    arguments or GNU-style ``--option`` command-line flags."""

    class UnknownOptionError(Exception):
        pass

    def __init__(self, **kwargs):
        self.verbose = False
        self.timing = False
        self.drop_tables = []
        self.input_file = None
        self.output_file = "merged.ttf"
        self.import_file = None

        self.set(**kwargs)

    def set(self, **kwargs):
        """Assign known option attributes; reject unknown option names."""
        for name, new_value in kwargs.items():
            if not hasattr(self, name):
                raise self.UnknownOptionError("Unknown option '%s'" % name)
            setattr(self, name, new_value)

    def parse_opts(self, argv, ignore_unknown=[]):
        """Consume ``--option`` arguments from *argv*.

        Supports ``--flag`` / ``--no-flag`` booleans, ``--opt=value``,
        and ``--list+=a,b`` / ``--list-=a,b`` to extend or prune list
        options.  Returns the arguments that were not consumed
        (positionals plus ignored unknown flags).
        """
        leftovers = []
        pending = {}
        for raw in argv:
            if not raw.startswith("--"):
                leftovers.append(raw)
                continue
            body = raw[2:]
            eq_pos = body.find("=")
            operator = "="
            if eq_pos == -1:
                # Bare flag: "--foo" means True, "--no-foo" means False.
                if body.startswith("no-"):
                    key = body[3:]
                    value = False
                else:
                    key = body
                    value = True
            else:
                key = body[:eq_pos]
                # Trailing '+'/'-' before '=' selects append/remove mode.
                if key[-1] in "-+":
                    operator = key[-1] + "="
                    key = key[:-1]
                value = body[eq_pos + 1 :]
            dashed_key = key
            key = key.replace("-", "_")
            if not hasattr(self, key):
                if ignore_unknown is True or dashed_key in ignore_unknown:
                    leftovers.append(raw)
                    continue
                else:
                    raise self.UnknownOptionError("Unknown option '%s'" % body)

            # Coerce to the type of the current value of the option.
            current = getattr(self, key)
            if isinstance(current, bool):
                value = bool(value)
            elif isinstance(current, int):
                value = int(value)
            elif isinstance(current, list):
                items = value.split(",")
                if items == [""]:
                    items = []
                # Numeric-looking elements become ints (base auto-detected).
                items = [
                    int(x, 0) if len(x) and x[0] in "0123456789" else x for x in items
                ]
                if operator == "=":
                    value = items
                elif operator == "+=":
                    value = current
                    value.extend(items)
                elif operator == "-=":
                    value = current
                    for x in items:
                        if x in value:
                            value.remove(x)
                else:
                    assert 0

            pending[key] = value
        self.set(**pending)

        return leftovers
|
.venv\Lib\site-packages\fontTools\merge\options.py
|
options.py
|
Python
| 2,586 | 0.95 | 0.247059 | 0.039474 |
node-utils
| 204 |
2024-04-27T14:20:25.277511
|
GPL-3.0
| false |
9ae5655642f18fe844662d5138844f3c
|
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader

from fontTools import ttLib, cffLib
from fontTools.misc.psCharStrings import T2WidthExtractor
from fontTools.ttLib.tables.DefaultTable import DefaultTable
from fontTools.merge.base import add_method, mergeObjects
from fontTools.merge.cmap import computeMegaCmap
from fontTools.merge.util import *
import logging


log = logging.getLogger("fontTools.merge")


# Module-import side effect: install per-field merge strategies
# (mergeMap dicts) on the ttLib table classes.
ttLib.getTableClass("maxp").mergeMap = {
    "*": max,
    "tableTag": equal,
    "tableVersion": equal,
    "numGlyphs": sum,
    "maxStorage": first,
    "maxFunctionDefs": first,
    "maxInstructionDefs": first,
    # TODO When we correctly merge hinting data, update these values:
    # maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
}

# Per-bit merge policy for head.flags (see mergeBits in merge/util.py).
headFlagsMergeBitMap = {
    "size": 16,
    "*": bitwise_or,
    1: bitwise_and,  # Baseline at y = 0
    2: bitwise_and,  # lsb at x = 0
    3: bitwise_and,  # Force ppem to integer values. FIXME?
    5: bitwise_and,  # Font is vertical
    6: lambda bit: 0,  # Always set to zero
    11: bitwise_and,  # Font data is 'lossless'
    13: bitwise_and,  # Optimized for ClearType
    14: bitwise_and,  # Last resort font. FIXME? equal or first may be better
    15: lambda bit: 0,  # Always set to zero
}

ttLib.getTableClass("head").mergeMap = {
    "tableTag": equal,
    "tableVersion": max,
    "fontRevision": max,
    "checkSumAdjustment": lambda lst: 0,  # We need *something* here
    "magicNumber": equal,
    "flags": mergeBits(headFlagsMergeBitMap),
    "unitsPerEm": equal,
    "created": current_time,
    "modified": current_time,
    "xMin": min,
    "yMin": min,
    "xMax": max,
    "yMax": max,
    "macStyle": first,
    "lowestRecPPEM": max,
    "fontDirectionHint": lambda lst: 2,
    "indexToLocFormat": first,
    "glyphDataFormat": equal,
}

ttLib.getTableClass("hhea").mergeMap = {
    "*": equal,
    "tableTag": equal,
    "tableVersion": max,
    "ascent": max,
    "descent": min,
    "lineGap": max,
    "advanceWidthMax": max,
    "minLeftSideBearing": min,
    "minRightSideBearing": min,
    "xMaxExtent": max,
    "caretSlopeRise": first,
    "caretSlopeRun": first,
    "caretOffset": first,
    "numberOfHMetrics": recalculate,
}

ttLib.getTableClass("vhea").mergeMap = {
    "*": equal,
    "tableTag": equal,
    "tableVersion": max,
    "ascent": max,
    "descent": min,
    "lineGap": max,
    "advanceHeightMax": max,
    "minTopSideBearing": min,
    "minBottomSideBearing": min,
    "yMaxExtent": max,
    "caretSlopeRise": first,
    "caretSlopeRun": first,
    "caretOffset": first,
    "numberOfVMetrics": recalculate,
}

# Per-bit merge policy for OS/2.fsType embedding permissions.
os2FsTypeMergeBitMap = {
    "size": 16,
    "*": lambda bit: 0,
    1: bitwise_or,  # no embedding permitted
    2: bitwise_and,  # allow previewing and printing documents
    3: bitwise_and,  # allow editing documents
    8: bitwise_or,  # no subsetting permitted
    9: bitwise_or,  # no embedding of outlines permitted
}


def mergeOs2FsType(lst):
    """Merge OS/2 fsType values, keeping the least restrictive common
    embedding permissions (a 0 anywhere short-circuits to 0)."""
    lst = list(lst)
    if all(item == 0 for item in lst):
        return 0

    # Compute least restrictive logic for each fsType value
    for i in range(len(lst)):
        # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set
        if lst[i] & 0x000C:
            lst[i] &= ~0x0002
        # set bit 2 (allow previewing) if bit 3 is set (allow editing)
        elif lst[i] & 0x0008:
            lst[i] |= 0x0004
        # set bits 2 and 3 if everything is allowed
        elif lst[i] == 0:
            lst[i] = 0x000C

    fsType = mergeBits(os2FsTypeMergeBitMap)(lst)
    # unset bits 2 and 3 if bit 1 is set (some font is "no embedding")
    if fsType & 0x0002:
        fsType &= ~0x000C
    return fsType


ttLib.getTableClass("OS/2").mergeMap = {
    "*": first,
    "tableTag": equal,
    "version": max,
    "xAvgCharWidth": first,  # Will be recalculated at the end on the merged font
    "fsType": mergeOs2FsType,  # Will be overwritten
    "panose": first,  # FIXME: should really be the first Latin font
    "ulUnicodeRange1": bitwise_or,
    "ulUnicodeRange2": bitwise_or,
    "ulUnicodeRange3": bitwise_or,
    "ulUnicodeRange4": bitwise_or,
    "fsFirstCharIndex": min,
    "fsLastCharIndex": max,
    "sTypoAscender": max,
    "sTypoDescender": min,
    "sTypoLineGap": max,
    "usWinAscent": max,
    "usWinDescent": max,
    # Version 1
    "ulCodePageRange1": onlyExisting(bitwise_or),
    "ulCodePageRange2": onlyExisting(bitwise_or),
    # Version 2, 3, 4
    "sxHeight": onlyExisting(max),
    "sCapHeight": onlyExisting(max),
    "usDefaultChar": onlyExisting(first),
    "usBreakChar": onlyExisting(first),
    "usMaxContext": onlyExisting(max),
    # version 5
    "usLowerOpticalPointSize": onlyExisting(min),
    "usUpperOpticalPointSize": onlyExisting(max),
}


@add_method(ttLib.getTableClass("OS/2"))
def merge(self, m, tables):
    """Merge OS/2 tables, then normalize version-dependent fsType bits."""
    DefaultTable.merge(self, m, tables)
    if self.version < 2:
        # bits 8 and 9 are reserved and should be set to zero
        self.fsType &= ~0x0300
    if self.version >= 3:
        # Only one of bits 1, 2, and 3 may be set. We already take
        # care of bit 1 implications in mergeOs2FsType. So unset
        # bit 2 if bit 3 is already set.
        if self.fsType & 0x0008:
            self.fsType &= ~0x0004
    return self


ttLib.getTableClass("post").mergeMap = {
    "*": first,
    "tableTag": equal,
    "formatType": max,
    "isFixedPitch": min,
    "minMemType42": max,
    "maxMemType42": lambda lst: 0,
    "minMemType1": max,
    "maxMemType1": lambda lst: 0,
    "mapping": onlyExisting(sumDicts),
    "extraNames": lambda lst: [],
}

ttLib.getTableClass("vmtx").mergeMap = ttLib.getTableClass("hmtx").mergeMap = {
    "tableTag": equal,
    "metrics": sumDicts,
}

ttLib.getTableClass("name").mergeMap = {
    "tableTag": equal,
    "names": first,  # FIXME? Does mixing name records make sense?
}

ttLib.getTableClass("loca").mergeMap = {
    "*": recalculate,
    "tableTag": equal,
}

ttLib.getTableClass("glyf").mergeMap = {
    "tableTag": equal,
    "glyphs": sumDicts,
    "glyphOrder": sumLists,
    "_reverseGlyphOrder": recalculate,
    "axisTags": equal,
}


@add_method(ttLib.getTableClass("glyf"))
def merge(self, m, tables):
    """Merge glyf tables: strip hints from all but the first font and
    expand composites before the dict-level merge."""
    for i, table in enumerate(tables):
        for g in table.glyphs.values():
            if i:
                # Drop hints for all but first font, since
                # we don't map functions / CVT values.
                g.removeHinting()
            # Expand composite glyphs to load their
            # composite glyph names.
            if g.isComposite():
                g.expand(table)
    return DefaultTable.merge(self, m, tables)


# Hinting-program tables cannot be reconciled; keep the first font's copy.
ttLib.getTableClass("prep").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("fpgm").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("cvt ").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("gasp").mergeMap = lambda self, lst: first(
    lst
)  # FIXME? Appears irreconcilable


@add_method(ttLib.getTableClass("CFF "))
def merge(self, m, tables):
    """Merge CFF tables into the first one; CID-keyed fonts unsupported."""
    if any(hasattr(table.cff[0], "FDSelect") for table in tables):
        raise NotImplementedError("Merging CID-keyed CFF tables is not supported yet")

    # Subroutine calls cannot survive a cross-font merge; inline them first.
    for table in tables:
        table.cff.desubroutinize()

    newcff = tables[0]
    newfont = newcff.cff[0]
    private = newfont.Private
    newDefaultWidthX, newNominalWidthX = private.defaultWidthX, private.nominalWidthX
    storedNamesStrings = []
    glyphOrderStrings = []
    glyphOrder = set(newfont.getGlyphOrder())

    for name in newfont.strings.strings:
        if name not in glyphOrder:
            storedNamesStrings.append(name)
        else:
            glyphOrderStrings.append(name)

    chrset = list(newfont.charset)
    newcs = newfont.CharStrings
    log.debug("FONT 0 CharStrings: %d.", len(newcs))

    # NOTE(review): the inner enumerate below reuses loop variable `i`,
    # shadowing the font index; harmless today since `i` is only logged
    # before the inner loop, but fragile — consider renaming.
    for i, table in enumerate(tables[1:], start=1):
        font = table.cff[0]
        defaultWidthX, nominalWidthX = (
            font.Private.defaultWidthX,
            font.Private.nominalWidthX,
        )
        widthsDiffer = (
            defaultWidthX != newDefaultWidthX or nominalWidthX != newNominalWidthX
        )
        font.Private = private
        fontGlyphOrder = set(font.getGlyphOrder())
        for name in font.strings.strings:
            if name in fontGlyphOrder:
                glyphOrderStrings.append(name)
        cs = font.CharStrings
        gs = table.cff.GlobalSubrs
        log.debug("Font %d CharStrings: %d.", i, len(cs))
        chrset.extend(font.charset)
        if newcs.charStringsAreIndexed:
            for i, name in enumerate(cs.charStrings, start=len(newcs)):
                newcs.charStrings[name] = i
                newcs.charStringsIndex.items.append(None)
        for name in cs.charStrings:
            # If the fonts disagree on width defaults, rewrite each
            # charstring's leading width so it is correct under the
            # first font's Private dict.
            if widthsDiffer:
                c = cs[name]
                defaultWidthXToken = object()
                extractor = T2WidthExtractor([], [], nominalWidthX, defaultWidthXToken)
                extractor.execute(c)
                width = extractor.width
                if width is not defaultWidthXToken:
                    # The following will be wrong if the width is added
                    # by a subroutine. Ouch!
                    c.program.pop(0)
                else:
                    width = defaultWidthX
                if width != newDefaultWidthX:
                    c.program.insert(0, width - newNominalWidthX)
            newcs[name] = cs[name]

    newfont.charset = chrset
    newfont.numGlyphs = len(chrset)
    newfont.strings.strings = glyphOrderStrings + storedNamesStrings

    return newcff


@add_method(ttLib.getTableClass("cmap"))
def merge(self, m, tables):
    """Build the merged cmap from the merger's precomputed mega cmap,
    emitting format-4 (BMP), and format-12/14 subtables when needed."""
    # The merger lazily computes a single combined cmap once.
    if not hasattr(m, "cmap"):
        computeMegaCmap(m, tables)
    cmap = m.cmap

    cmapBmpOnly = {uni: gid for uni, gid in cmap.items() if uni <= 0xFFFF}
    self.tables = []
    module = ttLib.getTableModule("cmap")
    if len(cmapBmpOnly) != len(cmap):
        # format-12 required.
        cmapTable = module.cmap_classes[12](12)
        cmapTable.platformID = 3
        cmapTable.platEncID = 10
        cmapTable.language = 0
        cmapTable.cmap = cmap
        self.tables.append(cmapTable)
    # always create format-4
    cmapTable = module.cmap_classes[4](4)
    cmapTable.platformID = 3
    cmapTable.platEncID = 1
    cmapTable.language = 0
    cmapTable.cmap = cmapBmpOnly
    # ordered by platform then encoding
    self.tables.insert(0, cmapTable)

    uvsDict = m.uvsDict
    if uvsDict:
        # format-14
        uvsTable = module.cmap_classes[14](14)
        uvsTable.platformID = 0
        uvsTable.platEncID = 5
        uvsTable.language = 0
        uvsTable.cmap = {}
        uvsTable.uvsDict = uvsDict
        # ordered by platform then encoding
        self.tables.insert(0, uvsTable)
    self.tableVersion = 0
    self.numSubTables = len(self.tables)
    return self
|
.venv\Lib\site-packages\fontTools\merge\tables.py
|
tables.py
|
Python
| 11,310 | 0.95 | 0.127841 | 0.090032 |
node-utils
| 174 |
2023-08-03T19:08:53.841271
|
GPL-3.0
| false |
8faf1d77b00d7b1a38e25033acadc1be
|
# Copyright 2021 Behdad Esfahbod. All Rights Reserved.


def is_Default_Ignorable(u):
    """Return True if codepoint *u* has the Unicode
    Default_Ignorable_Code_Point property.

    http://www.unicode.org/reports/tr44/#Default_Ignorable_Code_Point

    Transcribed from Unicode 14.0:
      $ grep '; Default_Ignorable_Code_Point ' DerivedCoreProperties.txt

    TODO Move me to unicodedata module and autogenerate.
    """
    # Fix: the previous or-chain carried trailing comments that were all
    # attached to the wrong line, making the table impossible to audit.
    # Same membership, expressed as singletons plus inclusive ranges
    # (adjacent property ranges are coalesced; behavior is unchanged).
    if u in (
        0x00AD,  # Cf SOFT HYPHEN
        0x034F,  # Mn COMBINING GRAPHEME JOINER
        0x061C,  # Cf ARABIC LETTER MARK
        0x3164,  # Lo HANGUL FILLER
        0xFEFF,  # Cf ZERO WIDTH NO-BREAK SPACE
        0xFFA0,  # Lo HALFWIDTH HANGUL FILLER
    ):
        return True
    ranges = (
        (0x115F, 0x1160),  # HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
        (0x17B4, 0x17B5),  # KHMER VOWEL INHERENT AQ..AA
        (0x180B, 0x180F),  # MONGOLIAN FVS1..FVS3, VOWEL SEPARATOR, FVS4
        (0x200B, 0x200F),  # ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
        (0x202A, 0x202E),  # LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
        (0x2060, 0x206F),  # WORD JOINER..NOMINAL DIGIT SHAPES (incl. 2065)
        (0xFE00, 0xFE0F),  # VARIATION SELECTOR-1..16
        (0xFFF0, 0xFFF8),  # <reserved-FFF0>..<reserved-FFF8>
        (0x1BCA0, 0x1BCA3),  # SHORTHAND FORMAT LETTER OVERLAP..UP STEP
        (0x1D173, 0x1D17A),  # MUSICAL SYMBOL BEGIN BEAM..END PHRASE
        (0xE0000, 0xE0FFF),  # tag characters, VS17..VS256, and reserved
    )
    return any(lo <= u <= hi for lo, hi in ranges)
|
.venv\Lib\site-packages\fontTools\merge\unicode.py
|
unicode.py
|
Python
| 4,351 | 0.95 | 0.012821 | 0.447368 |
node-utils
| 465 |
2024-12-07T13:38:52.311105
|
GPL-3.0
| false |
a98ca77e3b597fde3eab153b10e32014
|
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader

from fontTools.misc.timeTools import timestampNow
from fontTools.ttLib.tables.DefaultTable import DefaultTable
from functools import reduce
import operator
import logging


log = logging.getLogger("fontTools.merge")


# General utility functions for merging values from different fonts


def equal(lst):
    """Merge logic: all values must be equal; return the common value."""
    lst = list(lst)
    t = iter(lst)
    first = next(t)
    assert all(item == first for item in t), "Expected all items to be equal: %s" % lst
    return first


def first(lst):
    """Merge logic: take the value from the first font."""
    return next(iter(lst))


def recalculate(lst):
    """Merge logic placeholder: the value is recalculated elsewhere."""
    return NotImplemented


def current_time(lst):
    """Merge logic: replace the values with the current timestamp."""
    return timestampNow()


def bitwise_and(lst):
    """Merge logic: bitwise-AND of all values."""
    return reduce(operator.and_, lst)


def bitwise_or(lst):
    """Merge logic: bitwise-OR of all values."""
    return reduce(operator.or_, lst)


def avg_int(lst):
    """Merge logic: floor average of the values."""
    lst = list(lst)
    return sum(lst) // len(lst)


def onlyExisting(func):
    """Returns a filter func that when called with a list,
    only calls func on the non-NotImplemented items of the list,
    and only so if there's at least one item remaining.
    Otherwise returns NotImplemented."""

    def wrapper(lst):
        items = [item for item in lst if item is not NotImplemented]
        return func(items) if items else NotImplemented

    return wrapper


def sumLists(lst):
    """Merge logic: concatenate all the lists into one."""
    l = []
    for item in lst:
        l.extend(item)
    return l


def sumDicts(lst):
    """Merge logic: union of all the dicts; later fonts win on key clashes."""
    d = {}
    for item in lst:
        d.update(item)
    return d


def mergeBits(bitmap):
    """Return a merge function that merges integer bit-fields bit by bit.

    *bitmap* maps a bit number to the merge logic for that bit (a callable
    taking an iterable of booleans); the special key "*" supplies a default
    and "size" gives the total number of bits to process.
    """

    def wrapper(lst):
        lst = list(lst)
        returnValue = 0
        for bitNumber in range(bitmap["size"]):
            try:
                mergeLogic = bitmap[bitNumber]
            except KeyError:
                try:
                    mergeLogic = bitmap["*"]
                except KeyError:
                    raise Exception("Don't know how to merge bit %s" % bitNumber)
            shiftedBit = 1 << bitNumber
            mergedValue = mergeLogic(bool(item & shiftedBit) for item in lst)
            returnValue |= mergedValue << bitNumber
        return returnValue

    return wrapper


class AttendanceRecordingIdentityDict(object):
    """A dictionary-like object that records indices of items actually accessed
    from a list."""

    def __init__(self, lst):
        self.l = lst
        # Map object identity -> index in the list.
        self.d = {id(v): i for i, v in enumerate(lst)}
        # Indices that have been looked up so far.
        self.s = set()

    def __getitem__(self, v):
        self.s.add(self.d[id(v)])
        return v


class GregariousIdentityDict(object):
    """A dictionary-like object that welcomes guests without reservations and
    adds them to the end of the guest list."""

    def __init__(self, lst):
        self.l = lst
        self.s = set(id(v) for v in lst)

    def __getitem__(self, v):
        # Unknown objects are appended to the list on first lookup.
        if id(v) not in self.s:
            self.s.add(id(v))
            self.l.append(v)
        return v


class NonhashableDict(object):
    """A dictionary-like object mapping objects to values.

    Keys are matched by identity (``id``), so unhashable objects may be used.
    """

    def __init__(self, keys, values=None):
        if values is None:
            # Default mapping: each key maps to its position in *keys*.
            self.d = {id(v): i for i, v in enumerate(keys)}
        else:
            self.d = {id(k): v for k, v in zip(keys, values)}

    def __getitem__(self, k):
        return self.d[id(k)]

    def __setitem__(self, k, v):
        self.d[id(k)] = v

    def __delitem__(self, k):
        del self.d[id(k)]
|
.venv\Lib\site-packages\fontTools\merge\util.py
|
util.py
|
Python
| 3,521 | 0.95 | 0.293706 | 0.040404 |
react-lib
| 538 |
2024-05-04T00:59:12.121093
|
GPL-3.0
| false |
2d1d36067e288e7337d4488d78053903
|
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader

from fontTools import ttLib
import fontTools.merge.base
from fontTools.merge.cmap import (
    computeMegaGlyphOrder,
    computeMegaCmap,
    renameCFFCharStrings,
)
from fontTools.merge.layout import layoutPreMerge, layoutPostMerge
from fontTools.merge.options import Options
import fontTools.merge.tables
from fontTools.misc.loggingTools import Timer
from functools import reduce
import sys
import logging


log = logging.getLogger("fontTools.merge")
timer = Timer(logger=logging.getLogger(__name__ + ".timer"), level=logging.INFO)


class Merger(object):
    """Font merger.

    This class merges multiple files into a single OpenType font, taking into
    account complexities such as OpenType layout (``GSUB``/``GPOS``) tables and
    cross-font metrics (for example ``hhea.ascent`` is set to the maximum value
    across all the fonts).

    If multiple glyphs map to the same Unicode value, and the glyphs are considered
    sufficiently different (that is, they differ in any of paths, widths, or
    height), then subsequent glyphs are renamed and a lookup in the ``locl``
    feature will be created to disambiguate them. For example, if the arguments
    are an Arabic font and a Latin font and both contain a set of parentheses,
    the Latin glyphs will be renamed to ``parenleft.1`` and ``parenright.1``,
    and a lookup will be inserted into the ``locl`` feature (creating it if
    necessary) under the ``latn`` script to substitute ``parenleft`` with
    ``parenleft.1`` etc.

    Restrictions:

    - All fonts must have the same units per em.
    - If duplicate glyph disambiguation takes place as described above then the
      fonts must have a ``GSUB`` table.

    Attributes:
        options: Currently unused.
    """

    def __init__(self, options=None):
        if not options:
            options = Options()

        self.options = options

    def _openFonts(self, fontfiles):
        # Open all inputs, tagging each font with its source path and its
        # full name (name table ID 4) for use in diagnostics.
        fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
        for font, fontfile in zip(fonts, fontfiles):
            font._merger__fontfile = fontfile
            font._merger__name = font["name"].getDebugName(4)
        return fonts

    def merge(self, fontfiles):
        """Merges fonts together.

        Args:
            fontfiles: A list of file names to be merged

        Returns:
            A :class:`fontTools.ttLib.TTFont` object. Call the ``save`` method on
            this to write it out to an OTF file.
        """
        #
        # Settle on a mega glyph order.
        #
        fonts = self._openFonts(fontfiles)
        glyphOrders = [list(font.getGlyphOrder()) for font in fonts]
        computeMegaGlyphOrder(self, glyphOrders)

        # Take first input file sfntVersion
        sfntVersion = fonts[0].sfntVersion

        # Reload fonts and set new glyph names on them.
        fonts = self._openFonts(fontfiles)
        for font, glyphOrder in zip(fonts, glyphOrders):
            font.setGlyphOrder(glyphOrder)
            if "CFF " in font:
                renameCFFCharStrings(self, glyphOrder, font["CFF "])

        cmaps = [font["cmap"] for font in fonts]
        self.duplicateGlyphsPerFont = [{} for _ in fonts]
        computeMegaCmap(self, cmaps)

        mega = ttLib.TTFont(sfntVersion=sfntVersion)
        mega.setGlyphOrder(self.glyphOrder)

        for font in fonts:
            self._preMerge(font)

        self.fonts = fonts

        # Union of all table tags present in any input font.
        allTags = reduce(set.union, (list(font.keys()) for font in fonts), set())
        allTags.remove("GlyphOrder")

        for tag in sorted(allTags):
            if tag in self.options.drop_tables:
                continue

            with timer("merge '%s'" % tag):
                # NotImplemented marks fonts that lack this table.
                tables = [font.get(tag, NotImplemented) for font in fonts]

                log.info("Merging '%s'.", tag)
                clazz = ttLib.getTableClass(tag)
                table = clazz(tag).merge(self, tables)
                # XXX Clean this up and use:  table = mergeObjects(tables)

                if table is not NotImplemented and table is not False:
                    mega[tag] = table
                    log.info("Merged '%s'.", tag)
                else:
                    log.info("Dropped '%s'.", tag)

        del self.duplicateGlyphsPerFont
        del self.fonts

        self._postMerge(mega)

        return mega

    def mergeObjects(self, returnTable, logic, tables):
        """Merge *tables* attribute-by-attribute into *returnTable* using the
        per-key merge functions in *logic* ("*" is the wildcard default)."""
        # Right now we don't use self at all.  Will use in the future
        # for options and logging.

        allKeys = set.union(
            set(),
            *(vars(table).keys() for table in tables if table is not NotImplemented),
        )
        for key in allKeys:
            log.info(" %s", key)
            try:
                mergeLogic = logic[key]
            except KeyError:
                try:
                    mergeLogic = logic["*"]
                except KeyError:
                    raise Exception(
                        "Don't know how to merge key %s of class %s"
                        % (key, returnTable.__class__.__name__)
                    )
            if mergeLogic is NotImplemented:
                continue
            value = mergeLogic(getattr(table, key, NotImplemented) for table in tables)
            if value is not NotImplemented:
                setattr(returnTable, key, value)

        return returnTable

    def _preMerge(self, font):
        layoutPreMerge(font)

    def _postMerge(self, font):
        layoutPostMerge(font)

        if "OS/2" in font:
            # https://github.com/fonttools/fonttools/issues/2538
            # TODO: Add an option to disable this?
            font["OS/2"].recalcAvgCharWidth(font)


__all__ = ["Options", "Merger", "main"]


@timer("make one with everything (TOTAL TIME)")
def main(args=None):
    """Merge multiple fonts into one"""
    from fontTools import configLogger

    if args is None:
        args = sys.argv[1:]

    options = Options()
    args = options.parse_opts(args)
    fontfiles = []
    # Inputs may come from a list file (comment lines start with '#') and/or
    # from the remaining positional arguments.
    if options.input_file:
        with open(options.input_file) as inputfile:
            fontfiles = [
                line.strip()
                for line in inputfile.readlines()
                if not line.lstrip().startswith("#")
            ]
    for g in args:
        fontfiles.append(g)

    if len(fontfiles) < 1:
        print(
            "usage: pyftmerge [font1 ... fontN] [--input-file=filelist.txt] [--output-file=merged.ttf] [--import-file=tables.ttx]",
            file=sys.stderr,
        )
        print(
            "                 [--drop-tables=tags] [--verbose] [--timing]",
            file=sys.stderr,
        )
        print("", file=sys.stderr)
        print(" font1 ... fontN              Files to merge.", file=sys.stderr)
        print(
            " --input-file=<filename>      Read files to merge from a text file, each path new line.  # Comment lines allowed.",
            file=sys.stderr,
        )
        print(
            " --output-file=<filename>     Specify output file name (default: merged.ttf).",
            file=sys.stderr,
        )
        print(
            " --import-file=<filename>     TTX file to import after merging. This can be used to set metadata.",
            file=sys.stderr,
        )
        print(
            " --drop-tables=<table tags>   Comma separated list of table tags to skip, case sensitive.",
            file=sys.stderr,
        )
        print(
            " --verbose                    Output progress information.",
            file=sys.stderr,
        )
        print(" --timing                     Output progress timing.", file=sys.stderr)
        return 1

    configLogger(level=logging.INFO if options.verbose else logging.WARNING)
    if options.timing:
        timer.logger.setLevel(logging.DEBUG)
    else:
        timer.logger.disabled = True

    merger = Merger(options=options)
    font = merger.merge(fontfiles)

    if options.import_file:
        font.importXML(options.import_file)

    with timer("compile and save font"):
        font.save(options.output_file)


if __name__ == "__main__":
    sys.exit(main())
|
.venv\Lib\site-packages\fontTools\merge\__init__.py
|
__init__.py
|
Python
| 8,498 | 0.95 | 0.193548 | 0.070352 |
python-kit
| 651 |
2024-09-29T19:50:19.212196
|
Apache-2.0
| false |
5fc2fd8c18e2bd12410d54f2b21b7847
|
"""Entry point so ``python -m fontTools.merge`` invokes the merger CLI."""

import sys

from fontTools.merge import main

if __name__ == "__main__":
    sys.exit(main())
|
.venv\Lib\site-packages\fontTools\merge\__main__.py
|
__main__.py
|
Python
| 100 | 0.65 | 0.166667 | 0 |
python-kit
| 297 |
2024-09-29T16:22:35.550481
|
GPL-3.0
| false |
983ff2e36c95d629f497b1e92325a0f2
|
\n\n
|
.venv\Lib\site-packages\fontTools\merge\__pycache__\base.cpython-313.pyc
|
base.cpython-313.pyc
|
Other
| 3,925 | 0.95 | 0.114286 | 0 |
awesome-app
| 31 |
2023-10-14T00:14:45.579287
|
BSD-3-Clause
| false |
b6ab66bb9a48ae5d5e19a6663910776f
|
\n\n
|
.venv\Lib\site-packages\fontTools\merge\__pycache__\cmap.cpython-313.pyc
|
cmap.cpython-313.pyc
|
Other
| 6,570 | 0.8 | 0 | 0.036364 |
node-utils
| 278 |
2024-03-29T22:57:12.569888
|
BSD-3-Clause
| false |
9dea2a85520ce4e5cfd74b113a3a1212
|
\n\n
|
.venv\Lib\site-packages\fontTools\merge\__pycache__\layout.cpython-313.pyc
|
layout.cpython-313.pyc
|
Other
| 24,310 | 0.8 | 0.005405 | 0.00578 |
awesome-app
| 952 |
2024-02-09T00:20:13.297956
|
Apache-2.0
| false |
8c682cbb20f25d093a4589cd66ce7b64
|
\n\n
|
.venv\Lib\site-packages\fontTools\merge\__pycache__\options.cpython-313.pyc
|
options.cpython-313.pyc
|
Other
| 3,468 | 0.95 | 0 | 0.021739 |
react-lib
| 507 |
2023-09-11T22:12:04.536457
|
Apache-2.0
| false |
f1a03d78dbc507668c1e8c85866984d2
|
\n\n
|
.venv\Lib\site-packages\fontTools\merge\__pycache__\tables.cpython-313.pyc
|
tables.cpython-313.pyc
|
Other
| 13,087 | 0.8 | 0 | 0 |
vue-tools
| 745 |
2023-12-01T02:48:25.702511
|
GPL-3.0
| false |
5a21d06335c54f1a329e83886382df96
|
\n\n
|
.venv\Lib\site-packages\fontTools\merge\__pycache__\unicode.cpython-313.pyc
|
unicode.cpython-313.pyc
|
Other
| 2,373 | 0.8 | 0.066667 | 0 |
node-utils
| 264 |
2024-03-08T13:47:25.621673
|
GPL-3.0
| false |
ab71bb3d43bae670d482e9c00d1d4f43
|
\n\n
|
.venv\Lib\site-packages\fontTools\merge\__pycache__\util.cpython-313.pyc
|
util.cpython-313.pyc
|
Other
| 7,709 | 0.8 | 0.020408 | 0 |
vue-tools
| 974 |
2023-10-26T06:28:39.764558
|
GPL-3.0
| false |
e8ab4a94755206aab243d1f0d7a1b6f7
|
\n\n
|
.venv\Lib\site-packages\fontTools\merge\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 11,443 | 0.95 | 0.05042 | 0 |
node-utils
| 906 |
2023-11-25T11:57:10.488697
|
BSD-3-Clause
| false |
e88b8fdf0c4b693c15d04ae7209f803d
|
\n\n
|
.venv\Lib\site-packages\fontTools\merge\__pycache__\__main__.cpython-313.pyc
|
__main__.cpython-313.pyc
|
Other
| 368 | 0.7 | 0 | 0 |
python-kit
| 750 |
2023-10-11T08:15:22.246349
|
GPL-3.0
| false |
fd5ba764a1e1807f6853d19c41742333
|
"""Routines for calculating bounding boxes, point in rectangle calculations and\nso on.\n"""\n\nfrom fontTools.misc.roundTools import otRound\nfrom fontTools.misc.vector import Vector as _Vector\nimport math\nimport warnings\n\n\ndef calcBounds(array):\n """Calculate the bounding rectangle of a 2D points array.\n\n Args:\n array: A sequence of 2D tuples.\n\n Returns:\n A four-item tuple representing the bounding rectangle ``(xMin, yMin, xMax, yMax)``.\n """\n if not array:\n return 0, 0, 0, 0\n xs = [x for x, y in array]\n ys = [y for x, y in array]\n return min(xs), min(ys), max(xs), max(ys)\n\n\ndef calcIntBounds(array, round=otRound):\n """Calculate the integer bounding rectangle of a 2D points array.\n\n Values are rounded to closest integer towards ``+Infinity`` using the\n :func:`fontTools.misc.fixedTools.otRound` function by default, unless\n an optional ``round`` function is passed.\n\n Args:\n array: A sequence of 2D tuples.\n round: A rounding function of type ``f(x: float) -> int``.\n\n Returns:\n A four-item tuple of integers representing the bounding rectangle:\n ``(xMin, yMin, xMax, yMax)``.\n """\n return tuple(round(v) for v in calcBounds(array))\n\n\ndef updateBounds(bounds, p, min=min, max=max):\n """Add a point to a bounding rectangle.\n\n Args:\n bounds: A bounding rectangle expressed as a tuple\n ``(xMin, yMin, xMax, yMax), or None``.\n p: A 2D tuple representing a point.\n min,max: functions to compute the minimum and maximum.\n\n Returns:\n The updated bounding rectangle ``(xMin, yMin, xMax, yMax)``.\n """\n (x, y) = p\n if bounds is None:\n return x, y, x, y\n xMin, yMin, xMax, yMax = bounds\n return min(xMin, x), min(yMin, y), max(xMax, x), max(yMax, y)\n\n\ndef pointInRect(p, rect):\n """Test if a point is inside a bounding rectangle.\n\n Args:\n p: A 2D tuple representing a point.\n rect: A bounding rectangle expressed as a tuple\n ``(xMin, yMin, xMax, yMax)``.\n\n Returns:\n ``True`` if the point is inside the rectangle, ``False`` 
otherwise.\n """\n (x, y) = p\n xMin, yMin, xMax, yMax = rect\n return (xMin <= x <= xMax) and (yMin <= y <= yMax)\n\n\ndef pointsInRect(array, rect):\n """Determine which points are inside a bounding rectangle.\n\n Args:\n array: A sequence of 2D tuples.\n rect: A bounding rectangle expressed as a tuple\n ``(xMin, yMin, xMax, yMax)``.\n\n Returns:\n A list containing the points inside the rectangle.\n """\n if len(array) < 1:\n return []\n xMin, yMin, xMax, yMax = rect\n return [(xMin <= x <= xMax) and (yMin <= y <= yMax) for x, y in array]\n\n\ndef vectorLength(vector):\n """Calculate the length of the given vector.\n\n Args:\n vector: A 2D tuple.\n\n Returns:\n The Euclidean length of the vector.\n """\n x, y = vector\n return math.sqrt(x**2 + y**2)\n\n\ndef asInt16(array):\n """Round a list of floats to 16-bit signed integers.\n\n Args:\n array: List of float values.\n\n Returns:\n A list of rounded integers.\n """\n return [int(math.floor(i + 0.5)) for i in array]\n\n\ndef normRect(rect):\n """Normalize a bounding box rectangle.\n\n This function "turns the rectangle the right way up", so that the following\n holds::\n\n xMin <= xMax and yMin <= yMax\n\n Args:\n rect: A bounding rectangle expressed as a tuple\n ``(xMin, yMin, xMax, yMax)``.\n\n Returns:\n A normalized bounding rectangle.\n """\n (xMin, yMin, xMax, yMax) = rect\n return min(xMin, xMax), min(yMin, yMax), max(xMin, xMax), max(yMin, yMax)\n\n\ndef scaleRect(rect, x, y):\n """Scale a bounding box rectangle.\n\n Args:\n rect: A bounding rectangle expressed as a tuple\n ``(xMin, yMin, xMax, yMax)``.\n x: Factor to scale the rectangle along the X axis.\n Y: Factor to scale the rectangle along the Y axis.\n\n Returns:\n A scaled bounding rectangle.\n """\n (xMin, yMin, xMax, yMax) = rect\n return xMin * x, yMin * y, xMax * x, yMax * y\n\n\ndef offsetRect(rect, dx, dy):\n """Offset a bounding box rectangle.\n\n Args:\n rect: A bounding rectangle expressed as a tuple\n ``(xMin, yMin, xMax, yMax)``.\n dx: 
Amount to offset the rectangle along the X axis.\n dY: Amount to offset the rectangle along the Y axis.\n\n Returns:\n An offset bounding rectangle.\n """\n (xMin, yMin, xMax, yMax) = rect\n return xMin + dx, yMin + dy, xMax + dx, yMax + dy\n\n\ndef insetRect(rect, dx, dy):\n """Inset a bounding box rectangle on all sides.\n\n Args:\n rect: A bounding rectangle expressed as a tuple\n ``(xMin, yMin, xMax, yMax)``.\n dx: Amount to inset the rectangle along the X axis.\n dY: Amount to inset the rectangle along the Y axis.\n\n Returns:\n An inset bounding rectangle.\n """\n (xMin, yMin, xMax, yMax) = rect\n return xMin + dx, yMin + dy, xMax - dx, yMax - dy\n\n\ndef sectRect(rect1, rect2):\n """Test for rectangle-rectangle intersection.\n\n Args:\n rect1: First bounding rectangle, expressed as tuples\n ``(xMin, yMin, xMax, yMax)``.\n rect2: Second bounding rectangle.\n\n Returns:\n A boolean and a rectangle.\n If the input rectangles intersect, returns ``True`` and the intersecting\n rectangle. 
Returns ``False`` and ``(0, 0, 0, 0)`` if the input\n rectangles don't intersect.\n """\n (xMin1, yMin1, xMax1, yMax1) = rect1\n (xMin2, yMin2, xMax2, yMax2) = rect2\n xMin, yMin, xMax, yMax = (\n max(xMin1, xMin2),\n max(yMin1, yMin2),\n min(xMax1, xMax2),\n min(yMax1, yMax2),\n )\n if xMin >= xMax or yMin >= yMax:\n return False, (0, 0, 0, 0)\n return True, (xMin, yMin, xMax, yMax)\n\n\ndef unionRect(rect1, rect2):\n """Determine union of bounding rectangles.\n\n Args:\n rect1: First bounding rectangle, expressed as tuples\n ``(xMin, yMin, xMax, yMax)``.\n rect2: Second bounding rectangle.\n\n Returns:\n The smallest rectangle in which both input rectangles are fully\n enclosed.\n """\n (xMin1, yMin1, xMax1, yMax1) = rect1\n (xMin2, yMin2, xMax2, yMax2) = rect2\n xMin, yMin, xMax, yMax = (\n min(xMin1, xMin2),\n min(yMin1, yMin2),\n max(xMax1, xMax2),\n max(yMax1, yMax2),\n )\n return (xMin, yMin, xMax, yMax)\n\n\ndef rectCenter(rect):\n """Determine rectangle center.\n\n Args:\n rect: Bounding rectangle, expressed as tuples\n ``(xMin, yMin, xMax, yMax)``.\n\n Returns:\n A 2D tuple representing the point at the center of the rectangle.\n """\n (xMin, yMin, xMax, yMax) = rect\n return (xMin + xMax) / 2, (yMin + yMax) / 2\n\n\ndef rectArea(rect):\n """Determine rectangle area.\n\n Args:\n rect: Bounding rectangle, expressed as tuples\n ``(xMin, yMin, xMax, yMax)``.\n\n Returns:\n The area of the rectangle.\n """\n (xMin, yMin, xMax, yMax) = rect\n return (yMax - yMin) * (xMax - xMin)\n\n\ndef intRect(rect):\n """Round a rectangle to integer values.\n\n Guarantees that the resulting rectangle is NOT smaller than the original.\n\n Args:\n rect: Bounding rectangle, expressed as tuples\n ``(xMin, yMin, xMax, yMax)``.\n\n Returns:\n A rounded bounding rectangle.\n """\n (xMin, yMin, xMax, yMax) = rect\n xMin = int(math.floor(xMin))\n yMin = int(math.floor(yMin))\n xMax = int(math.ceil(xMax))\n yMax = int(math.ceil(yMax))\n return (xMin, yMin, xMax, yMax)\n\n\ndef 
quantizeRect(rect, factor=1):\n """\n >>> bounds = (72.3, -218.4, 1201.3, 919.1)\n >>> quantizeRect(bounds)\n (72, -219, 1202, 920)\n >>> quantizeRect(bounds, factor=10)\n (70, -220, 1210, 920)\n >>> quantizeRect(bounds, factor=100)\n (0, -300, 1300, 1000)\n """\n if factor < 1:\n raise ValueError(f"Expected quantization factor >= 1, found: {factor!r}")\n xMin, yMin, xMax, yMax = normRect(rect)\n return (\n int(math.floor(xMin / factor) * factor),\n int(math.floor(yMin / factor) * factor),\n int(math.ceil(xMax / factor) * factor),\n int(math.ceil(yMax / factor) * factor),\n )\n\n\nclass Vector(_Vector):\n def __init__(self, *args, **kwargs):\n warnings.warn(\n "fontTools.misc.arrayTools.Vector has been deprecated, please use "\n "fontTools.misc.vector.Vector instead.",\n DeprecationWarning,\n )\n\n\ndef pairwise(iterable, reverse=False):\n """Iterate over current and next items in iterable.\n\n Args:\n iterable: An iterable\n reverse: If true, iterate in reverse order.\n\n Returns:\n A iterable yielding two elements per iteration.\n\n Example:\n\n >>> tuple(pairwise([]))\n ()\n >>> tuple(pairwise([], reverse=True))\n ()\n >>> tuple(pairwise([0]))\n ((0, 0),)\n >>> tuple(pairwise([0], reverse=True))\n ((0, 0),)\n >>> tuple(pairwise([0, 1]))\n ((0, 1), (1, 0))\n >>> tuple(pairwise([0, 1], reverse=True))\n ((1, 0), (0, 1))\n >>> tuple(pairwise([0, 1, 2]))\n ((0, 1), (1, 2), (2, 0))\n >>> tuple(pairwise([0, 1, 2], reverse=True))\n ((2, 1), (1, 0), (0, 2))\n >>> tuple(pairwise(['a', 'b', 'c', 'd']))\n (('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'a'))\n >>> tuple(pairwise(['a', 'b', 'c', 'd'], reverse=True))\n (('d', 'c'), ('c', 'b'), ('b', 'a'), ('a', 'd'))\n """\n if not iterable:\n return\n if reverse:\n it = reversed(iterable)\n else:\n it = iter(iterable)\n first = next(it, None)\n a = first\n for b in it:\n yield (a, b)\n a = b\n yield (a, first)\n\n\ndef _test():\n """\n >>> import math\n >>> calcBounds([])\n (0, 0, 0, 0)\n >>> calcBounds([(0, 40), (0, 100), (50, 
50), (80, 10)])\n (0, 10, 80, 100)\n >>> updateBounds((0, 0, 0, 0), (100, 100))\n (0, 0, 100, 100)\n >>> pointInRect((50, 50), (0, 0, 100, 100))\n True\n >>> pointInRect((0, 0), (0, 0, 100, 100))\n True\n >>> pointInRect((100, 100), (0, 0, 100, 100))\n True\n >>> not pointInRect((101, 100), (0, 0, 100, 100))\n True\n >>> list(pointsInRect([(50, 50), (0, 0), (100, 100), (101, 100)], (0, 0, 100, 100)))\n [True, True, True, False]\n >>> vectorLength((3, 4))\n 5.0\n >>> vectorLength((1, 1)) == math.sqrt(2)\n True\n >>> list(asInt16([0, 0.1, 0.5, 0.9]))\n [0, 0, 1, 1]\n >>> normRect((0, 10, 100, 200))\n (0, 10, 100, 200)\n >>> normRect((100, 200, 0, 10))\n (0, 10, 100, 200)\n >>> scaleRect((10, 20, 50, 150), 1.5, 2)\n (15.0, 40, 75.0, 300)\n >>> offsetRect((10, 20, 30, 40), 5, 6)\n (15, 26, 35, 46)\n >>> insetRect((10, 20, 50, 60), 5, 10)\n (15, 30, 45, 50)\n >>> insetRect((10, 20, 50, 60), -5, -10)\n (5, 10, 55, 70)\n >>> intersects, rect = sectRect((0, 10, 20, 30), (0, 40, 20, 50))\n >>> not intersects\n True\n >>> intersects, rect = sectRect((0, 10, 20, 30), (5, 20, 35, 50))\n >>> intersects\n 1\n >>> rect\n (5, 20, 20, 30)\n >>> unionRect((0, 10, 20, 30), (0, 40, 20, 50))\n (0, 10, 20, 50)\n >>> rectCenter((0, 0, 100, 200))\n (50.0, 100.0)\n >>> rectCenter((0, 0, 100, 199.0))\n (50.0, 99.5)\n >>> intRect((0.9, 2.9, 3.1, 4.1))\n (0, 2, 4, 5)\n """\n\n\nif __name__ == "__main__":\n import sys\n import doctest\n\n sys.exit(doctest.testmod().failed)\n
|
.venv\Lib\site-packages\fontTools\misc\arrayTools.py
|
arrayTools.py
|
Python
| 11,907 | 0.85 | 0.103774 | 0 |
python-kit
| 154 |
2024-07-06T04:29:00.572168
|
BSD-3-Clause
| false |
bb6010c3f1352539b580d26d11f58454
|
""" fontTools.misc.classifyTools.py -- tools for classifying things.\n"""\n\n\nclass Classifier(object):\n """\n Main Classifier object, used to classify things into similar sets.\n """\n\n def __init__(self, sort=True):\n self._things = set() # set of all things known so far\n self._sets = [] # list of class sets produced so far\n self._mapping = {} # map from things to their class set\n self._dirty = False\n self._sort = sort\n\n def add(self, set_of_things):\n """\n Add a set to the classifier. Any iterable is accepted.\n """\n if not set_of_things:\n return\n\n self._dirty = True\n\n things, sets, mapping = self._things, self._sets, self._mapping\n\n s = set(set_of_things)\n intersection = s.intersection(things) # existing things\n s.difference_update(intersection) # new things\n difference = s\n del s\n\n # Add new class for new things\n if difference:\n things.update(difference)\n sets.append(difference)\n for thing in difference:\n mapping[thing] = difference\n del difference\n\n while intersection:\n # Take one item and process the old class it belongs to\n old_class = mapping[next(iter(intersection))]\n old_class_intersection = old_class.intersection(intersection)\n\n # Update old class to remove items from new set\n old_class.difference_update(old_class_intersection)\n\n # Remove processed items from todo list\n intersection.difference_update(old_class_intersection)\n\n # Add new class for the intersection with old class\n sets.append(old_class_intersection)\n for thing in old_class_intersection:\n mapping[thing] = old_class_intersection\n del old_class_intersection\n\n def update(self, list_of_sets):\n """\n Add a a list of sets to the classifier. 
Any iterable of iterables is accepted.\n """\n for s in list_of_sets:\n self.add(s)\n\n def _process(self):\n if not self._dirty:\n return\n\n # Do any deferred processing\n sets = self._sets\n self._sets = [s for s in sets if s]\n\n if self._sort:\n self._sets = sorted(self._sets, key=lambda s: (-len(s), sorted(s)))\n\n self._dirty = False\n\n # Output methods\n\n def getThings(self):\n """Returns the set of all things known so far.\n\n The return value belongs to the Classifier object and should NOT\n be modified while the classifier is still in use.\n """\n self._process()\n return self._things\n\n def getMapping(self):\n """Returns the mapping from things to their class set.\n\n The return value belongs to the Classifier object and should NOT\n be modified while the classifier is still in use.\n """\n self._process()\n return self._mapping\n\n def getClasses(self):\n """Returns the list of class sets.\n\n The return value belongs to the Classifier object and should NOT\n be modified while the classifier is still in use.\n """\n self._process()\n return self._sets\n\n\ndef classify(list_of_sets, sort=True):\n """\n Takes a iterable of iterables (list of sets from here on; but any\n iterable works.), and returns the smallest list of sets such that\n each set, is either a subset, or is disjoint from, each of the input\n sets.\n\n In other words, this function classifies all the things present in\n any of the input sets, into similar classes, based on which sets\n things are a member of.\n\n If sort=True, return class sets are sorted by decreasing size and\n their natural sort order within each class size. 
Otherwise, class\n sets are returned in the order that they were identified, which is\n generally not significant.\n\n >>> classify([]) == ([], {})\n True\n >>> classify([[]]) == ([], {})\n True\n >>> classify([[], []]) == ([], {})\n True\n >>> classify([[1]]) == ([{1}], {1: {1}})\n True\n >>> classify([[1,2]]) == ([{1, 2}], {1: {1, 2}, 2: {1, 2}})\n True\n >>> classify([[1],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}})\n True\n >>> classify([[1,2],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}})\n True\n >>> classify([[1,2],[2,4]]) == ([{1}, {2}, {4}], {1: {1}, 2: {2}, 4: {4}})\n True\n >>> classify([[1,2],[2,4,5]]) == (\n ... [{4, 5}, {1}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})\n True\n >>> classify([[1,2],[2,4,5]], sort=False) == (\n ... [{1}, {4, 5}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})\n True\n >>> classify([[1,2,9],[2,4,5]], sort=False) == (\n ... [{1, 9}, {4, 5}, {2}], {1: {1, 9}, 2: {2}, 4: {4, 5}, 5: {4, 5},\n ... 9: {1, 9}})\n True\n >>> classify([[1,2,9,15],[2,4,5]], sort=False) == (\n ... [{1, 9, 15}, {4, 5}, {2}], {1: {1, 9, 15}, 2: {2}, 4: {4, 5},\n ... 5: {4, 5}, 9: {1, 9, 15}, 15: {1, 9, 15}})\n True\n >>> classes, mapping = classify([[1,2,9,15],[2,4,5],[15,5]], sort=False)\n >>> set([frozenset(c) for c in classes]) == set(\n ... [frozenset(s) for s in ({1, 9}, {4}, {2}, {5}, {15})])\n True\n >>> mapping == {1: {1, 9}, 2: {2}, 4: {4}, 5: {5}, 9: {1, 9}, 15: {15}}\n True\n """\n classifier = Classifier(sort=sort)\n classifier.update(list_of_sets)\n return classifier.getClasses(), classifier.getMapping()\n\n\nif __name__ == "__main__":\n import sys, doctest\n\n sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)\n
|
.venv\Lib\site-packages\fontTools\misc\classifyTools.py
|
classifyTools.py
|
Python
| 5,783 | 0.95 | 0.241176 | 0.050725 |
vue-tools
| 225 |
2025-01-28T09:41:45.853309
|
BSD-3-Clause
| false |
a8bdd92461abd52715e34862041c2679
|
"""Collection of utilities for command-line interfaces and console scripts."""\n\nimport os\nimport re\n\n\nnumberAddedRE = re.compile(r"#\d+$")\n\n\ndef makeOutputFileName(\n input, outputDir=None, extension=None, overWrite=False, suffix=""\n):\n """Generates a suitable file name for writing output.\n\n Often tools will want to take a file, do some kind of transformation to it,\n and write it out again. This function determines an appropriate name for the\n output file, through one or more of the following steps:\n\n - changing the output directory\n - appending suffix before file extension\n - replacing the file extension\n - suffixing the filename with a number (``#1``, ``#2``, etc.) to avoid\n overwriting an existing file.\n\n Args:\n input: Name of input file.\n outputDir: Optionally, a new directory to write the file into.\n suffix: Optionally, a string suffix is appended to file name before\n the extension.\n extension: Optionally, a replacement for the current file extension.\n overWrite: Overwriting an existing file is permitted if true; if false\n and the proposed filename exists, a new name will be generated by\n adding an appropriate number suffix.\n\n Returns:\n str: Suitable output filename\n """\n dirName, fileName = os.path.split(input)\n fileName, ext = os.path.splitext(fileName)\n if outputDir:\n dirName = outputDir\n fileName = numberAddedRE.split(fileName)[0]\n if extension is None:\n extension = os.path.splitext(input)[1]\n output = os.path.join(dirName, fileName + suffix + extension)\n n = 1\n if not overWrite:\n while os.path.exists(output):\n output = os.path.join(\n dirName, fileName + suffix + "#" + repr(n) + extension\n )\n n += 1\n return output\n
|
.venv\Lib\site-packages\fontTools\misc\cliTools.py
|
cliTools.py
|
Python
| 1,915 | 0.95 | 0.226415 | 0 |
node-utils
| 616 |
2024-09-09T00:40:01.560513
|
Apache-2.0
| false |
5b8cd0d5f8859779b7e9df4f2b3b1c4b
|
"""\nCode of the config system; not related to fontTools or fonts in particular.\n\nThe options that are specific to fontTools are in :mod:`fontTools.config`.\n\nTo create your own config system, you need to create an instance of\n:class:`Options`, and a subclass of :class:`AbstractConfig` with its\n``options`` class variable set to your instance of Options.\n\n"""\n\nfrom __future__ import annotations\n\nimport logging\nfrom dataclasses import dataclass\nfrom typing import (\n Any,\n Callable,\n ClassVar,\n Dict,\n Iterable,\n Mapping,\n MutableMapping,\n Optional,\n Set,\n Union,\n)\n\n\nlog = logging.getLogger(__name__)\n\n__all__ = [\n "AbstractConfig",\n "ConfigAlreadyRegisteredError",\n "ConfigError",\n "ConfigUnknownOptionError",\n "ConfigValueParsingError",\n "ConfigValueValidationError",\n "Option",\n "Options",\n]\n\n\nclass ConfigError(Exception):\n """Base exception for the config module."""\n\n\nclass ConfigAlreadyRegisteredError(ConfigError):\n """Raised when a module tries to register a configuration option that\n already exists.\n\n Should not be raised too much really, only when developing new fontTools\n modules.\n """\n\n def __init__(self, name):\n super().__init__(f"Config option {name} is already registered.")\n\n\nclass ConfigValueParsingError(ConfigError):\n """Raised when a configuration value cannot be parsed."""\n\n def __init__(self, name, value):\n super().__init__(\n f"Config option {name}: value cannot be parsed (given {repr(value)})"\n )\n\n\nclass ConfigValueValidationError(ConfigError):\n """Raised when a configuration value cannot be validated."""\n\n def __init__(self, name, value):\n super().__init__(\n f"Config option {name}: value is invalid (given {repr(value)})"\n )\n\n\nclass ConfigUnknownOptionError(ConfigError):\n """Raised when a configuration option is unknown."""\n\n def __init__(self, option_or_name):\n name = (\n f"'{option_or_name.name}' (id={id(option_or_name)})>"\n if isinstance(option_or_name, Option)\n else 
f"'{option_or_name}'"\n )\n super().__init__(f"Config option {name} is unknown")\n\n\n# eq=False because Options are unique, not fungible objects\n@dataclass(frozen=True, eq=False)\nclass Option:\n name: str\n """Unique name identifying the option (e.g. package.module:MY_OPTION)."""\n help: str\n """Help text for this option."""\n default: Any\n """Default value for this option."""\n parse: Callable[[str], Any]\n """Turn input (e.g. string) into proper type. Only when reading from file."""\n validate: Optional[Callable[[Any], bool]] = None\n """Return true if the given value is an acceptable value."""\n\n @staticmethod\n def parse_optional_bool(v: str) -> Optional[bool]:\n s = str(v).lower()\n if s in {"0", "no", "false"}:\n return False\n if s in {"1", "yes", "true"}:\n return True\n if s in {"auto", "none"}:\n return None\n raise ValueError("invalid optional bool: {v!r}")\n\n @staticmethod\n def validate_optional_bool(v: Any) -> bool:\n return v is None or isinstance(v, bool)\n\n\nclass Options(Mapping):\n """Registry of available options for a given config system.\n\n Define new options using the :meth:`register()` method.\n\n Access existing options using the Mapping interface.\n """\n\n __options: Dict[str, Option]\n\n def __init__(self, other: "Options" = None) -> None:\n self.__options = {}\n if other is not None:\n for option in other.values():\n self.register_option(option)\n\n def register(\n self,\n name: str,\n help: str,\n default: Any,\n parse: Callable[[str], Any],\n validate: Optional[Callable[[Any], bool]] = None,\n ) -> Option:\n """Create and register a new option."""\n return self.register_option(Option(name, help, default, parse, validate))\n\n def register_option(self, option: Option) -> Option:\n """Register a new option."""\n name = option.name\n if name in self.__options:\n raise ConfigAlreadyRegisteredError(name)\n self.__options[name] = option\n return option\n\n def is_registered(self, option: Option) -> bool:\n """Return True if the 
same option object is already registered."""\n return self.__options.get(option.name) is option\n\n def __getitem__(self, key: str) -> Option:\n return self.__options.__getitem__(key)\n\n def __iter__(self) -> Iterator[str]:\n return self.__options.__iter__()\n\n def __len__(self) -> int:\n return self.__options.__len__()\n\n def __repr__(self) -> str:\n return (\n f"{self.__class__.__name__}({{\n"\n + "".join(\n f" {k!r}: Option(default={v.default!r}, ...),\n"\n for k, v in self.__options.items()\n )\n + "})"\n )\n\n\n_USE_GLOBAL_DEFAULT = object()\n\n\nclass AbstractConfig(MutableMapping):\n """\n Create a set of config values, optionally pre-filled with values from\n the given dictionary or pre-existing config object.\n\n The class implements the MutableMapping protocol keyed by option name (`str`).\n For convenience its methods accept either Option or str as the key parameter.\n\n .. seealso:: :meth:`set()`\n\n This config class is abstract because it needs its ``options`` class\n var to be set to an instance of :class:`Options` before it can be\n instanciated and used.\n\n .. 
code:: python\n\n class MyConfig(AbstractConfig):\n options = Options()\n\n MyConfig.register_option( "test:option_name", "This is an option", 0, int, lambda v: isinstance(v, int))\n\n cfg = MyConfig({"test:option_name": 10})\n\n """\n\n options: ClassVar[Options]\n\n @classmethod\n def register_option(\n cls,\n name: str,\n help: str,\n default: Any,\n parse: Callable[[str], Any],\n validate: Optional[Callable[[Any], bool]] = None,\n ) -> Option:\n """Register an available option in this config system."""\n return cls.options.register(\n name, help=help, default=default, parse=parse, validate=validate\n )\n\n _values: Dict[str, Any]\n\n def __init__(\n self,\n values: Union[AbstractConfig, Dict[Union[Option, str], Any]] = {},\n parse_values: bool = False,\n skip_unknown: bool = False,\n ):\n self._values = {}\n values_dict = values._values if isinstance(values, AbstractConfig) else values\n for name, value in values_dict.items():\n self.set(name, value, parse_values, skip_unknown)\n\n def _resolve_option(self, option_or_name: Union[Option, str]) -> Option:\n if isinstance(option_or_name, Option):\n option = option_or_name\n if not self.options.is_registered(option):\n raise ConfigUnknownOptionError(option)\n return option\n elif isinstance(option_or_name, str):\n name = option_or_name\n try:\n return self.options[name]\n except KeyError:\n raise ConfigUnknownOptionError(name)\n else:\n raise TypeError(\n "expected Option or str, found "\n f"{type(option_or_name).__name__}: {option_or_name!r}"\n )\n\n def set(\n self,\n option_or_name: Union[Option, str],\n value: Any,\n parse_values: bool = False,\n skip_unknown: bool = False,\n ):\n """Set the value of an option.\n\n Args:\n * `option_or_name`: an `Option` object or its name (`str`).\n * `value`: the value to be assigned to given option.\n * `parse_values`: parse the configuration value from a string into\n its proper type, as per its `Option` object. 
The default\n behavior is to raise `ConfigValueValidationError` when the value\n is not of the right type. Useful when reading options from a\n file type that doesn't support as many types as Python.\n * `skip_unknown`: skip unknown configuration options. The default\n behaviour is to raise `ConfigUnknownOptionError`. Useful when\n reading options from a configuration file that has extra entries\n (e.g. for a later version of fontTools)\n """\n try:\n option = self._resolve_option(option_or_name)\n except ConfigUnknownOptionError as e:\n if skip_unknown:\n log.debug(str(e))\n return\n raise\n\n # Can be useful if the values come from a source that doesn't have\n # strict typing (.ini file? Terminal input?)\n if parse_values:\n try:\n value = option.parse(value)\n except Exception as e:\n raise ConfigValueParsingError(option.name, value) from e\n\n if option.validate is not None and not option.validate(value):\n raise ConfigValueValidationError(option.name, value)\n\n self._values[option.name] = value\n\n def get(\n self, option_or_name: Union[Option, str], default: Any = _USE_GLOBAL_DEFAULT\n ) -> Any:\n """\n Get the value of an option. The value which is returned is the first\n provided among:\n\n 1. a user-provided value in the options's ``self._values`` dict\n 2. a caller-provided default value to this method call\n 3. the global default for the option provided in ``fontTools.config``\n\n This is to provide the ability to migrate progressively from config\n options passed as arguments to fontTools APIs to config options read\n from the current TTFont, e.g.\n\n .. 
code:: python\n\n def fontToolsAPI(font, some_option):\n value = font.cfg.get("someLib.module:SOME_OPTION", some_option)\n # use value\n\n That way, the function will work the same for users of the API that\n still pass the option to the function call, but will favour the new\n config mechanism if the given font specifies a value for that option.\n """\n option = self._resolve_option(option_or_name)\n if option.name in self._values:\n return self._values[option.name]\n if default is not _USE_GLOBAL_DEFAULT:\n return default\n return option.default\n\n def copy(self):\n return self.__class__(self._values)\n\n def __getitem__(self, option_or_name: Union[Option, str]) -> Any:\n return self.get(option_or_name)\n\n def __setitem__(self, option_or_name: Union[Option, str], value: Any) -> None:\n return self.set(option_or_name, value)\n\n def __delitem__(self, option_or_name: Union[Option, str]) -> None:\n option = self._resolve_option(option_or_name)\n del self._values[option.name]\n\n def __iter__(self) -> Iterable[str]:\n return self._values.__iter__()\n\n def __len__(self) -> int:\n return len(self._values)\n\n def __repr__(self) -> str:\n return f"{self.__class__.__name__}({repr(self._values)})"\n
|
.venv\Lib\site-packages\fontTools\misc\configTools.py
|
configTools.py
|
Python
| 11,537 | 0.95 | 0.22063 | 0.029197 |
vue-tools
| 941 |
2025-04-04T08:56:27.285701
|
MIT
| false |
16f32fc3355ac8c0a89e070fb6e7f58a
|
""" Exports a no-op 'cython' namespace similar to\nhttps://github.com/cython/cython/blob/master/Cython/Shadow.py\n\nThis allows to optionally compile @cython decorated functions\n(when cython is available at built time), or run the same code\nas pure-python, without runtime dependency on cython module.\n\nWe only define the symbols that we use. E.g. see fontTools.cu2qu\n"""\n\nfrom types import SimpleNamespace\n\n\ndef _empty_decorator(x):\n return x\n\n\ncompiled = False\n\nfor name in ("double", "complex", "int"):\n globals()[name] = None\n\nfor name in ("cfunc", "inline"):\n globals()[name] = _empty_decorator\n\nlocals = lambda **_: _empty_decorator\nreturns = lambda _: _empty_decorator\n
|
.venv\Lib\site-packages\fontTools\misc\cython.py
|
cython.py
|
Python
| 709 | 0.95 | 0.111111 | 0 |
node-utils
| 910 |
2024-08-12T22:18:02.319381
|
BSD-3-Clause
| false |
ed39f1b746db0574df61f65f110a0554
|
"""Misc dict tools."""\n\n__all__ = ["hashdict"]\n\n\n# https://stackoverflow.com/questions/1151658/python-hashable-dicts\nclass hashdict(dict):\n """\n hashable dict implementation, suitable for use as a key into\n other dicts.\n\n >>> h1 = hashdict({"apples": 1, "bananas":2})\n >>> h2 = hashdict({"bananas": 3, "mangoes": 5})\n >>> h1+h2\n hashdict(apples=1, bananas=3, mangoes=5)\n >>> d1 = {}\n >>> d1[h1] = "salad"\n >>> d1[h1]\n 'salad'\n >>> d1[h2]\n Traceback (most recent call last):\n ...\n KeyError: hashdict(bananas=3, mangoes=5)\n\n based on answers from\n http://stackoverflow.com/questions/1151658/python-hashable-dicts\n\n """\n\n def __key(self):\n return tuple(sorted(self.items()))\n\n def __repr__(self):\n return "{0}({1})".format(\n self.__class__.__name__,\n ", ".join("{0}={1}".format(str(i[0]), repr(i[1])) for i in self.__key()),\n )\n\n def __hash__(self):\n return hash(self.__key())\n\n def __setitem__(self, key, value):\n raise TypeError(\n "{0} does not support item assignment".format(self.__class__.__name__)\n )\n\n def __delitem__(self, key):\n raise TypeError(\n "{0} does not support item assignment".format(self.__class__.__name__)\n )\n\n def clear(self):\n raise TypeError(\n "{0} does not support item assignment".format(self.__class__.__name__)\n )\n\n def pop(self, *args, **kwargs):\n raise TypeError(\n "{0} does not support item assignment".format(self.__class__.__name__)\n )\n\n def popitem(self, *args, **kwargs):\n raise TypeError(\n "{0} does not support item assignment".format(self.__class__.__name__)\n )\n\n def setdefault(self, *args, **kwargs):\n raise TypeError(\n "{0} does not support item assignment".format(self.__class__.__name__)\n )\n\n def update(self, *args, **kwargs):\n raise TypeError(\n "{0} does not support item assignment".format(self.__class__.__name__)\n )\n\n # update is not ok because it mutates the object\n # __add__ is ok because it creates a new object\n # while the new object is under construction, it's ok to 
mutate it\n def __add__(self, right):\n result = hashdict(self)\n dict.update(result, right)\n return result\n
|
.venv\Lib\site-packages\fontTools\misc\dictTools.py
|
dictTools.py
|
Python
| 2,500 | 0.95 | 0.180723 | 0.060606 |
node-utils
| 229 |
2024-04-18T02:41:25.587346
|
BSD-3-Clause
| false |
945cbd64b488c2b3ed4c1c87ec19cd9d
|
"""\nPostScript Type 1 fonts make use of two types of encryption: charstring\nencryption and ``eexec`` encryption. Charstring encryption is used for\nthe charstrings themselves, while ``eexec`` is used to encrypt larger\nsections of the font program, such as the ``Private`` and ``CharStrings``\ndictionaries. Despite the different names, the algorithm is the same,\nalthough ``eexec`` encryption uses a fixed initial key R=55665.\n\nThe algorithm uses cipher feedback, meaning that the ciphertext is used\nto modify the key. Because of this, the routines in this module return\nthe new key at the end of the operation.\n\n"""\n\nfrom fontTools.misc.textTools import bytechr, bytesjoin, byteord\n\n\ndef _decryptChar(cipher, R):\n cipher = byteord(cipher)\n plain = ((cipher ^ (R >> 8))) & 0xFF\n R = ((cipher + R) * 52845 + 22719) & 0xFFFF\n return bytechr(plain), R\n\n\ndef _encryptChar(plain, R):\n plain = byteord(plain)\n cipher = ((plain ^ (R >> 8))) & 0xFF\n R = ((cipher + R) * 52845 + 22719) & 0xFFFF\n return bytechr(cipher), R\n\n\ndef decrypt(cipherstring, R):\n r"""\n Decrypts a string using the Type 1 encryption algorithm.\n\n Args:\n cipherstring: String of ciphertext.\n R: Initial key.\n\n Returns:\n decryptedStr: Plaintext string.\n R: Output key for subsequent decryptions.\n\n Examples::\n\n >>> testStr = b"\0\0asdadads asds\265"\n >>> decryptedStr, R = decrypt(testStr, 12321)\n >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'\n True\n >>> R == 36142\n True\n """\n plainList = []\n for cipher in cipherstring:\n plain, R = _decryptChar(cipher, R)\n plainList.append(plain)\n plainstring = bytesjoin(plainList)\n return plainstring, int(R)\n\n\ndef encrypt(plainstring, R):\n r"""\n Encrypts a string using the Type 1 encryption algorithm.\n\n Note that the algorithm as described in the Type 1 specification requires the\n plaintext to be prefixed with a number of random bytes. (For ``eexec`` the\n number of random bytes is set to 4.) 
This routine does *not* add the random\n prefix to its input.\n\n Args:\n plainstring: String of plaintext.\n R: Initial key.\n\n Returns:\n cipherstring: Ciphertext string.\n R: Output key for subsequent encryptions.\n\n Examples::\n\n >>> testStr = b"\0\0asdadads asds\265"\n >>> decryptedStr, R = decrypt(testStr, 12321)\n >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'\n True\n >>> R == 36142\n True\n\n >>> testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'\n >>> encryptedStr, R = encrypt(testStr, 12321)\n >>> encryptedStr == b"\0\0asdadads asds\265"\n True\n >>> R == 36142\n True\n """\n cipherList = []\n for plain in plainstring:\n cipher, R = _encryptChar(plain, R)\n cipherList.append(cipher)\n cipherstring = bytesjoin(cipherList)\n return cipherstring, int(R)\n\n\ndef hexString(s):\n import binascii\n\n return binascii.hexlify(s)\n\n\ndef deHexString(h):\n import binascii\n\n h = bytesjoin(h.split())\n return binascii.unhexlify(h)\n\n\nif __name__ == "__main__":\n import sys\n import doctest\n\n sys.exit(doctest.testmod().failed)\n
|
.venv\Lib\site-packages\fontTools\misc\eexec.py
|
eexec.py
|
Python
| 3,450 | 0.85 | 0.109244 | 0 |
node-utils
| 244 |
2024-01-03T17:43:36.887796
|
BSD-3-Clause
| false |
c55a72c0c1aac8941b742f7ae3dc37e5
|
"""\nThis module implements the algorithm for converting between a "user name" -\nsomething that a user can choose arbitrarily inside a font editor - and a file\nname suitable for use in a wide range of operating systems and filesystems.\n\nThe `UFO 3 specification <http://unifiedfontobject.org/versions/ufo3/conventions/>`_\nprovides an example of an algorithm for such conversion, which avoids illegal\ncharacters, reserved file names, ambiguity between upper- and lower-case\ncharacters, and clashes with existing files.\n\nThis code was originally copied from\n`ufoLib <https://github.com/unified-font-object/ufoLib/blob/8747da7/Lib/ufoLib/filenames.py>`_\nby Tal Leming and is copyright (c) 2005-2016, The RoboFab Developers:\n\n- Erik van Blokland\n- Tal Leming\n- Just van Rossum\n"""\n\nillegalCharacters = r"\" * + / : < > ? [ \ ] | \0".split(" ")\nillegalCharacters += [chr(i) for i in range(1, 32)]\nillegalCharacters += [chr(0x7F)]\nreservedFileNames = "CON PRN AUX CLOCK$ NUL A:-Z: COM1".lower().split(" ")\nreservedFileNames += "LPT1 LPT2 LPT3 COM2 COM3 COM4".lower().split(" ")\nmaxFileNameLength = 255\n\n\nclass NameTranslationError(Exception):\n pass\n\n\ndef userNameToFileName(userName, existing=[], prefix="", suffix=""):\n """Converts from a user name to a file name.\n\n Takes care to avoid illegal characters, reserved file names, ambiguity between\n upper- and lower-case characters, and clashes with existing files.\n\n Args:\n userName (str): The input file name.\n existing: A case-insensitive list of all existing file names.\n prefix: Prefix to be prepended to the file name.\n suffix: Suffix to be appended to the file name.\n\n Returns:\n A suitable filename.\n\n Raises:\n NameTranslationError: If no suitable name could be generated.\n\n Examples::\n\n >>> userNameToFileName("a") == "a"\n True\n >>> userNameToFileName("A") == "A_"\n True\n >>> userNameToFileName("AE") == "A_E_"\n True\n >>> userNameToFileName("Ae") == "A_e"\n True\n >>> 
userNameToFileName("ae") == "ae"\n True\n >>> userNameToFileName("aE") == "aE_"\n True\n >>> userNameToFileName("a.alt") == "a.alt"\n True\n >>> userNameToFileName("A.alt") == "A_.alt"\n True\n >>> userNameToFileName("A.Alt") == "A_.A_lt"\n True\n >>> userNameToFileName("A.aLt") == "A_.aL_t"\n True\n >>> userNameToFileName(u"A.alT") == "A_.alT_"\n True\n >>> userNameToFileName("T_H") == "T__H_"\n True\n >>> userNameToFileName("T_h") == "T__h"\n True\n >>> userNameToFileName("t_h") == "t_h"\n True\n >>> userNameToFileName("F_F_I") == "F__F__I_"\n True\n >>> userNameToFileName("f_f_i") == "f_f_i"\n True\n >>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash"\n True\n >>> userNameToFileName(".notdef") == "_notdef"\n True\n >>> userNameToFileName("con") == "_con"\n True\n >>> userNameToFileName("CON") == "C_O_N_"\n True\n >>> userNameToFileName("con.alt") == "_con.alt"\n True\n >>> userNameToFileName("alt.con") == "alt._con"\n True\n """\n # the incoming name must be a str\n if not isinstance(userName, str):\n raise ValueError("The value for userName must be a string.")\n # establish the prefix and suffix lengths\n prefixLength = len(prefix)\n suffixLength = len(suffix)\n # replace an initial period with an _\n # if no prefix is to be added\n if not prefix and userName[0] == ".":\n userName = "_" + userName[1:]\n # filter the user name\n filteredUserName = []\n for character in userName:\n # replace illegal characters with _\n if character in illegalCharacters:\n character = "_"\n # add _ to all non-lower characters\n elif character != character.lower():\n character += "_"\n filteredUserName.append(character)\n userName = "".join(filteredUserName)\n # clip to 255\n sliceLength = maxFileNameLength - prefixLength - suffixLength\n userName = userName[:sliceLength]\n # test for illegal files names\n parts = []\n for part in userName.split("."):\n if part.lower() in reservedFileNames:\n part = "_" + part\n parts.append(part)\n userName = ".".join(parts)\n # test 
for clash\n fullName = prefix + userName + suffix\n if fullName.lower() in existing:\n fullName = handleClash1(userName, existing, prefix, suffix)\n # finished\n return fullName\n\n\ndef handleClash1(userName, existing=[], prefix="", suffix=""):\n """\n existing should be a case-insensitive list\n of all existing file names.\n\n >>> prefix = ("0" * 5) + "."\n >>> suffix = "." + ("0" * 10)\n >>> existing = ["a" * 5]\n\n >>> e = list(existing)\n >>> handleClash1(userName="A" * 5, existing=e,\n ... prefix=prefix, suffix=suffix) == (\n ... '00000.AAAAA000000000000001.0000000000')\n True\n\n >>> e = list(existing)\n >>> e.append(prefix + "aaaaa" + "1".zfill(15) + suffix)\n >>> handleClash1(userName="A" * 5, existing=e,\n ... prefix=prefix, suffix=suffix) == (\n ... '00000.AAAAA000000000000002.0000000000')\n True\n\n >>> e = list(existing)\n >>> e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)\n >>> handleClash1(userName="A" * 5, existing=e,\n ... prefix=prefix, suffix=suffix) == (\n ... '00000.AAAAA000000000000001.0000000000')\n True\n """\n # if the prefix length + user name length + suffix length + 15 is at\n # or past the maximum length, silce 15 characters off of the user name\n prefixLength = len(prefix)\n suffixLength = len(suffix)\n if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength:\n l = prefixLength + len(userName) + suffixLength + 15\n sliceLength = maxFileNameLength - l\n userName = userName[:sliceLength]\n finalName = None\n # try to add numbers to create a unique name\n counter = 1\n while finalName is None:\n name = userName + str(counter).zfill(15)\n fullName = prefix + name + suffix\n if fullName.lower() not in existing:\n finalName = fullName\n break\n else:\n counter += 1\n if counter >= 999999999999999:\n break\n # if there is a clash, go to the next fallback\n if finalName is None:\n finalName = handleClash2(existing, prefix, suffix)\n # finished\n return finalName\n\n\ndef handleClash2(existing=[], prefix="", suffix=""):\n 
"""\n existing should be a case-insensitive list\n of all existing file names.\n\n >>> prefix = ("0" * 5) + "."\n >>> suffix = "." + ("0" * 10)\n >>> existing = [prefix + str(i) + suffix for i in range(100)]\n\n >>> e = list(existing)\n >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (\n ... '00000.100.0000000000')\n True\n\n >>> e = list(existing)\n >>> e.remove(prefix + "1" + suffix)\n >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (\n ... '00000.1.0000000000')\n True\n\n >>> e = list(existing)\n >>> e.remove(prefix + "2" + suffix)\n >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (\n ... '00000.2.0000000000')\n True\n """\n # calculate the longest possible string\n maxLength = maxFileNameLength - len(prefix) - len(suffix)\n maxValue = int("9" * maxLength)\n # try to find a number\n finalName = None\n counter = 1\n while finalName is None:\n fullName = prefix + str(counter) + suffix\n if fullName.lower() not in existing:\n finalName = fullName\n break\n else:\n counter += 1\n if counter >= maxValue:\n break\n # raise an error if nothing has been found\n if finalName is None:\n raise NameTranslationError("No unique name could be found.")\n # finished\n return finalName\n\n\nif __name__ == "__main__":\n import doctest\n import sys\n\n sys.exit(doctest.testmod().failed)\n
|
.venv\Lib\site-packages\fontTools\misc\filenames.py
|
filenames.py
|
Python
| 8,468 | 0.95 | 0.142857 | 0.092593 |
vue-tools
| 938 |
2023-09-02T19:13:02.491321
|
GPL-3.0
| false |
269f6cdbec506a71559fa7cf343f1135
|
__all__ = ["popCount", "bit_count", "bit_indices"]\n\n\ntry:\n bit_count = int.bit_count\nexcept AttributeError:\n\n def bit_count(v):\n return bin(v).count("1")\n\n\n"""Return number of 1 bits (population count) of the absolute value of an integer.\n\nSee https://docs.python.org/3.10/library/stdtypes.html#int.bit_count\n"""\npopCount = bit_count # alias\n\n\ndef bit_indices(v):\n """Return list of indices where bits are set, 0 being the index of the least significant bit.\n\n >>> bit_indices(0b101)\n [0, 2]\n """\n return [i for i, b in enumerate(bin(v)[::-1]) if b == "1"]\n
|
.venv\Lib\site-packages\fontTools\misc\intTools.py
|
intTools.py
|
Python
| 611 | 0.95 | 0.2 | 0 |
awesome-app
| 998 |
2025-02-16T00:43:57.809419
|
GPL-3.0
| false |
aaf42ec44c5f9f6c6bf41b8a23140538
|
from itertools import *\n\n# Python 3.12:\nif "batched" not in globals():\n # https://docs.python.org/3/library/itertools.html#itertools.batched\n def batched(iterable, n):\n # batched('ABCDEFG', 3) --> ABC DEF G\n if n < 1:\n raise ValueError("n must be at least one")\n it = iter(iterable)\n while batch := tuple(islice(it, n)):\n yield batch\n
|
.venv\Lib\site-packages\fontTools\misc\iterTools.py
|
iterTools.py
|
Python
| 402 | 0.95 | 0.333333 | 0.272727 |
react-lib
| 835 |
2024-03-12T12:43:52.113216
|
BSD-3-Clause
| false |
0081223e024e7336348e5fceffe81e53
|
from collections import UserDict, UserList\n\n__all__ = ["LazyDict", "LazyList"]\n\n\nclass LazyDict(UserDict):\n def __init__(self, data):\n super().__init__()\n self.data = data\n\n def __getitem__(self, k):\n v = self.data[k]\n if callable(v):\n v = v(k)\n self.data[k] = v\n return v\n\n\nclass LazyList(UserList):\n def __getitem__(self, k):\n if isinstance(k, slice):\n indices = range(*k.indices(len(self)))\n return [self[i] for i in indices]\n v = self.data[k]\n if callable(v):\n v = v(k)\n self.data[k] = v\n return v\n\n def __add__(self, other):\n if isinstance(other, LazyList):\n other = list(other)\n elif isinstance(other, list):\n pass\n else:\n return NotImplemented\n return list(self) + other\n\n def __radd__(self, other):\n if not isinstance(other, list):\n return NotImplemented\n return other + list(self)\n
|
.venv\Lib\site-packages\fontTools\misc\lazyTools.py
|
lazyTools.py
|
Python
| 1,062 | 0.85 | 0.309524 | 0 |
awesome-app
| 496 |
2024-09-21T12:38:10.620916
|
Apache-2.0
| false |
9defc7b9499ccb5fa99d0573cafbd626
|
from fontTools.misc.textTools import Tag, bytesjoin, strjoin\n\ntry:\n import xattr\nexcept ImportError:\n xattr = None\n\n\ndef _reverseString(s):\n s = list(s)\n s.reverse()\n return strjoin(s)\n\n\ndef getMacCreatorAndType(path):\n """Returns file creator and file type codes for a path.\n\n Args:\n path (str): A file path.\n\n Returns:\n A tuple of two :py:class:`fontTools.textTools.Tag` objects, the first\n representing the file creator and the second representing the\n file type.\n """\n if xattr is not None:\n try:\n finderInfo = xattr.getxattr(path, "com.apple.FinderInfo")\n except (KeyError, IOError):\n pass\n else:\n fileType = Tag(finderInfo[:4])\n fileCreator = Tag(finderInfo[4:8])\n return fileCreator, fileType\n return None, None\n\n\ndef setMacCreatorAndType(path, fileCreator, fileType):\n """Set file creator and file type codes for a path.\n\n Note that if the ``xattr`` module is not installed, no action is\n taken but no error is raised.\n\n Args:\n path (str): A file path.\n fileCreator: A four-character file creator tag.\n fileType: A four-character file type tag.\n\n """\n if xattr is not None:\n from fontTools.misc.textTools import pad\n\n if not all(len(s) == 4 for s in (fileCreator, fileType)):\n raise TypeError("arg must be string of 4 chars")\n finderInfo = pad(bytesjoin([fileType, fileCreator]), 32)\n xattr.setxattr(path, "com.apple.FinderInfo", finderInfo)\n
|
.venv\Lib\site-packages\fontTools\misc\macCreatorType.py
|
macCreatorType.py
|
Python
| 1,649 | 0.85 | 0.232143 | 0 |
react-lib
| 197 |
2024-06-21T12:10:15.301572
|
MIT
| false |
7f4f22697e80545a8626fc2b264cdd82
|
from io import BytesIO
import struct
from fontTools.misc import sstruct
from fontTools.misc.textTools import bytesjoin, tostr
from collections import OrderedDict
from collections.abc import MutableMapping


class ResourceError(Exception):
    # Raised for any structural problem found while reading a resource fork
    # (failed seeks, short reads, data that is not a resource fork).
    pass


class ResourceReader(MutableMapping):
    """Reader for Mac OS resource forks.

    Parses a resource fork and returns resources according to their type.
    If run on OS X, this will open the resource fork in the filesystem.
    Otherwise, it will open the file itself and attempt to read it as
    though it were a resource fork.

    The returned object can be indexed by type and iterated over,
    returning in each case a list of py:class:`Resource` objects
    representing all the resources of a certain type.

    """

    def __init__(self, fileOrPath):
        """Open a file

        Args:
            fileOrPath: Either an object supporting a ``read`` method, an
                ``os.PathLike`` object, or a string.
        """
        self._resources = OrderedDict()
        if hasattr(fileOrPath, "read"):
            # Already a file-like object; parsed by the trailing _readFile().
            self.file = fileOrPath
        else:
            try:
                # try reading from the resource fork (only works on OS X)
                self.file = self.openResourceFork(fileOrPath)
                self._readFile()
                return
            except (ResourceError, IOError):
                # if it fails, use the data fork
                self.file = self.openDataFork(fileOrPath)
        self._readFile()

    @staticmethod
    def openResourceFork(path):
        """Return a BytesIO over the file's resource fork.

        Uses the ``..namedfork/rsrc`` pseudo-path, which only exists on
        macOS filesystems.
        """
        if hasattr(path, "__fspath__"):  # support os.PathLike objects
            path = path.__fspath__()
        with open(path + "/..namedfork/rsrc", "rb") as resfork:
            data = resfork.read()
        infile = BytesIO(data)
        infile.name = path
        return infile

    @staticmethod
    def openDataFork(path):
        """Return a BytesIO over the file's ordinary (data fork) contents."""
        with open(path, "rb") as datafork:
            data = datafork.read()
        infile = BytesIO(data)
        infile.name = path
        return infile

    def _readFile(self):
        # Parse the whole fork: header + map, then all type/reference lists.
        self._readHeaderAndMap()
        self._readTypeList()

    def _read(self, numBytes, offset=None):
        """Read exactly numBytes, optionally seeking to offset first.

        Raises ResourceError (rather than returning short data) on any
        seek failure or short read, so callers can fall back cleanly.
        """
        if offset is not None:
            try:
                self.file.seek(offset)
            except OverflowError:
                raise ResourceError("Failed to seek offset ('offset' is too large)")
            if self.file.tell() != offset:
                raise ResourceError("Failed to seek offset (reached EOF)")
        try:
            data = self.file.read(numBytes)
        except OverflowError:
            raise ResourceError("Cannot read resource ('numBytes' is too large)")
        if len(data) != numBytes:
            raise ResourceError("Cannot read resource (not enough data)")
        return data

    def _readHeaderAndMap(self):
        """Parse the fork header and resource map, storing fields on self.

        Sets (via sstruct.unpack) dataOffset/mapOffset/dataLen/mapLen and
        attr/typeListOffset/nameListOffset, then precomputes the absolute
        offsets of the type and name lists.
        """
        self.file.seek(0)
        headerData = self._read(ResourceForkHeaderSize)
        sstruct.unpack(ResourceForkHeader, headerData, self)
        # seek to resource map, skip reserved
        mapOffset = self.mapOffset + 22
        resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
        sstruct.unpack(ResourceMapHeader, resourceMapData, self)
        # typeListOffset/nameListOffset are relative to the map start.
        self.absTypeListOffset = self.mapOffset + self.typeListOffset
        self.absNameListOffset = self.mapOffset + self.nameListOffset

    def _readTypeList(self):
        """Read the type list and populate self._resources per type."""
        absTypeListOffset = self.absTypeListOffset
        numTypesData = self._read(2, absTypeListOffset)
        (self.numTypes,) = struct.unpack(">H", numTypesData)
        absTypeListOffset2 = absTypeListOffset + 2
        # The stored counts are one less than the actual number of entries,
        # hence the "+ 1" here and for numRes below.
        for i in range(self.numTypes + 1):
            resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
            resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
            item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
            # Type codes are 4-byte Mac Roman strings (e.g. 'sfnt').
            resType = tostr(item["type"], encoding="mac-roman")
            refListOffset = absTypeListOffset + item["refListOffset"]
            numRes = item["numRes"] + 1
            resources = self._readReferenceList(resType, refListOffset, numRes)
            self._resources[resType] = resources

    def _readReferenceList(self, resType, refListOffset, numRes):
        """Decompile numRes reference entries into Resource objects."""
        resources = []
        for i in range(numRes):
            refOffset = refListOffset + ResourceRefItemSize * i
            refData = self._read(ResourceRefItemSize, refOffset)
            res = Resource(resType)
            res.decompile(refData, self)
            resources.append(res)
        return resources

    # MutableMapping interface: keyed by 4-char resource type string,
    # values are lists of Resource objects.
    def __getitem__(self, resType):
        return self._resources[resType]

    def __delitem__(self, resType):
        del self._resources[resType]

    def __setitem__(self, resType, resources):
        self._resources[resType] = resources

    def __len__(self):
        return len(self._resources)

    def __iter__(self):
        return iter(self._resources)

    def keys(self):
        return self._resources.keys()

    @property
    def types(self):
        """A list of the types of resources in the resource fork."""
        return list(self._resources.keys())

    def countResources(self, resType):
        """Return the number of resources of a given type."""
        try:
            return len(self[resType])
        except KeyError:
            return 0

    def getIndices(self, resType):
        """Returns a list of indices of resources of a given type."""
        # Resource indices are 1-based, per Mac Resource Manager convention.
        numRes = self.countResources(resType)
        if numRes:
            return list(range(1, numRes + 1))
        else:
            return []

    def getNames(self, resType):
        """Return list of names of all resources of a given type."""
        return [res.name for res in self.get(resType, []) if res.name is not None]

    def getIndResource(self, resType, index):
        """Return resource of given type located at an index ranging from 1
        to the number of resources for that type, or None if not found.
        """
        if index < 1:
            return None
        try:
            res = self[resType][index - 1]
        except (KeyError, IndexError):
            return None
        return res

    def getNamedResource(self, resType, name):
        """Return the named resource of given type, else return None."""
        name = tostr(name, encoding="mac-roman")
        for res in self.get(resType, []):
            if res.name == name:
                return res
        return None

    def close(self):
        # Close the underlying file object (owned BytesIO or caller's file).
        if not self.file.closed:
            self.file.close()


class Resource(object):
    """Represents a resource stored within a resource fork.

    Attributes:
        type: resource type.
        data: resource data.
        id: ID.
        name: resource name.
        attr: attributes.
    """

    def __init__(
        self, resType=None, resData=None, resID=None, resName=None, resAttr=None
    ):
        self.type = resType
        self.data = resData
        self.id = resID
        self.name = resName
        self.attr = resAttr

    def decompile(self, refData, reader):
        """Parse one reference-list entry and load this resource's data.

        Sets id/nameOffset/attr/dataOffset from refData, then reads the
        length-prefixed data block and (if present) the Pascal-style name
        from the reader's name list.
        """
        sstruct.unpack(ResourceRefItem, refData, self)
        # interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
        (self.dataOffset,) = struct.unpack(">L", bytesjoin([b"\0", self.dataOffset]))
        absDataOffset = reader.dataOffset + self.dataOffset
        # The data block starts with a 4-byte big-endian length.
        (dataLength,) = struct.unpack(">L", reader._read(4, absDataOffset))
        self.data = reader._read(dataLength)
        if self.nameOffset == -1:
            # No entry in the name list; self.name stays None.
            return
        absNameOffset = reader.absNameListOffset + self.nameOffset
        # Names are Pascal strings: 1-byte length followed by the bytes.
        (nameLength,) = struct.unpack("B", reader._read(1, absNameOffset))
        (name,) = struct.unpack(">%ss" % nameLength, reader._read(nameLength))
        self.name = tostr(name, encoding="mac-roman")


# sstruct format for the 16-byte fork header at offset 0.
ResourceForkHeader = """
		> # big endian
		dataOffset:     L
		mapOffset:      L
		dataLen:        L
		mapLen:         L
"""

ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader)

# sstruct format for the resource map header (after 22 reserved bytes).
ResourceMapHeader = """
		> # big endian
		attr:              H
		typeListOffset:    H
		nameListOffset:    H
"""

ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader)

# sstruct format for one entry in the type list.
ResourceTypeItem = """
		> # big endian
		type:              4s
		numRes:            H
		refListOffset:     H
"""

ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem)

# sstruct format for one entry in a reference list; dataOffset is a
# 3-byte unsigned int, widened in Resource.decompile.
ResourceRefItem = """
		> # big endian
		id:                h
		nameOffset:        h
		attr:              B
		dataOffset:        3s
		reserved:          L
"""

ResourceRefItemSize = sstruct.calcsize(ResourceRefItem)
|
.venv\Lib\site-packages\fontTools\misc\macRes.py
|
macRes.py
|
Python
| 8,840 | 0.95 | 0.199234 | 0.018349 |
node-utils
| 232 |
2024-10-27T18:18:20.763836
|
GPL-3.0
| false |
6464152c50e32a801ac6be44d8055047
|
"""psCharStrings.py -- module implementing various kinds of CharStrings:\nCFF dictionary data and Type1/Type2 CharStrings.\n"""\n\nfrom fontTools.misc.fixedTools import (\n fixedToFloat,\n floatToFixed,\n floatToFixedToStr,\n strToFixedToFloat,\n)\nfrom fontTools.misc.textTools import bytechr, byteord, bytesjoin, strjoin\nfrom fontTools.pens.boundsPen import BoundsPen\nimport struct\nimport logging\n\n\nlog = logging.getLogger(__name__)\n\n\ndef read_operator(self, b0, data, index):\n if b0 == 12:\n op = (b0, byteord(data[index]))\n index = index + 1\n else:\n op = b0\n try:\n operator = self.operators[op]\n except KeyError:\n return None, index\n value = self.handle_operator(operator)\n return value, index\n\n\ndef read_byte(self, b0, data, index):\n return b0 - 139, index\n\n\ndef read_smallInt1(self, b0, data, index):\n b1 = byteord(data[index])\n return (b0 - 247) * 256 + b1 + 108, index + 1\n\n\ndef read_smallInt2(self, b0, data, index):\n b1 = byteord(data[index])\n return -(b0 - 251) * 256 - b1 - 108, index + 1\n\n\ndef read_shortInt(self, b0, data, index):\n (value,) = struct.unpack(">h", data[index : index + 2])\n return value, index + 2\n\n\ndef read_longInt(self, b0, data, index):\n (value,) = struct.unpack(">l", data[index : index + 4])\n return value, index + 4\n\n\ndef read_fixed1616(self, b0, data, index):\n (value,) = struct.unpack(">l", data[index : index + 4])\n return fixedToFloat(value, precisionBits=16), index + 4\n\n\ndef read_reserved(self, b0, data, index):\n assert NotImplementedError\n return NotImplemented, index\n\n\ndef read_realNumber(self, b0, data, index):\n number = ""\n while True:\n b = byteord(data[index])\n index = index + 1\n nibble0 = (b & 0xF0) >> 4\n nibble1 = b & 0x0F\n if nibble0 == 0xF:\n break\n number = number + realNibbles[nibble0]\n if nibble1 == 0xF:\n break\n number = number + realNibbles[nibble1]\n return float(number), index\n\n\nt1OperandEncoding = [None] * 256\nt1OperandEncoding[0:32] = (32) * 
[read_operator]\nt1OperandEncoding[32:247] = (247 - 32) * [read_byte]\nt1OperandEncoding[247:251] = (251 - 247) * [read_smallInt1]\nt1OperandEncoding[251:255] = (255 - 251) * [read_smallInt2]\nt1OperandEncoding[255] = read_longInt\nassert len(t1OperandEncoding) == 256\n\nt2OperandEncoding = t1OperandEncoding[:]\nt2OperandEncoding[28] = read_shortInt\nt2OperandEncoding[255] = read_fixed1616\n\ncffDictOperandEncoding = t2OperandEncoding[:]\ncffDictOperandEncoding[29] = read_longInt\ncffDictOperandEncoding[30] = read_realNumber\ncffDictOperandEncoding[255] = read_reserved\n\n\nrealNibbles = [\n "0",\n "1",\n "2",\n "3",\n "4",\n "5",\n "6",\n "7",\n "8",\n "9",\n ".",\n "E",\n "E-",\n None,\n "-",\n]\nrealNibblesDict = {v: i for i, v in enumerate(realNibbles)}\n\nmaxOpStack = 193\n\n\ndef buildOperatorDict(operatorList):\n oper = {}\n opc = {}\n for item in operatorList:\n if len(item) == 2:\n oper[item[0]] = item[1]\n else:\n oper[item[0]] = item[1:]\n if isinstance(item[0], tuple):\n opc[item[1]] = item[0]\n else:\n opc[item[1]] = (item[0],)\n return oper, opc\n\n\nt2Operators = [\n # opcode name\n (1, "hstem"),\n (3, "vstem"),\n (4, "vmoveto"),\n (5, "rlineto"),\n (6, "hlineto"),\n (7, "vlineto"),\n (8, "rrcurveto"),\n (10, "callsubr"),\n (11, "return"),\n (14, "endchar"),\n (15, "vsindex"),\n (16, "blend"),\n (18, "hstemhm"),\n (19, "hintmask"),\n (20, "cntrmask"),\n (21, "rmoveto"),\n (22, "hmoveto"),\n (23, "vstemhm"),\n (24, "rcurveline"),\n (25, "rlinecurve"),\n (26, "vvcurveto"),\n (27, "hhcurveto"),\n # (28, 'shortint'), # not really an operator\n (29, "callgsubr"),\n (30, "vhcurveto"),\n (31, "hvcurveto"),\n ((12, 0), "ignore"), # dotsection. Yes, there a few very early OTF/CFF\n # fonts with this deprecated operator. 
Just ignore it.\n ((12, 3), "and"),\n ((12, 4), "or"),\n ((12, 5), "not"),\n ((12, 8), "store"),\n ((12, 9), "abs"),\n ((12, 10), "add"),\n ((12, 11), "sub"),\n ((12, 12), "div"),\n ((12, 13), "load"),\n ((12, 14), "neg"),\n ((12, 15), "eq"),\n ((12, 18), "drop"),\n ((12, 20), "put"),\n ((12, 21), "get"),\n ((12, 22), "ifelse"),\n ((12, 23), "random"),\n ((12, 24), "mul"),\n ((12, 26), "sqrt"),\n ((12, 27), "dup"),\n ((12, 28), "exch"),\n ((12, 29), "index"),\n ((12, 30), "roll"),\n ((12, 34), "hflex"),\n ((12, 35), "flex"),\n ((12, 36), "hflex1"),\n ((12, 37), "flex1"),\n]\n\n\ndef getIntEncoder(format):\n if format == "cff":\n twoByteOp = bytechr(28)\n fourByteOp = bytechr(29)\n elif format == "t1":\n twoByteOp = None\n fourByteOp = bytechr(255)\n else:\n assert format == "t2"\n twoByteOp = bytechr(28)\n fourByteOp = None\n\n def encodeInt(\n value,\n fourByteOp=fourByteOp,\n bytechr=bytechr,\n pack=struct.pack,\n unpack=struct.unpack,\n twoByteOp=twoByteOp,\n ):\n if -107 <= value <= 107:\n code = bytechr(value + 139)\n elif 108 <= value <= 1131:\n value = value - 108\n code = bytechr((value >> 8) + 247) + bytechr(value & 0xFF)\n elif -1131 <= value <= -108:\n value = -value - 108\n code = bytechr((value >> 8) + 251) + bytechr(value & 0xFF)\n elif twoByteOp is not None and -32768 <= value <= 32767:\n code = twoByteOp + pack(">h", value)\n elif fourByteOp is None:\n # Backwards compatible hack: due to a previous bug in FontTools,\n # 16.16 fixed numbers were written out as 4-byte ints. When\n # these numbers were small, they were wrongly written back as\n # small ints instead of 4-byte ints, breaking round-tripping.\n # This here workaround doesn't do it any better, since we can't\n # distinguish anymore between small ints that were supposed to\n # be small fixed numbers and small ints that were just small\n # ints. Hence the warning.\n log.warning(\n "4-byte T2 number got passed to the "\n "IntType handler. 
This should happen only when reading in "\n "old XML files.\n"\n )\n code = bytechr(255) + pack(">l", value)\n else:\n code = fourByteOp + pack(">l", value)\n return code\n\n return encodeInt\n\n\nencodeIntCFF = getIntEncoder("cff")\nencodeIntT1 = getIntEncoder("t1")\nencodeIntT2 = getIntEncoder("t2")\n\n\ndef encodeFixed(f, pack=struct.pack):\n """For T2 only"""\n value = floatToFixed(f, precisionBits=16)\n if value & 0xFFFF == 0: # check if the fractional part is zero\n return encodeIntT2(value >> 16) # encode only the integer part\n else:\n return b"\xff" + pack(">l", value) # encode the entire fixed point value\n\n\nrealZeroBytes = bytechr(30) + bytechr(0xF)\n\n\ndef encodeFloat(f):\n # For CFF only, used in cffLib\n if f == 0.0: # 0.0 == +0.0 == -0.0\n return realZeroBytes\n # Note: 14 decimal digits seems to be the limitation for CFF real numbers\n # in macOS. However, we use 8 here to match the implementation of AFDKO.\n s = "%.8G" % f\n if s[:2] == "0.":\n s = s[1:]\n elif s[:3] == "-0.":\n s = "-" + s[2:]\n elif s.endswith("000"):\n significantDigits = s.rstrip("0")\n s = "%sE%d" % (significantDigits, len(s) - len(significantDigits))\n else:\n dotIndex = s.find(".")\n eIndex = s.find("E")\n if dotIndex != -1 and eIndex != -1:\n integerPart = s[:dotIndex]\n fractionalPart = s[dotIndex + 1 : eIndex]\n exponent = int(s[eIndex + 1 :])\n newExponent = exponent - len(fractionalPart)\n if newExponent == 1:\n s = "%s%s0" % (integerPart, fractionalPart)\n else:\n s = "%s%sE%d" % (integerPart, fractionalPart, newExponent)\n if s.startswith((".0", "-.0")):\n sign, s = s.split(".", 1)\n s = "%s%sE-%d" % (sign, s.lstrip("0"), len(s))\n nibbles = []\n while s:\n c = s[0]\n s = s[1:]\n if c == "E":\n c2 = s[:1]\n if c2 == "-":\n s = s[1:]\n c = "E-"\n elif c2 == "+":\n s = s[1:]\n if s.startswith("0"):\n s = s[1:]\n nibbles.append(realNibblesDict[c])\n nibbles.append(0xF)\n if len(nibbles) % 2:\n nibbles.append(0xF)\n d = bytechr(30)\n for i in range(0, len(nibbles), 
2):\n d = d + bytechr(nibbles[i] << 4 | nibbles[i + 1])\n return d\n\n\nclass CharStringCompileError(Exception):\n pass\n\n\nclass SimpleT2Decompiler(object):\n def __init__(self, localSubrs, globalSubrs, private=None, blender=None):\n self.localSubrs = localSubrs\n self.localBias = calcSubrBias(localSubrs)\n self.globalSubrs = globalSubrs\n self.globalBias = calcSubrBias(globalSubrs)\n self.private = private\n self.blender = blender\n self.reset()\n\n def reset(self):\n self.callingStack = []\n self.operandStack = []\n self.hintCount = 0\n self.hintMaskBytes = 0\n self.numRegions = 0\n self.vsIndex = 0\n\n def execute(self, charString):\n self.callingStack.append(charString)\n needsDecompilation = charString.needsDecompilation()\n if needsDecompilation:\n program = []\n pushToProgram = program.append\n else:\n pushToProgram = lambda x: None\n pushToStack = self.operandStack.append\n index = 0\n while True:\n token, isOperator, index = charString.getToken(index)\n if token is None:\n break # we're done!\n pushToProgram(token)\n if isOperator:\n handlerName = "op_" + token\n handler = getattr(self, handlerName, None)\n if handler is not None:\n rv = handler(index)\n if rv:\n hintMaskBytes, index = rv\n pushToProgram(hintMaskBytes)\n else:\n self.popall()\n else:\n pushToStack(token)\n if needsDecompilation:\n charString.setProgram(program)\n del self.callingStack[-1]\n\n def pop(self):\n value = self.operandStack[-1]\n del self.operandStack[-1]\n return value\n\n def popall(self):\n stack = self.operandStack[:]\n self.operandStack[:] = []\n return stack\n\n def push(self, value):\n self.operandStack.append(value)\n\n def op_return(self, index):\n if self.operandStack:\n pass\n\n def op_endchar(self, index):\n pass\n\n def op_ignore(self, index):\n pass\n\n def op_callsubr(self, index):\n subrIndex = self.pop()\n subr = self.localSubrs[subrIndex + self.localBias]\n self.execute(subr)\n\n def op_callgsubr(self, index):\n subrIndex = self.pop()\n subr = 
self.globalSubrs[subrIndex + self.globalBias]\n self.execute(subr)\n\n def op_hstem(self, index):\n self.countHints()\n\n def op_vstem(self, index):\n self.countHints()\n\n def op_hstemhm(self, index):\n self.countHints()\n\n def op_vstemhm(self, index):\n self.countHints()\n\n def op_hintmask(self, index):\n if not self.hintMaskBytes:\n self.countHints()\n self.hintMaskBytes = (self.hintCount + 7) // 8\n hintMaskBytes, index = self.callingStack[-1].getBytes(index, self.hintMaskBytes)\n return hintMaskBytes, index\n\n op_cntrmask = op_hintmask\n\n def countHints(self):\n args = self.popall()\n self.hintCount = self.hintCount + len(args) // 2\n\n # misc\n def op_and(self, index):\n raise NotImplementedError\n\n def op_or(self, index):\n raise NotImplementedError\n\n def op_not(self, index):\n raise NotImplementedError\n\n def op_store(self, index):\n raise NotImplementedError\n\n def op_abs(self, index):\n raise NotImplementedError\n\n def op_add(self, index):\n raise NotImplementedError\n\n def op_sub(self, index):\n raise NotImplementedError\n\n def op_div(self, index):\n raise NotImplementedError\n\n def op_load(self, index):\n raise NotImplementedError\n\n def op_neg(self, index):\n raise NotImplementedError\n\n def op_eq(self, index):\n raise NotImplementedError\n\n def op_drop(self, index):\n raise NotImplementedError\n\n def op_put(self, index):\n raise NotImplementedError\n\n def op_get(self, index):\n raise NotImplementedError\n\n def op_ifelse(self, index):\n raise NotImplementedError\n\n def op_random(self, index):\n raise NotImplementedError\n\n def op_mul(self, index):\n raise NotImplementedError\n\n def op_sqrt(self, index):\n raise NotImplementedError\n\n def op_dup(self, index):\n raise NotImplementedError\n\n def op_exch(self, index):\n raise NotImplementedError\n\n def op_index(self, index):\n raise NotImplementedError\n\n def op_roll(self, index):\n raise NotImplementedError\n\n def op_blend(self, index):\n if self.numRegions == 0:\n 
self.numRegions = self.private.getNumRegions()\n numBlends = self.pop()\n numOps = numBlends * (self.numRegions + 1)\n if self.blender is None:\n del self.operandStack[\n -(numOps - numBlends) :\n ] # Leave the default operands on the stack.\n else:\n argi = len(self.operandStack) - numOps\n end_args = tuplei = argi + numBlends\n while argi < end_args:\n next_ti = tuplei + self.numRegions\n deltas = self.operandStack[tuplei:next_ti]\n delta = self.blender(self.vsIndex, deltas)\n self.operandStack[argi] += delta\n tuplei = next_ti\n argi += 1\n self.operandStack[end_args:] = []\n\n def op_vsindex(self, index):\n vi = self.pop()\n self.vsIndex = vi\n self.numRegions = self.private.getNumRegions(vi)\n\n\nt1Operators = [\n # opcode name\n (1, "hstem"),\n (3, "vstem"),\n (4, "vmoveto"),\n (5, "rlineto"),\n (6, "hlineto"),\n (7, "vlineto"),\n (8, "rrcurveto"),\n (9, "closepath"),\n (10, "callsubr"),\n (11, "return"),\n (13, "hsbw"),\n (14, "endchar"),\n (21, "rmoveto"),\n (22, "hmoveto"),\n (30, "vhcurveto"),\n (31, "hvcurveto"),\n ((12, 0), "dotsection"),\n ((12, 1), "vstem3"),\n ((12, 2), "hstem3"),\n ((12, 6), "seac"),\n ((12, 7), "sbw"),\n ((12, 12), "div"),\n ((12, 16), "callothersubr"),\n ((12, 17), "pop"),\n ((12, 33), "setcurrentpoint"),\n]\n\n\nclass T2WidthExtractor(SimpleT2Decompiler):\n def __init__(\n self,\n localSubrs,\n globalSubrs,\n nominalWidthX,\n defaultWidthX,\n private=None,\n blender=None,\n ):\n SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private, blender)\n self.nominalWidthX = nominalWidthX\n self.defaultWidthX = defaultWidthX\n\n def reset(self):\n SimpleT2Decompiler.reset(self)\n self.gotWidth = 0\n self.width = 0\n\n def popallWidth(self, evenOdd=0):\n args = self.popall()\n if not self.gotWidth:\n if evenOdd ^ (len(args) % 2):\n # For CFF2 charstrings, this should never happen\n assert (\n self.defaultWidthX is not None\n ), "CFF2 CharStrings must not have an initial width value"\n self.width = self.nominalWidthX + args[0]\n 
args = args[1:]\n else:\n self.width = self.defaultWidthX\n self.gotWidth = 1\n return args\n\n def countHints(self):\n args = self.popallWidth()\n self.hintCount = self.hintCount + len(args) // 2\n\n def op_rmoveto(self, index):\n self.popallWidth()\n\n def op_hmoveto(self, index):\n self.popallWidth(1)\n\n def op_vmoveto(self, index):\n self.popallWidth(1)\n\n def op_endchar(self, index):\n self.popallWidth()\n\n\nclass T2OutlineExtractor(T2WidthExtractor):\n def __init__(\n self,\n pen,\n localSubrs,\n globalSubrs,\n nominalWidthX,\n defaultWidthX,\n private=None,\n blender=None,\n ):\n T2WidthExtractor.__init__(\n self,\n localSubrs,\n globalSubrs,\n nominalWidthX,\n defaultWidthX,\n private,\n blender,\n )\n self.pen = pen\n self.subrLevel = 0\n\n def reset(self):\n T2WidthExtractor.reset(self)\n self.currentPoint = (0, 0)\n self.sawMoveTo = 0\n self.subrLevel = 0\n\n def execute(self, charString):\n self.subrLevel += 1\n super().execute(charString)\n self.subrLevel -= 1\n if self.subrLevel == 0:\n self.endPath()\n\n def _nextPoint(self, point):\n x, y = self.currentPoint\n point = x + point[0], y + point[1]\n self.currentPoint = point\n return point\n\n def rMoveTo(self, point):\n self.pen.moveTo(self._nextPoint(point))\n self.sawMoveTo = 1\n\n def rLineTo(self, point):\n if not self.sawMoveTo:\n self.rMoveTo((0, 0))\n self.pen.lineTo(self._nextPoint(point))\n\n def rCurveTo(self, pt1, pt2, pt3):\n if not self.sawMoveTo:\n self.rMoveTo((0, 0))\n nextPoint = self._nextPoint\n self.pen.curveTo(nextPoint(pt1), nextPoint(pt2), nextPoint(pt3))\n\n def closePath(self):\n if self.sawMoveTo:\n self.pen.closePath()\n self.sawMoveTo = 0\n\n def endPath(self):\n # In T2 there are no open paths, so always do a closePath when\n # finishing a sub path. 
We avoid spurious calls to closePath()\n # because its a real T1 op we're emulating in T2 whereas\n # endPath() is just a means to that emulation\n if self.sawMoveTo:\n self.closePath()\n\n #\n # hint operators\n #\n # def op_hstem(self, index):\n # self.countHints()\n # def op_vstem(self, index):\n # self.countHints()\n # def op_hstemhm(self, index):\n # self.countHints()\n # def op_vstemhm(self, index):\n # self.countHints()\n # def op_hintmask(self, index):\n # self.countHints()\n # def op_cntrmask(self, index):\n # self.countHints()\n\n #\n # path constructors, moveto\n #\n def op_rmoveto(self, index):\n self.endPath()\n self.rMoveTo(self.popallWidth())\n\n def op_hmoveto(self, index):\n self.endPath()\n self.rMoveTo((self.popallWidth(1)[0], 0))\n\n def op_vmoveto(self, index):\n self.endPath()\n self.rMoveTo((0, self.popallWidth(1)[0]))\n\n def op_endchar(self, index):\n self.endPath()\n args = self.popallWidth()\n if args:\n from fontTools.encodings.StandardEncoding import StandardEncoding\n\n # endchar can do seac accent bulding; The T2 spec says it's deprecated,\n # but recent software that shall remain nameless does output it.\n adx, ady, bchar, achar = args\n baseGlyph = StandardEncoding[bchar]\n self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0))\n accentGlyph = StandardEncoding[achar]\n self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady))\n\n #\n # path constructors, lines\n #\n def op_rlineto(self, index):\n args = self.popall()\n for i in range(0, len(args), 2):\n point = args[i : i + 2]\n self.rLineTo(point)\n\n def op_hlineto(self, index):\n self.alternatingLineto(1)\n\n def op_vlineto(self, index):\n self.alternatingLineto(0)\n\n #\n # path constructors, curves\n #\n def op_rrcurveto(self, index):\n """{dxa dya dxb dyb dxc dyc}+ rrcurveto"""\n args = self.popall()\n for i in range(0, len(args), 6):\n (\n dxa,\n dya,\n dxb,\n dyb,\n dxc,\n dyc,\n ) = args[i : i + 6]\n self.rCurveTo((dxa, dya), (dxb, dyb), (dxc, dyc))\n\n def 
op_rcurveline(self, index):\n """{dxa dya dxb dyb dxc dyc}+ dxd dyd rcurveline"""\n args = self.popall()\n for i in range(0, len(args) - 2, 6):\n dxb, dyb, dxc, dyc, dxd, dyd = args[i : i + 6]\n self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd))\n self.rLineTo(args[-2:])\n\n def op_rlinecurve(self, index):\n """{dxa dya}+ dxb dyb dxc dyc dxd dyd rlinecurve"""\n args = self.popall()\n lineArgs = args[:-6]\n for i in range(0, len(lineArgs), 2):\n self.rLineTo(lineArgs[i : i + 2])\n dxb, dyb, dxc, dyc, dxd, dyd = args[-6:]\n self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd))\n\n def op_vvcurveto(self, index):\n "dx1? {dya dxb dyb dyc}+ vvcurveto"\n args = self.popall()\n if len(args) % 2:\n dx1 = args[0]\n args = args[1:]\n else:\n dx1 = 0\n for i in range(0, len(args), 4):\n dya, dxb, dyb, dyc = args[i : i + 4]\n self.rCurveTo((dx1, dya), (dxb, dyb), (0, dyc))\n dx1 = 0\n\n def op_hhcurveto(self, index):\n """dy1? {dxa dxb dyb dxc}+ hhcurveto"""\n args = self.popall()\n if len(args) % 2:\n dy1 = args[0]\n args = args[1:]\n else:\n dy1 = 0\n for i in range(0, len(args), 4):\n dxa, dxb, dyb, dxc = args[i : i + 4]\n self.rCurveTo((dxa, dy1), (dxb, dyb), (dxc, 0))\n dy1 = 0\n\n def op_vhcurveto(self, index):\n """dy1 dx2 dy2 dx3 {dxa dxb dyb dyc dyd dxe dye dxf}* dyf? vhcurveto (30)\n {dya dxb dyb dxc dxd dxe dye dyf}+ dxf? 
vhcurveto\n """\n args = self.popall()\n while args:\n args = self.vcurveto(args)\n if args:\n args = self.hcurveto(args)\n\n def op_hvcurveto(self, index):\n """dx1 dx2 dy2 dy3 {dya dxb dyb dxc dxd dxe dye dyf}* dxf?\n {dxa dxb dyb dyc dyd dxe dye dxf}+ dyf?\n """\n args = self.popall()\n while args:\n args = self.hcurveto(args)\n if args:\n args = self.vcurveto(args)\n\n #\n # path constructors, flex\n #\n def op_hflex(self, index):\n dx1, dx2, dy2, dx3, dx4, dx5, dx6 = self.popall()\n dy1 = dy3 = dy4 = dy6 = 0\n dy5 = -dy2\n self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))\n self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))\n\n def op_flex(self, index):\n dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, dx6, dy6, fd = self.popall()\n self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))\n self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))\n\n def op_hflex1(self, index):\n dx1, dy1, dx2, dy2, dx3, dx4, dx5, dy5, dx6 = self.popall()\n dy3 = dy4 = 0\n dy6 = -(dy1 + dy2 + dy3 + dy4 + dy5)\n\n self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))\n self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))\n\n def op_flex1(self, index):\n dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, d6 = self.popall()\n dx = dx1 + dx2 + dx3 + dx4 + dx5\n dy = dy1 + dy2 + dy3 + dy4 + dy5\n if abs(dx) > abs(dy):\n dx6 = d6\n dy6 = -dy\n else:\n dx6 = -dx\n dy6 = d6\n self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))\n self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))\n\n # misc\n def op_and(self, index):\n raise NotImplementedError\n\n def op_or(self, index):\n raise NotImplementedError\n\n def op_not(self, index):\n raise NotImplementedError\n\n def op_store(self, index):\n raise NotImplementedError\n\n def op_abs(self, index):\n raise NotImplementedError\n\n def op_add(self, index):\n raise NotImplementedError\n\n def op_sub(self, index):\n raise NotImplementedError\n\n def op_div(self, index):\n num2 = self.pop()\n num1 = self.pop()\n d1 = num1 // num2\n d2 = num1 / num2\n if d1 == d2:\n 
self.push(d1)\n else:\n self.push(d2)\n\n def op_load(self, index):\n raise NotImplementedError\n\n def op_neg(self, index):\n raise NotImplementedError\n\n def op_eq(self, index):\n raise NotImplementedError\n\n def op_drop(self, index):\n raise NotImplementedError\n\n def op_put(self, index):\n raise NotImplementedError\n\n def op_get(self, index):\n raise NotImplementedError\n\n def op_ifelse(self, index):\n raise NotImplementedError\n\n def op_random(self, index):\n raise NotImplementedError\n\n def op_mul(self, index):\n raise NotImplementedError\n\n def op_sqrt(self, index):\n raise NotImplementedError\n\n def op_dup(self, index):\n raise NotImplementedError\n\n def op_exch(self, index):\n raise NotImplementedError\n\n def op_index(self, index):\n raise NotImplementedError\n\n def op_roll(self, index):\n raise NotImplementedError\n\n #\n # miscellaneous helpers\n #\n def alternatingLineto(self, isHorizontal):\n args = self.popall()\n for arg in args:\n if isHorizontal:\n point = (arg, 0)\n else:\n point = (0, arg)\n self.rLineTo(point)\n isHorizontal = not isHorizontal\n\n def vcurveto(self, args):\n dya, dxb, dyb, dxc = args[:4]\n args = args[4:]\n if len(args) == 1:\n dyc = args[0]\n args = []\n else:\n dyc = 0\n self.rCurveTo((0, dya), (dxb, dyb), (dxc, dyc))\n return args\n\n def hcurveto(self, args):\n dxa, dxb, dyb, dyc = args[:4]\n args = args[4:]\n if len(args) == 1:\n dxc = args[0]\n args = []\n else:\n dxc = 0\n self.rCurveTo((dxa, 0), (dxb, dyb), (dxc, dyc))\n return args\n\n\nclass T1OutlineExtractor(T2OutlineExtractor):\n def __init__(self, pen, subrs):\n self.pen = pen\n self.subrs = subrs\n self.reset()\n\n def reset(self):\n self.flexing = 0\n self.width = 0\n self.sbx = 0\n T2OutlineExtractor.reset(self)\n\n def endPath(self):\n if self.sawMoveTo:\n self.pen.endPath()\n self.sawMoveTo = 0\n\n def popallWidth(self, evenOdd=0):\n return self.popall()\n\n def exch(self):\n stack = self.operandStack\n stack[-1], stack[-2] = stack[-2], 
stack[-1]\n\n #\n # path constructors\n #\n def op_rmoveto(self, index):\n if self.flexing:\n return\n self.endPath()\n self.rMoveTo(self.popall())\n\n def op_hmoveto(self, index):\n if self.flexing:\n # We must add a parameter to the stack if we are flexing\n self.push(0)\n return\n self.endPath()\n self.rMoveTo((self.popall()[0], 0))\n\n def op_vmoveto(self, index):\n if self.flexing:\n # We must add a parameter to the stack if we are flexing\n self.push(0)\n self.exch()\n return\n self.endPath()\n self.rMoveTo((0, self.popall()[0]))\n\n def op_closepath(self, index):\n self.closePath()\n\n def op_setcurrentpoint(self, index):\n args = self.popall()\n x, y = args\n self.currentPoint = x, y\n\n def op_endchar(self, index):\n self.endPath()\n\n def op_hsbw(self, index):\n sbx, wx = self.popall()\n self.width = wx\n self.sbx = sbx\n self.currentPoint = sbx, self.currentPoint[1]\n\n def op_sbw(self, index):\n self.popall() # XXX\n\n #\n def op_callsubr(self, index):\n subrIndex = self.pop()\n subr = self.subrs[subrIndex]\n self.execute(subr)\n\n def op_callothersubr(self, index):\n subrIndex = self.pop()\n nArgs = self.pop()\n # print nArgs, subrIndex, "callothersubr"\n if subrIndex == 0 and nArgs == 3:\n self.doFlex()\n self.flexing = 0\n elif subrIndex == 1 and nArgs == 0:\n self.flexing = 1\n # ignore...\n\n def op_pop(self, index):\n pass # ignore...\n\n def doFlex(self):\n finaly = self.pop()\n finalx = self.pop()\n self.pop() # flex height is unused\n\n p3y = self.pop()\n p3x = self.pop()\n bcp4y = self.pop()\n bcp4x = self.pop()\n bcp3y = self.pop()\n bcp3x = self.pop()\n p2y = self.pop()\n p2x = self.pop()\n bcp2y = self.pop()\n bcp2x = self.pop()\n bcp1y = self.pop()\n bcp1x = self.pop()\n rpy = self.pop()\n rpx = self.pop()\n\n # call rrcurveto\n self.push(bcp1x + rpx)\n self.push(bcp1y + rpy)\n self.push(bcp2x)\n self.push(bcp2y)\n self.push(p2x)\n self.push(p2y)\n self.op_rrcurveto(None)\n\n # call rrcurveto\n self.push(bcp3x)\n self.push(bcp3y)\n 
self.push(bcp4x)\n self.push(bcp4y)\n self.push(p3x)\n self.push(p3y)\n self.op_rrcurveto(None)\n\n # Push back final coords so subr 0 can find them\n self.push(finalx)\n self.push(finaly)\n\n def op_dotsection(self, index):\n self.popall() # XXX\n\n def op_hstem3(self, index):\n self.popall() # XXX\n\n def op_seac(self, index):\n "asb adx ady bchar achar seac"\n from fontTools.encodings.StandardEncoding import StandardEncoding\n\n asb, adx, ady, bchar, achar = self.popall()\n baseGlyph = StandardEncoding[bchar]\n self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0))\n accentGlyph = StandardEncoding[achar]\n adx = adx + self.sbx - asb # seac weirdness\n self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady))\n\n def op_vstem3(self, index):\n self.popall() # XXX\n\n\nclass T2CharString(object):\n operandEncoding = t2OperandEncoding\n operators, opcodes = buildOperatorDict(t2Operators)\n decompilerClass = SimpleT2Decompiler\n outlineExtractor = T2OutlineExtractor\n\n def __init__(self, bytecode=None, program=None, private=None, globalSubrs=None):\n if program is None:\n program = []\n self.bytecode = bytecode\n self.program = program\n self.private = private\n self.globalSubrs = globalSubrs if globalSubrs is not None else []\n self._cur_vsindex = None\n\n def getNumRegions(self, vsindex=None):\n pd = self.private\n assert pd is not None\n if vsindex is not None:\n self._cur_vsindex = vsindex\n elif self._cur_vsindex is None:\n self._cur_vsindex = pd.vsindex if hasattr(pd, "vsindex") else 0\n return pd.getNumRegions(self._cur_vsindex)\n\n def __repr__(self):\n if self.bytecode is None:\n return "<%s (source) at %x>" % (self.__class__.__name__, id(self))\n else:\n return "<%s (bytecode) at %x>" % (self.__class__.__name__, id(self))\n\n def getIntEncoder(self):\n return encodeIntT2\n\n def getFixedEncoder(self):\n return encodeFixed\n\n def decompile(self):\n if not self.needsDecompilation():\n return\n subrs = getattr(self.private, "Subrs", [])\n decompiler = 
self.decompilerClass(subrs, self.globalSubrs, self.private)\n decompiler.execute(self)\n\n def draw(self, pen, blender=None):\n subrs = getattr(self.private, "Subrs", [])\n extractor = self.outlineExtractor(\n pen,\n subrs,\n self.globalSubrs,\n self.private.nominalWidthX,\n self.private.defaultWidthX,\n self.private,\n blender,\n )\n extractor.execute(self)\n self.width = extractor.width\n\n def calcBounds(self, glyphSet):\n boundsPen = BoundsPen(glyphSet)\n self.draw(boundsPen)\n return boundsPen.bounds\n\n def compile(self, isCFF2=False):\n if self.bytecode is not None:\n return\n opcodes = self.opcodes\n program = self.program\n\n if isCFF2:\n # If present, remove return and endchar operators.\n if program and program[-1] in ("return", "endchar"):\n program = program[:-1]\n elif program and not isinstance(program[-1], str):\n raise CharStringCompileError(\n "T2CharString or Subr has items on the stack after last operator."\n )\n\n bytecode = []\n encodeInt = self.getIntEncoder()\n encodeFixed = self.getFixedEncoder()\n i = 0\n end = len(program)\n while i < end:\n token = program[i]\n i = i + 1\n if isinstance(token, str):\n try:\n bytecode.extend(bytechr(b) for b in opcodes[token])\n except KeyError:\n raise CharStringCompileError("illegal operator: %s" % token)\n if token in ("hintmask", "cntrmask"):\n bytecode.append(program[i]) # hint mask\n i = i + 1\n elif isinstance(token, int):\n bytecode.append(encodeInt(token))\n elif isinstance(token, float):\n bytecode.append(encodeFixed(token))\n else:\n assert 0, "unsupported type: %s" % type(token)\n try:\n bytecode = bytesjoin(bytecode)\n except TypeError:\n log.error(bytecode)\n raise\n self.setBytecode(bytecode)\n\n def needsDecompilation(self):\n return self.bytecode is not None\n\n def setProgram(self, program):\n self.program = program\n self.bytecode = None\n\n def setBytecode(self, bytecode):\n self.bytecode = bytecode\n self.program = None\n\n def getToken(self, index, len=len, byteord=byteord, 
isinstance=isinstance):\n if self.bytecode is not None:\n if index >= len(self.bytecode):\n return None, 0, 0\n b0 = byteord(self.bytecode[index])\n index = index + 1\n handler = self.operandEncoding[b0]\n token, index = handler(self, b0, self.bytecode, index)\n else:\n if index >= len(self.program):\n return None, 0, 0\n token = self.program[index]\n index = index + 1\n isOperator = isinstance(token, str)\n return token, isOperator, index\n\n def getBytes(self, index, nBytes):\n if self.bytecode is not None:\n newIndex = index + nBytes\n bytes = self.bytecode[index:newIndex]\n index = newIndex\n else:\n bytes = self.program[index]\n index = index + 1\n assert len(bytes) == nBytes\n return bytes, index\n\n def handle_operator(self, operator):\n return operator\n\n def toXML(self, xmlWriter, ttFont=None):\n from fontTools.misc.textTools import num2binary\n\n if self.bytecode is not None:\n xmlWriter.dumphex(self.bytecode)\n else:\n index = 0\n args = []\n while True:\n token, isOperator, index = self.getToken(index)\n if token is None:\n break\n if isOperator:\n if token in ("hintmask", "cntrmask"):\n hintMask, isOperator, index = self.getToken(index)\n bits = []\n for byte in hintMask:\n bits.append(num2binary(byteord(byte), 8))\n hintMask = strjoin(bits)\n line = " ".join(args + [token, hintMask])\n else:\n line = " ".join(args + [token])\n xmlWriter.write(line)\n xmlWriter.newline()\n args = []\n else:\n if isinstance(token, float):\n token = floatToFixedToStr(token, precisionBits=16)\n else:\n token = str(token)\n args.append(token)\n if args:\n # NOTE: only CFF2 charstrings/subrs can have numeric arguments on\n # the stack after the last operator. 
Compiling this would fail if\n # this is part of CFF 1.0 table.\n line = " ".join(args)\n xmlWriter.write(line)\n\n def fromXML(self, name, attrs, content):\n from fontTools.misc.textTools import binary2num, readHex\n\n if attrs.get("raw"):\n self.setBytecode(readHex(content))\n return\n content = strjoin(content)\n content = content.split()\n program = []\n end = len(content)\n i = 0\n while i < end:\n token = content[i]\n i = i + 1\n try:\n token = int(token)\n except ValueError:\n try:\n token = strToFixedToFloat(token, precisionBits=16)\n except ValueError:\n program.append(token)\n if token in ("hintmask", "cntrmask"):\n mask = content[i]\n maskBytes = b""\n for j in range(0, len(mask), 8):\n maskBytes = maskBytes + bytechr(binary2num(mask[j : j + 8]))\n program.append(maskBytes)\n i = i + 1\n else:\n program.append(token)\n else:\n program.append(token)\n self.setProgram(program)\n\n\nclass T1CharString(T2CharString):\n operandEncoding = t1OperandEncoding\n operators, opcodes = buildOperatorDict(t1Operators)\n\n def __init__(self, bytecode=None, program=None, subrs=None):\n super().__init__(bytecode, program)\n self.subrs = subrs\n\n def getIntEncoder(self):\n return encodeIntT1\n\n def getFixedEncoder(self):\n def encodeFixed(value):\n raise TypeError("Type 1 charstrings don't support floating point operands")\n\n def decompile(self):\n if self.bytecode is None:\n return\n program = []\n index = 0\n while True:\n token, isOperator, index = self.getToken(index)\n if token is None:\n break\n program.append(token)\n self.setProgram(program)\n\n def draw(self, pen):\n extractor = T1OutlineExtractor(pen, self.subrs)\n extractor.execute(self)\n self.width = extractor.width\n\n\nclass DictDecompiler(object):\n operandEncoding = cffDictOperandEncoding\n\n def __init__(self, strings, parent=None):\n self.stack = []\n self.strings = strings\n self.dict = {}\n self.parent = parent\n\n def getDict(self):\n assert len(self.stack) == 0, "non-empty stack"\n return 
self.dict\n\n def decompile(self, data):\n index = 0\n lenData = len(data)\n push = self.stack.append\n while index < lenData:\n b0 = byteord(data[index])\n index = index + 1\n handler = self.operandEncoding[b0]\n value, index = handler(self, b0, data, index)\n if value is not None:\n push(value)\n\n def pop(self):\n value = self.stack[-1]\n del self.stack[-1]\n return value\n\n def popall(self):\n args = self.stack[:]\n del self.stack[:]\n return args\n\n def handle_operator(self, operator):\n operator, argType = operator\n if isinstance(argType, tuple):\n value = ()\n for i in range(len(argType) - 1, -1, -1):\n arg = argType[i]\n arghandler = getattr(self, "arg_" + arg)\n value = (arghandler(operator),) + value\n else:\n arghandler = getattr(self, "arg_" + argType)\n value = arghandler(operator)\n if operator == "blend":\n self.stack.extend(value)\n else:\n self.dict[operator] = value\n\n def arg_number(self, name):\n if isinstance(self.stack[0], list):\n out = self.arg_blend_number(self.stack)\n else:\n out = self.pop()\n return out\n\n def arg_blend_number(self, name):\n out = []\n blendArgs = self.pop()\n numMasters = len(blendArgs)\n out.append(blendArgs)\n out.append("blend")\n dummy = self.popall()\n return blendArgs\n\n def arg_SID(self, name):\n return self.strings[self.pop()]\n\n def arg_array(self, name):\n return self.popall()\n\n def arg_blendList(self, name):\n """\n There may be non-blend args at the top of the stack. We first calculate\n where the blend args start in the stack. These are the last\n numMasters*numBlends) +1 args.\n The blend args starts with numMasters relative coordinate values, the BlueValues in the list from the default master font. This is followed by\n numBlends list of values. Each of value in one of these lists is the\n Variable Font delta for the matching region.\n\n We re-arrange this to be a list of numMaster entries. 
Each entry starts with the corresponding default font relative value, and is followed by\n the delta values. We then convert the default values, the first item in each entry, to an absolute value.\n """\n vsindex = self.dict.get("vsindex", 0)\n numMasters = (\n self.parent.getNumRegions(vsindex) + 1\n ) # only a PrivateDict has blended ops.\n numBlends = self.pop()\n args = self.popall()\n numArgs = len(args)\n # The spec says that there should be no non-blended Blue Values,.\n assert numArgs == numMasters * numBlends\n value = [None] * numBlends\n numDeltas = numMasters - 1\n i = 0\n prevVal = 0\n while i < numBlends:\n newVal = args[i] + prevVal\n prevVal = newVal\n masterOffset = numBlends + (i * numDeltas)\n blendList = [newVal] + args[masterOffset : masterOffset + numDeltas]\n value[i] = blendList\n i += 1\n return value\n\n def arg_delta(self, name):\n valueList = self.popall()\n out = []\n if valueList and isinstance(valueList[0], list):\n # arg_blendList() has already converted these to absolute values.\n out = valueList\n else:\n current = 0\n for v in valueList:\n current = current + v\n out.append(current)\n return out\n\n\ndef calcSubrBias(subrs):\n nSubrs = len(subrs)\n if nSubrs < 1240:\n bias = 107\n elif nSubrs < 33900:\n bias = 1131\n else:\n bias = 32768\n return bias\n
|
.venv\Lib\site-packages\fontTools\misc\psCharStrings.py
|
psCharStrings.py
|
Python
| 44,532 | 0.95 | 0.203209 | 0.056082 |
vue-tools
| 380 |
2024-01-06T11:06:10.479250
|
GPL-3.0
| false |
2068539534e567a092ab2df8d56d677b
|
"""Python 2/3 compat layer leftovers."""\n\nimport decimal as _decimal\nimport math as _math\nimport warnings\nfrom contextlib import redirect_stderr, redirect_stdout\nfrom io import BytesIO\nfrom io import StringIO as UnicodeIO\nfrom types import SimpleNamespace\n\nfrom .textTools import Tag, bytechr, byteord, bytesjoin, strjoin, tobytes, tostr\n\nwarnings.warn(\n "The py23 module has been deprecated and will be removed in a future release. "\n "Please update your code.",\n DeprecationWarning,\n)\n\n__all__ = [\n "basestring",\n "bytechr",\n "byteord",\n "BytesIO",\n "bytesjoin",\n "open",\n "Py23Error",\n "range",\n "RecursionError",\n "round",\n "SimpleNamespace",\n "StringIO",\n "strjoin",\n "Tag",\n "tobytes",\n "tostr",\n "tounicode",\n "unichr",\n "unicode",\n "UnicodeIO",\n "xrange",\n "zip",\n]\n\n\nclass Py23Error(NotImplementedError):\n pass\n\n\nRecursionError = RecursionError\nStringIO = UnicodeIO\n\nbasestring = str\nisclose = _math.isclose\nisfinite = _math.isfinite\nopen = open\nrange = range\nround = round3 = round\nunichr = chr\nunicode = str\nzip = zip\n\ntounicode = tostr\n\n\ndef xrange(*args, **kwargs):\n raise Py23Error("'xrange' is not defined. Use 'range' instead.")\n\n\ndef round2(number, ndigits=None):\n """\n Implementation of Python 2 built-in round() function.\n Rounds a number to a given precision in decimal digits (default\n 0 digits). The result is a floating point number. 
Values are rounded\n to the closest multiple of 10 to the power minus ndigits; if two\n multiples are equally close, rounding is done away from 0.\n ndigits may be negative.\n See Python 2 documentation:\n https://docs.python.org/2/library/functions.html?highlight=round#round\n """\n if ndigits is None:\n ndigits = 0\n\n if ndigits < 0:\n exponent = 10 ** (-ndigits)\n quotient, remainder = divmod(number, exponent)\n if remainder >= exponent // 2 and number >= 0:\n quotient += 1\n return float(quotient * exponent)\n else:\n exponent = _decimal.Decimal("10") ** (-ndigits)\n\n d = _decimal.Decimal.from_float(number).quantize(\n exponent, rounding=_decimal.ROUND_HALF_UP\n )\n\n return float(d)\n
|
.venv\Lib\site-packages\fontTools\misc\py23.py
|
py23.py
|
Python
| 2,334 | 0.95 | 0.083333 | 0 |
vue-tools
| 12 |
2023-08-25T05:15:50.389902
|
Apache-2.0
| false |
0e184acdd0951c0e5e83d6a798efcd23
|
"""sstruct.py -- SuperStruct\n\nHigher level layer on top of the struct module, enabling to\nbind names to struct elements. The interface is similar to\nstruct, except the objects passed and returned are not tuples\n(or argument lists), but dictionaries or instances.\n\nJust like struct, we use fmt strings to describe a data\nstructure, except we use one line per element. Lines are\nseparated by newlines or semi-colons. Each line contains\neither one of the special struct characters ('@', '=', '<',\n'>' or '!') or a 'name:formatchar' combo (eg. 'myFloat:f').\nRepetitions, like the struct module offers them are not useful\nin this context, except for fixed length strings (eg. 'myInt:5h'\nis not allowed but 'myString:5s' is). The 'x' fmt character\n(pad byte) is treated as 'special', since it is by definition\nanonymous. Extra whitespace is allowed everywhere.\n\nThe sstruct module offers one feature that the "normal" struct\nmodule doesn't: support for fixed point numbers. These are spelled\nas "n.mF", where n is the number of bits before the point, and m\nthe number of bits after the point. Fixed point numbers get\nconverted to floats.\n\npack(fmt, object):\n 'object' is either a dictionary or an instance (or actually\n anything that has a __dict__ attribute). If it is a dictionary,\n its keys are used for names. If it is an instance, it's\n attributes are used to grab struct elements from. Returns\n a string containing the data.\n\nunpack(fmt, data, object=None)\n If 'object' is omitted (or None), a new dictionary will be\n returned. If 'object' is a dictionary, it will be used to add\n struct elements to. If it is an instance (or in fact anything\n that has a __dict__ attribute), an attribute will be added for\n each struct element. In the latter two cases, 'object' itself\n is returned.\n\nunpack2(fmt, data, object=None)\n Convenience function. Same as unpack, except data may be longer\n than needed. 
The returned value is a tuple: (object, leftoverdata).\n\ncalcsize(fmt)\n like struct.calcsize(), but uses our own fmt strings:\n it returns the size of the data in bytes.\n"""\n\nfrom fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi\nfrom fontTools.misc.textTools import tobytes, tostr\nimport struct\nimport re\n\n__version__ = "1.2"\n__copyright__ = "Copyright 1998, Just van Rossum <just@letterror.com>"\n\n\nclass Error(Exception):\n pass\n\n\ndef pack(fmt, obj):\n formatstring, names, fixes = getformat(fmt, keep_pad_byte=True)\n elements = []\n if not isinstance(obj, dict):\n obj = obj.__dict__\n string_index = formatstring\n if formatstring.startswith(">"):\n string_index = formatstring[1:]\n for ix, name in enumerate(names.keys()):\n value = obj[name]\n if name in fixes:\n # fixed point conversion\n value = fl2fi(value, fixes[name])\n elif isinstance(value, str):\n value = tobytes(value)\n elements.append(value)\n # Check it fits\n try:\n struct.pack(names[name], value)\n except Exception as e:\n raise ValueError(\n "Value %s does not fit in format %s for %s" % (value, names[name], name)\n ) from e\n data = struct.pack(*(formatstring,) + tuple(elements))\n return data\n\n\ndef unpack(fmt, data, obj=None):\n if obj is None:\n obj = {}\n data = tobytes(data)\n formatstring, names, fixes = getformat(fmt)\n if isinstance(obj, dict):\n d = obj\n else:\n d = obj.__dict__\n elements = struct.unpack(formatstring, data)\n for i in range(len(names)):\n name = list(names.keys())[i]\n value = elements[i]\n if name in fixes:\n # fixed point conversion\n value = fi2fl(value, fixes[name])\n elif isinstance(value, bytes):\n try:\n value = tostr(value)\n except UnicodeDecodeError:\n pass\n d[name] = value\n return obj\n\n\ndef unpack2(fmt, data, obj=None):\n length = calcsize(fmt)\n return unpack(fmt, data[:length], obj), data[length:]\n\n\ndef calcsize(fmt):\n formatstring, names, fixes = getformat(fmt)\n return struct.calcsize(formatstring)\n\n\n# 
matches "name:formatchar" (whitespace is allowed)\n_elementRE = re.compile(\n r"\s*" # whitespace\n r"([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier)\n r"\s*:\s*" # whitespace : whitespace\n r"([xcbB?hHiIlLqQfd]|" # formatchar...\n r"[0-9]+[ps]|" # ...formatchar...\n r"([0-9]+)\.([0-9]+)(F))" # ...formatchar\n r"\s*" # whitespace\n r"(#.*)?$" # [comment] + end of string\n)\n\n# matches the special struct fmt chars and 'x' (pad byte)\n_extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$")\n\n# matches an "empty" string, possibly containing whitespace and/or a comment\n_emptyRE = re.compile(r"\s*(#.*)?$")\n\n_fixedpointmappings = {8: "b", 16: "h", 32: "l"}\n\n_formatcache = {}\n\n\ndef getformat(fmt, keep_pad_byte=False):\n fmt = tostr(fmt, encoding="ascii")\n try:\n formatstring, names, fixes = _formatcache[fmt]\n except KeyError:\n lines = re.split("[\n;]", fmt)\n formatstring = ""\n names = {}\n fixes = {}\n for line in lines:\n if _emptyRE.match(line):\n continue\n m = _extraRE.match(line)\n if m:\n formatchar = m.group(1)\n if formatchar != "x" and formatstring:\n raise Error("a special fmt char must be first")\n else:\n m = _elementRE.match(line)\n if not m:\n raise Error("syntax error in fmt: '%s'" % line)\n name = m.group(1)\n formatchar = m.group(2)\n if keep_pad_byte or formatchar != "x":\n names[name] = formatchar\n if m.group(3):\n # fixed point\n before = int(m.group(3))\n after = int(m.group(4))\n bits = before + after\n if bits not in [8, 16, 32]:\n raise Error("fixed point must be 8, 16 or 32 bits long")\n formatchar = _fixedpointmappings[bits]\n names[name] = formatchar\n assert m.group(5) == "F"\n fixes[name] = after\n formatstring += formatchar\n _formatcache[fmt] = formatstring, names, fixes\n return formatstring, names, fixes\n\n\ndef _test():\n fmt = """\n # comments are allowed\n > # big endian (see documentation for struct)\n # empty lines are allowed:\n\n ashort: h\n along: l\n abyte: b # a byte\n achar: c\n astr: 5s\n afloat: f; adouble: d # 
multiple "statements" are allowed\n afixed: 16.16F\n abool: ?\n apad: x\n """\n\n print("size:", calcsize(fmt))\n\n class foo(object):\n pass\n\n i = foo()\n\n i.ashort = 0x7FFF\n i.along = 0x7FFFFFFF\n i.abyte = 0x7F\n i.achar = "a"\n i.astr = "12345"\n i.afloat = 0.5\n i.adouble = 0.5\n i.afixed = 1.5\n i.abool = True\n\n data = pack(fmt, i)\n print("data:", repr(data))\n print(unpack(fmt, data))\n i2 = foo()\n unpack(fmt, data, i2)\n print(vars(i2))\n\n\nif __name__ == "__main__":\n _test()\n
|
.venv\Lib\site-packages\fontTools\misc\sstruct.py
|
sstruct.py
|
Python
| 7,389 | 0.95 | 0.151515 | 0.046392 |
vue-tools
| 465 |
2025-03-28T15:44:26.775683
|
GPL-3.0
| false |
4cd22da24d31d2fe4524edc68e9b9299
|
"""Helpers for writing unit tests."""\n\nfrom collections.abc import Iterable\nfrom io import BytesIO\nimport os\nimport re\nimport shutil\nimport sys\nimport tempfile\nfrom unittest import TestCase as _TestCase\nfrom fontTools.config import Config\nfrom fontTools.misc.textTools import tobytes\nfrom fontTools.misc.xmlWriter import XMLWriter\n\n\ndef parseXML(xmlSnippet):\n """Parses a snippet of XML.\n\n Input can be either a single string (unicode or UTF-8 bytes), or a\n a sequence of strings.\n\n The result is in the same format that would be returned by\n XMLReader, but the parser imposes no constraints on the root\n element so it can be called on small snippets of TTX files.\n """\n # To support snippets with multiple elements, we add a fake root.\n reader = TestXMLReader_()\n xml = b"<root>"\n if isinstance(xmlSnippet, bytes):\n xml += xmlSnippet\n elif isinstance(xmlSnippet, str):\n xml += tobytes(xmlSnippet, "utf-8")\n elif isinstance(xmlSnippet, Iterable):\n xml += b"".join(tobytes(s, "utf-8") for s in xmlSnippet)\n else:\n raise TypeError(\n "expected string or sequence of strings; found %r"\n % type(xmlSnippet).__name__\n )\n xml += b"</root>"\n reader.parser.Parse(xml, 1)\n return reader.root[2]\n\n\ndef parseXmlInto(font, parseInto, xmlSnippet):\n parsed_xml = [e for e in parseXML(xmlSnippet.strip()) if not isinstance(e, str)]\n for name, attrs, content in parsed_xml:\n parseInto.fromXML(name, attrs, content, font)\n if hasattr(parseInto, "populateDefaults"):\n parseInto.populateDefaults()\n return parseInto\n\n\nclass FakeFont:\n def __init__(self, glyphs):\n self.glyphOrder_ = glyphs\n self.reverseGlyphOrderDict_ = {g: i for i, g in enumerate(glyphs)}\n self.lazy = False\n self.tables = {}\n self.cfg = Config()\n\n def __contains__(self, tag):\n return tag in self.tables\n\n def __getitem__(self, tag):\n return self.tables[tag]\n\n def __setitem__(self, tag, table):\n self.tables[tag] = table\n\n def get(self, tag, default=None):\n return 
self.tables.get(tag, default)\n\n def getGlyphID(self, name):\n return self.reverseGlyphOrderDict_[name]\n\n def getGlyphIDMany(self, lst):\n return [self.getGlyphID(gid) for gid in lst]\n\n def getGlyphName(self, glyphID):\n if glyphID < len(self.glyphOrder_):\n return self.glyphOrder_[glyphID]\n else:\n return "glyph%.5d" % glyphID\n\n def getGlyphNameMany(self, lst):\n return [self.getGlyphName(gid) for gid in lst]\n\n def getGlyphOrder(self):\n return self.glyphOrder_\n\n def getReverseGlyphMap(self):\n return self.reverseGlyphOrderDict_\n\n def getGlyphNames(self):\n return sorted(self.getGlyphOrder())\n\n\nclass TestXMLReader_(object):\n def __init__(self):\n from xml.parsers.expat import ParserCreate\n\n self.parser = ParserCreate()\n self.parser.StartElementHandler = self.startElement_\n self.parser.EndElementHandler = self.endElement_\n self.parser.CharacterDataHandler = self.addCharacterData_\n self.root = None\n self.stack = []\n\n def startElement_(self, name, attrs):\n element = (name, attrs, [])\n if self.stack:\n self.stack[-1][2].append(element)\n else:\n self.root = element\n self.stack.append(element)\n\n def endElement_(self, name):\n self.stack.pop()\n\n def addCharacterData_(self, data):\n self.stack[-1][2].append(data)\n\n\ndef makeXMLWriter(newlinestr="\n"):\n # don't write OS-specific new lines\n writer = XMLWriter(BytesIO(), newlinestr=newlinestr)\n # erase XML declaration\n writer.file.seek(0)\n writer.file.truncate()\n return writer\n\n\ndef getXML(func, ttFont=None):\n """Call the passed toXML function and return the written content as a\n list of lines (unicode strings).\n Result is stripped of XML declaration and OS-specific newline characters.\n """\n writer = makeXMLWriter()\n func(writer, ttFont)\n xml = writer.file.getvalue().decode("utf-8")\n # toXML methods must always end with a writer.newline()\n assert xml.endswith("\n")\n return xml.splitlines()\n\n\ndef stripVariableItemsFromTTX(\n string: str,\n ttLibVersion: bool = True,\n 
checkSumAdjustment: bool = True,\n modified: bool = True,\n created: bool = True,\n sfntVersion: bool = False, # opt-in only\n) -> str:\n """Strip stuff like ttLibVersion, checksums, timestamps, etc. from TTX dumps."""\n # ttlib changes with the fontTools version\n if ttLibVersion:\n string = re.sub(' ttLibVersion="[^"]+"', "", string)\n # sometimes (e.g. some subsetter tests) we don't care whether it's OTF or TTF\n if sfntVersion:\n string = re.sub(' sfntVersion="[^"]+"', "", string)\n # head table checksum and creation and mod date changes with each save.\n if checkSumAdjustment:\n string = re.sub('<checkSumAdjustment value="[^"]+"/>', "", string)\n if modified:\n string = re.sub('<modified value="[^"]+"/>', "", string)\n if created:\n string = re.sub('<created value="[^"]+"/>', "", string)\n return string\n\n\nclass MockFont(object):\n """A font-like object that automatically adds any looked up glyphname\n to its glyphOrder."""\n\n def __init__(self):\n self._glyphOrder = [".notdef"]\n\n class AllocatingDict(dict):\n def __missing__(reverseDict, key):\n self._glyphOrder.append(key)\n gid = len(reverseDict)\n reverseDict[key] = gid\n return gid\n\n self._reverseGlyphOrder = AllocatingDict({".notdef": 0})\n self.lazy = False\n\n def getGlyphID(self, glyph):\n gid = self._reverseGlyphOrder[glyph]\n return gid\n\n def getReverseGlyphMap(self):\n return self._reverseGlyphOrder\n\n def getGlyphName(self, gid):\n return self._glyphOrder[gid]\n\n def getGlyphOrder(self):\n return self._glyphOrder\n\n\nclass TestCase(_TestCase):\n def __init__(self, methodName):\n _TestCase.__init__(self, methodName)\n # Python 3 renamed assertRaisesRegexp to assertRaisesRegex,\n # and fires deprecation warnings if a program uses the old name.\n if not hasattr(self, "assertRaisesRegex"):\n self.assertRaisesRegex = self.assertRaisesRegexp\n\n\nclass DataFilesHandler(TestCase):\n def setUp(self):\n self.tempdir = None\n self.num_tempfiles = 0\n\n def tearDown(self):\n if self.tempdir:\n 
shutil.rmtree(self.tempdir)\n\n def getpath(self, testfile):\n folder = os.path.dirname(sys.modules[self.__module__].__file__)\n return os.path.join(folder, "data", testfile)\n\n def temp_dir(self):\n if not self.tempdir:\n self.tempdir = tempfile.mkdtemp()\n\n def temp_font(self, font_path, file_name):\n self.temp_dir()\n temppath = os.path.join(self.tempdir, file_name)\n shutil.copy2(font_path, temppath)\n return temppath\n
|
.venv\Lib\site-packages\fontTools\misc\testTools.py
|
testTools.py
|
Python
| 7,285 | 0.95 | 0.261803 | 0.048913 |
python-kit
| 619 |
2024-02-28T07:23:25.158151
|
BSD-3-Clause
| true |
165a5127292cb3a2866f4844954d4b40
|
"""fontTools.misc.textTools.py -- miscellaneous routines."""\n\nimport ast\nimport string\n\n\n# alias kept for backward compatibility\nsafeEval = ast.literal_eval\n\n\nclass Tag(str):\n @staticmethod\n def transcode(blob):\n if isinstance(blob, bytes):\n blob = blob.decode("latin-1")\n return blob\n\n def __new__(self, content):\n return str.__new__(self, self.transcode(content))\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __eq__(self, other):\n return str.__eq__(self, self.transcode(other))\n\n def __hash__(self):\n return str.__hash__(self)\n\n def tobytes(self):\n return self.encode("latin-1")\n\n\ndef readHex(content):\n """Convert a list of hex strings to binary data."""\n return deHexStr(strjoin(chunk for chunk in content if isinstance(chunk, str)))\n\n\ndef deHexStr(hexdata):\n """Convert a hex string to binary data."""\n hexdata = strjoin(hexdata.split())\n if len(hexdata) % 2:\n hexdata = hexdata + "0"\n data = []\n for i in range(0, len(hexdata), 2):\n data.append(bytechr(int(hexdata[i : i + 2], 16)))\n return bytesjoin(data)\n\n\ndef hexStr(data):\n """Convert binary data to a hex string."""\n h = string.hexdigits\n r = ""\n for c in data:\n i = byteord(c)\n r = r + h[(i >> 4) & 0xF] + h[i & 0xF]\n return r\n\n\ndef num2binary(l, bits=32):\n items = []\n binary = ""\n for i in range(bits):\n if l & 0x1:\n binary = "1" + binary\n else:\n binary = "0" + binary\n l = l >> 1\n if not ((i + 1) % 8):\n items.append(binary)\n binary = ""\n if binary:\n items.append(binary)\n items.reverse()\n assert l in (0, -1), "number doesn't fit in number of bits"\n return " ".join(items)\n\n\ndef binary2num(bin):\n bin = strjoin(bin.split())\n l = 0\n for digit in bin:\n l = l << 1\n if digit != "0":\n l = l | 0x1\n return l\n\n\ndef caselessSort(alist):\n """Return a sorted copy of a list. 
If there are only strings\n in the list, it will not consider case.\n """\n\n try:\n return sorted(alist, key=lambda a: (a.lower(), a))\n except TypeError:\n return sorted(alist)\n\n\ndef pad(data, size):\n r"""Pad byte string 'data' with null bytes until its length is a\n multiple of 'size'.\n\n >>> len(pad(b'abcd', 4))\n 4\n >>> len(pad(b'abcde', 2))\n 6\n >>> len(pad(b'abcde', 4))\n 8\n >>> pad(b'abcdef', 4) == b'abcdef\x00\x00'\n True\n """\n data = tobytes(data)\n if size > 1:\n remainder = len(data) % size\n if remainder:\n data += b"\0" * (size - remainder)\n return data\n\n\ndef tostr(s, encoding="ascii", errors="strict"):\n if not isinstance(s, str):\n return s.decode(encoding, errors)\n else:\n return s\n\n\ndef tobytes(s, encoding="ascii", errors="strict"):\n if isinstance(s, str):\n return s.encode(encoding, errors)\n else:\n return bytes(s)\n\n\ndef bytechr(n):\n return bytes([n])\n\n\ndef byteord(c):\n return c if isinstance(c, int) else ord(c)\n\n\ndef strjoin(iterable, joiner=""):\n return tostr(joiner).join(iterable)\n\n\ndef bytesjoin(iterable, joiner=b""):\n return tobytes(joiner).join(tobytes(item) for item in iterable)\n\n\nif __name__ == "__main__":\n import doctest, sys\n\n sys.exit(doctest.testmod().failed)\n
|
.venv\Lib\site-packages\fontTools\misc\textTools.py
|
textTools.py
|
Python
| 3,531 | 0.95 | 0.266234 | 0.00885 |
python-kit
| 304 |
2023-08-24T02:31:22.547135
|
MIT
| false |
a88613eea79685c629d8a2bee280b779
|
"""fontTools.misc.timeTools.py -- tools for working with OpenType timestamps.\n"""\n\nimport os\nimport time\nfrom datetime import datetime, timezone\nimport calendar\n\n\nepoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))\n\nDAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]\nMONTHNAMES = [\n None,\n "Jan",\n "Feb",\n "Mar",\n "Apr",\n "May",\n "Jun",\n "Jul",\n "Aug",\n "Sep",\n "Oct",\n "Nov",\n "Dec",\n]\n\n\ndef asctime(t=None):\n """\n Convert a tuple or struct_time representing a time as returned by gmtime()\n or localtime() to a 24-character string of the following form:\n\n >>> asctime(time.gmtime(0))\n 'Thu Jan 1 00:00:00 1970'\n\n If t is not provided, the current time as returned by localtime() is used.\n Locale information is not used by asctime().\n\n This is meant to normalise the output of the built-in time.asctime() across\n different platforms and Python versions.\n In Python 3.x, the day of the month is right-justified, whereas on Windows\n Python 2.7 it is padded with zeros.\n\n See https://github.com/fonttools/fonttools/issues/455\n """\n if t is None:\n t = time.localtime()\n s = "%s %s %2s %s" % (\n DAYNAMES[t.tm_wday],\n MONTHNAMES[t.tm_mon],\n t.tm_mday,\n time.strftime("%H:%M:%S %Y", t),\n )\n return s\n\n\ndef timestampToString(value):\n return asctime(time.gmtime(max(0, value + epoch_diff)))\n\n\ndef timestampFromString(value):\n wkday, mnth = value[:7].split()\n t = datetime.strptime(value[7:], " %d %H:%M:%S %Y")\n t = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc)\n wkday_idx = DAYNAMES.index(wkday)\n assert t.weekday() == wkday_idx, '"' + value + '" has inconsistent weekday'\n return int(t.timestamp()) - epoch_diff\n\n\ndef timestampNow():\n # https://reproducible-builds.org/specs/source-date-epoch/\n source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH")\n if source_date_epoch is not None:\n return int(source_date_epoch) - epoch_diff\n return int(time.time() - epoch_diff)\n\n\ndef 
timestampSinceEpoch(value):\n return int(value - epoch_diff)\n\n\nif __name__ == "__main__":\n import sys\n import doctest\n\n sys.exit(doctest.testmod().failed)\n
|
.venv\Lib\site-packages\fontTools\misc\timeTools.py
|
timeTools.py
|
Python
| 2,322 | 0.95 | 0.102273 | 0.014925 |
vue-tools
| 982 |
2024-12-23T17:14:56.231165
|
BSD-3-Clause
| false |
a81e21c8437baea246d1d9e3635aab56
|
"""Generic tools for working with trees."""\n\nfrom math import ceil, log\n\n\ndef build_n_ary_tree(leaves, n):\n """Build N-ary tree from sequence of leaf nodes.\n\n Return a list of lists where each non-leaf node is a list containing\n max n nodes.\n """\n if not leaves:\n return []\n\n assert n > 1\n\n depth = ceil(log(len(leaves), n))\n\n if depth <= 1:\n return list(leaves)\n\n # Fully populate complete subtrees of root until we have enough leaves left\n root = []\n unassigned = None\n full_step = n ** (depth - 1)\n for i in range(0, len(leaves), full_step):\n subtree = leaves[i : i + full_step]\n if len(subtree) < full_step:\n unassigned = subtree\n break\n while len(subtree) > n:\n subtree = [subtree[k : k + n] for k in range(0, len(subtree), n)]\n root.append(subtree)\n\n if unassigned:\n # Recurse to fill the last subtree, which is the only partially populated one\n subtree = build_n_ary_tree(unassigned, n)\n if len(subtree) <= n - len(root):\n # replace last subtree with its children if they can still fit\n root.extend(subtree)\n else:\n root.append(subtree)\n assert len(root) <= n\n\n return root\n
|
.venv\Lib\site-packages\fontTools\misc\treeTools.py
|
treeTools.py
|
Python
| 1,314 | 0.95 | 0.244444 | 0.085714 |
python-kit
| 356 |
2025-04-15T01:43:30.349990
|
Apache-2.0
| false |
6b80d6b9e8364e5327787fe9eb50cbe7
|
from numbers import Number
import math
import operator
import warnings


__all__ = ["Vector"]


class Vector(tuple):
    """A math-like vector.

    Represents an n-dimensional numeric vector.  ``Vector`` objects support
    vector addition and subtraction, scalar multiplication and division,
    negation, rounding, and comparison tests.
    """

    __slots__ = ()

    def __new__(cls, values, keep=False):
        if keep is not False:
            warnings.warn(
                "the 'keep' argument has been deprecated",
                DeprecationWarning,
            )
        if type(values) == Vector:
            # Vectors are immutable, so the existing instance can be shared.
            return values
        return super().__new__(cls, values)

    def __repr__(self):
        return f"{self.__class__.__name__}({super().__repr__()})"

    def _elementwise(self, other, op):
        # Apply ``op`` component-by-component against another Vector, or
        # broadcast a scalar over every component.
        if isinstance(other, Vector):
            assert len(self) == len(other)
            return self.__class__(op(l, r) for l, r in zip(self, other))
        if isinstance(other, Number):
            return self.__class__(op(component, other) for component in self)
        raise NotImplementedError()

    def _scalarwise(self, other, op):
        # Apply ``op`` against a scalar only; vector operands are rejected.
        if isinstance(other, Number):
            return self.__class__(op(component, other) for component in self)
        raise NotImplementedError()

    def _mapped(self, op):
        # Apply a unary ``op`` to every component.
        return self.__class__(op(component) for component in self)

    def __add__(self, other):
        return self._elementwise(other, operator.add)

    __radd__ = __add__

    def __sub__(self, other):
        return self._elementwise(other, operator.sub)

    def __rsub__(self, other):
        return self._elementwise(other, _flipped_sub)

    def __mul__(self, other):
        return self._scalarwise(other, operator.mul)

    __rmul__ = __mul__

    def __truediv__(self, other):
        return self._scalarwise(other, operator.truediv)

    def __rtruediv__(self, other):
        return self._scalarwise(other, _flipped_truediv)

    def __pos__(self):
        return self._mapped(operator.pos)

    def __neg__(self):
        return self._mapped(operator.neg)

    def __round__(self, *, round=round):
        return self._mapped(round)

    def __eq__(self, other):
        if isinstance(other, list):
            # bw compat Vector([1, 2, 3]) == [1, 2, 3]
            other = tuple(other)
        return super().__eq__(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __bool__(self):
        return any(self)

    __nonzero__ = __bool__

    def __abs__(self):
        return math.sqrt(sum(component * component for component in self))

    def length(self):
        """Return the length of the vector. Equivalent to abs(vector)."""
        return abs(self)

    def normalized(self):
        """Return the normalized vector of the vector."""
        return self / abs(self)

    def dot(self, other):
        """Performs vector dot product, returning the sum of
        ``a[0] * b[0], a[1] * b[1], ...``"""
        assert len(self) == len(other)
        return sum(l * r for l, r in zip(self, other))

    # Deprecated methods/properties

    def toInt(self):
        warnings.warn(
            "the 'toInt' method has been deprecated, use round(vector) instead",
            DeprecationWarning,
        )
        return self.__round__()

    @property
    def values(self):
        warnings.warn(
            "the 'values' attribute has been deprecated, use "
            "the vector object itself instead",
            DeprecationWarning,
        )
        return list(self)

    @values.setter
    def values(self, values):
        raise AttributeError(
            "can't set attribute, the 'values' attribute has been deprecated",
        )

    def isclose(self, other: "Vector", **kwargs) -> bool:
        """Return True if the vector is close to another Vector."""
        assert len(self) == len(other)
        return all(math.isclose(l, r, **kwargs) for l, r in zip(self, other))


def _flipped_sub(a, b):
    return operator.sub(b, a)


def _flipped_truediv(a, b):
    return operator.truediv(b, a)
|
.venv\Lib\site-packages\fontTools\misc\vector.py
|
vector.py
|
Python
| 4,209 | 0.95 | 0.285714 | 0.027778 |
python-kit
| 174 |
2023-10-29T19:53:47.199468
|
GPL-3.0
| false |
d9a57053955de64db99b8971178d5bb7
|
"""Generic visitor pattern implementation for Python objects."""

import enum
import weakref


class Visitor(object):
    # When a registered visit() returns None, defaultStop decides whether the
    # visitor still descends into the value (False, the default) or stops.
    defaultStop = False

    _visitors = {
        # By default we skip visiting weak references to avoid recursion
        # issues. Users can override this by registering a visit
        # function for weakref.ProxyType.
        weakref.ProxyType: {None: lambda self, obj, *args, **kwargs: False}
    }

    @classmethod
    def _register(celf, clazzes_attrs):
        """Return a decorator registering a visit() function for each
        (classes, attributes) pair on this Visitor subclass."""
        assert celf != Visitor, "Subclass Visitor instead."
        # Give the subclass its own registry instead of mutating the base's.
        if "_visitors" not in celf.__dict__:
            celf._visitors = {}

        def wrapper(method):
            assert method.__name__ == "visit"
            for clazzes, attrs in clazzes_attrs:
                # Normalize a single class / single attr name to tuples.
                if type(clazzes) != tuple:
                    clazzes = (clazzes,)
                if type(attrs) == str:
                    attrs = (attrs,)
                for clazz in clazzes:
                    _visitors = celf._visitors.setdefault(clazz, {})
                    for attr in attrs:
                        assert attr not in _visitors, (
                            "Oops, class '%s' has visitor function for '%s' defined already."
                            % (clazz.__name__, attr)
                        )
                        _visitors[attr] = method
            return None

        return wrapper

    @classmethod
    def register(celf, clazzes):
        """Decorator: register a visit() function for whole objects of the
        given class(es).  The attribute key None marks a whole-object visitor."""
        if type(clazzes) != tuple:
            clazzes = (clazzes,)
        return celf._register([(clazzes, (None,))])

    @classmethod
    def register_attr(celf, clazzes, attrs):
        """Decorator: register a visit() function for the named attribute(s)
        of the given class(es)."""
        clazzes_attrs = []
        if type(clazzes) != tuple:
            clazzes = (clazzes,)
        if type(attrs) == str:
            attrs = (attrs,)
        for clazz in clazzes:
            clazzes_attrs.append((clazz, attrs))
        return celf._register(clazzes_attrs)

    @classmethod
    def register_attrs(celf, clazzes_attrs):
        """Decorator: register a visit() function for explicit
        (classes, attributes) pairs."""
        return celf._register(clazzes_attrs)

    @classmethod
    def _visitorsFor(celf, thing, _default={}):
        """Return the attr->method registry matching type(thing), walking the
        visitor class's MRO outer and the thing's type MRO inner."""
        # NOTE: _default is a shared mutable default; it is only ever read.
        typ = type(thing)

        # Rebinding `celf` in the loop is deliberate: search each registry up
        # the visitor hierarchy until a class without `_visitors` is reached.
        for celf in celf.mro():
            _visitors = getattr(celf, "_visitors", None)
            if _visitors is None:
                break

            for base in typ.mro():
                m = celf._visitors.get(base, None)
                if m is not None:
                    return m

        return _default

    def visitObject(self, obj, *args, **kwargs):
        """Called to visit an object. This function loops over all non-private
        attributes of the objects and calls any user-registered (via
        @register_attr() or @register_attrs()) visit() functions.

        If there is no user-registered visit function, or if there is and it
        returns True, or it returns None (or doesn't return anything) and
        visitor.defaultStop is False (default), then the visitor will proceed
        to call self.visitAttr()"""

        keys = sorted(vars(obj).keys())
        _visitors = self._visitorsFor(obj)
        # "*" acts as a wildcard visitor for attributes without their own.
        defaultVisitor = _visitors.get("*", None)
        for key in keys:
            if key[0] == "_":
                continue
            value = getattr(obj, key)
            visitorFunc = _visitors.get(key, defaultVisitor)
            if visitorFunc is not None:
                ret = visitorFunc(self, obj, key, value, *args, **kwargs)
                if ret == False or (ret is None and self.defaultStop):
                    continue
            self.visitAttr(obj, key, value, *args, **kwargs)

    def visitAttr(self, obj, attr, value, *args, **kwargs):
        """Called to visit an attribute of an object."""
        self.visit(value, *args, **kwargs)

    def visitList(self, obj, *args, **kwargs):
        """Called to visit any value that is a list."""
        for value in obj:
            self.visit(value, *args, **kwargs)

    def visitDict(self, obj, *args, **kwargs):
        """Called to visit any value that is a dictionary."""
        for value in obj.values():
            self.visit(value, *args, **kwargs)

    def visitLeaf(self, obj, *args, **kwargs):
        """Called to visit any value that is not an object, list,
        or dictionary."""
        pass

    def visit(self, obj, *args, **kwargs):
        """This is the main entry to the visitor. The visitor will visit object
        obj.

        The visitor will first determine if there is a registered (via
        @register()) visit function for the type of object. If there is, it
        will be called, and (visitor, obj, *args, **kwargs) will be passed to
        the user visit function.

        If there is no user-registered visit function, or if there is and it
        returns True, or it returns None (or doesn't return anything) and
        visitor.defaultStop is False (default), then the visitor will proceed
        to dispatch to one of self.visitObject(), self.visitList(),
        self.visitDict(), or self.visitLeaf() (any of which can be overridden in
        a subclass)."""

        visitorFunc = self._visitorsFor(obj).get(None, None)
        if visitorFunc is not None:
            ret = visitorFunc(self, obj, *args, **kwargs)
            if ret == False or (ret is None and self.defaultStop):
                return
        # Enum members have a __dict__ but must be treated as leaves.
        if hasattr(obj, "__dict__") and not isinstance(obj, enum.Enum):
            self.visitObject(obj, *args, **kwargs)
        elif isinstance(obj, list):
            self.visitList(obj, *args, **kwargs)
        elif isinstance(obj, dict):
            self.visitDict(obj, *args, **kwargs)
        else:
            self.visitLeaf(obj, *args, **kwargs)
|
.venv\Lib\site-packages\fontTools\misc\visitor.py
|
visitor.py
|
Python
| 5,760 | 0.95 | 0.34 | 0.024 |
awesome-app
| 606 |
2023-08-01T10:12:44.427778
|
BSD-3-Clause
| false |
a967a76961b7bd8f3ee21672e1edb1d9
|
"""xmlWriter.py -- Simple XML authoring class"""

from fontTools.misc.textTools import byteord, strjoin, tobytes, tostr
import sys
import os
import string

# NOTE(review): multiple spaces were collapsed in the corrupted copy of this
# file; two-space indentation restored per the module's own output convention.
INDENT = "  "


class XMLWriter(object):
    """Minimal streaming XML writer.

    Emits an XML declaration on construction, then exposes primitives for
    writing tags, text, comments and hex dumps with automatic indentation.
    Only UTF-8 output is supported.
    """

    def __init__(
        self,
        fileOrPath,
        indentwhite=INDENT,
        idlefunc=None,
        encoding="utf_8",
        newlinestr="\n",
    ):
        if encoding.lower().replace("-", "").replace("_", "") != "utf8":
            raise Exception("Only UTF-8 encoding is supported.")
        if fileOrPath == "-":
            fileOrPath = sys.stdout
        if not hasattr(fileOrPath, "write"):
            self.filename = fileOrPath
            self.file = open(fileOrPath, "wb")
            self._closeStream = True
        else:
            self.filename = None
            # assume writable file object
            self.file = fileOrPath
            self._closeStream = False

        # Figure out if writer expects bytes or unicodes
        try:
            # The bytes check should be first. See:
            # https://github.com/fonttools/fonttools/pull/233
            self.file.write(b"")
            self.totype = tobytes
        except TypeError:
            # This better not fail.
            self.file.write("")
            self.totype = tostr
        self.indentwhite = self.totype(indentwhite)
        if newlinestr is None:
            self.newlinestr = self.totype(os.linesep)
        else:
            self.newlinestr = self.totype(newlinestr)
        self.indentlevel = 0
        self.stack = []
        self.needindent = 1
        self.idlefunc = idlefunc
        self.idlecounter = 0
        self._writeraw('<?xml version="1.0" encoding="UTF-8"?>')
        self.newline()

    def __enter__(self):
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        self.close()

    def close(self):
        # Only close streams this writer opened itself.
        if self._closeStream:
            self.file.close()

    def write(self, string, indent=True):
        """Writes text."""
        self._writeraw(escape(string), indent=indent)

    def writecdata(self, string):
        """Writes text in a CDATA section."""
        self._writeraw("<![CDATA[" + string + "]]>")

    def write8bit(self, data, strip=False):
        """Writes a bytes() sequence into the XML, escaping
        non-ASCII bytes. When this is read in xmlReader,
        the original bytes can be recovered by encoding to
        'latin-1'."""
        self._writeraw(escape8bit(data), strip=strip)

    def write_noindent(self, string):
        """Writes text without indentation."""
        self._writeraw(escape(string), indent=False)

    def _writeraw(self, data, indent=True, strip=False):
        """Writes bytes, possibly indented."""
        if indent and self.needindent:
            self.file.write(self.indentlevel * self.indentwhite)
            self.needindent = 0
        s = self.totype(data, encoding="utf_8")
        if strip:
            s = s.strip()
        self.file.write(s)

    def newline(self):
        self.file.write(self.newlinestr)
        self.needindent = 1
        # Give the idle callback a chance to run every 100 lines.
        idlecounter = self.idlecounter
        if not idlecounter % 100 and self.idlefunc is not None:
            self.idlefunc()
        self.idlecounter = idlecounter + 1

    def comment(self, data):
        data = escape(data)
        lines = data.split("\n")
        self._writeraw("<!-- " + lines[0])
        for line in lines[1:]:
            self.newline()
            # Continuation lines are aligned under the "<!-- " prefix.
            self._writeraw("     " + line)
        self._writeraw(" -->")

    def simpletag(self, _TAG_, *args, **kwargs):
        attrdata = self.stringifyattrs(*args, **kwargs)
        data = "<%s%s/>" % (_TAG_, attrdata)
        self._writeraw(data)

    def begintag(self, _TAG_, *args, **kwargs):
        attrdata = self.stringifyattrs(*args, **kwargs)
        data = "<%s%s>" % (_TAG_, attrdata)
        self._writeraw(data)
        self.stack.append(_TAG_)
        self.indent()

    def endtag(self, _TAG_):
        assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag"
        del self.stack[-1]
        self.dedent()
        data = "</%s>" % _TAG_
        self._writeraw(data)

    def dumphex(self, data):
        """Write binary data as lines of 16 bytes in hex, grouped by 8."""
        linelength = 16
        hexlinelength = linelength * 2
        chunksize = 8
        for i in range(0, len(data), linelength):
            hexline = hexStr(data[i : i + linelength])
            line = ""
            white = ""
            for j in range(0, hexlinelength, chunksize):
                line = line + white + hexline[j : j + chunksize]
                white = " "
            self._writeraw(line)
            self.newline()

    def indent(self):
        self.indentlevel = self.indentlevel + 1

    def dedent(self):
        assert self.indentlevel > 0
        self.indentlevel = self.indentlevel - 1

    def stringifyattrs(self, *args, **kwargs):
        """Render attributes either from kwargs (sorted) or from a single
        positional sequence of (name, value) pairs (order preserved)."""
        if kwargs:
            assert not args
            attributes = sorted(kwargs.items())
        elif args:
            assert len(args) == 1
            attributes = args[0]
        else:
            return ""
        data = ""
        for attr, value in attributes:
            if not isinstance(value, (bytes, str)):
                value = str(value)
            data = data + ' %s="%s"' % (attr, escapeattr(value))
        return data


def escape(data):
    """Escape '&', '<', '>' and carriage returns for XML text content.

    BUG FIX: the corrupted copy replaced each character with itself (the
    entity references had been HTML-decoded), making escaping a no-op;
    the XML 1.0 predefined entities are restored here.
    """
    data = tostr(data, "utf_8")
    # '&' must be escaped first so the entities below aren't double-escaped.
    data = data.replace("&", "&amp;")
    data = data.replace("<", "&lt;")
    data = data.replace(">", "&gt;")
    data = data.replace("\r", "&#13;")
    return data


def escapeattr(data):
    """Escape text for use inside a double-quoted attribute value."""
    data = escape(data)
    data = data.replace('"', "&quot;")
    return data


def escape8bit(data):
    """Input is Unicode string."""

    def escapechar(c):
        n = ord(c)
        # Printable ASCII passes through except XML-special characters.
        if 32 <= n <= 127 and c not in "<&>":
            return c
        else:
            return "&#" + repr(n) + ";"

    return strjoin(map(escapechar, data.decode("latin-1")))


def hexStr(s):
    """Return the lowercase hex representation of a byte sequence."""
    h = string.hexdigits
    r = ""
    for c in s:
        i = byteord(c)
        r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
    return r
|
.venv\Lib\site-packages\fontTools\misc\xmlWriter.py
|
xmlWriter.py
|
Python
| 6,250 | 0.95 | 0.210784 | 0.02907 |
node-utils
| 319 |
2023-10-16T03:13:30.874735
|
Apache-2.0
| false |
e601e5c7b0f0ddde72e62151d810f97f
|
"""Empty __init__.py file to signal Python this directory is a package."""\n
|
.venv\Lib\site-packages\fontTools\misc\__init__.py
|
__init__.py
|
Python
| 76 | 0.5 | 0 | 0 |
node-utils
| 396 |
2025-02-03T16:18:04.708116
|
Apache-2.0
| false |
6d412be7408e8f32685229b58fb23583
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\plistlib\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 26,479 | 0.95 | 0.077586 | 0.003155 |
react-lib
| 662 |
2023-09-25T14:02:58.879022
|
Apache-2.0
| false |
20ab290d822c4d658e1cdb3135d08c27
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\arrayTools.cpython-313.pyc
|
arrayTools.cpython-313.pyc
|
Other
| 14,094 | 0.95 | 0.029605 | 0.003861 |
node-utils
| 11 |
2024-08-08T03:37:09.725677
|
BSD-3-Clause
| false |
99a9f12830656e1848925e7f1f4b1ad9
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\bezierTools.cpython-313.pyc
|
bezierTools.cpython-313.pyc
|
Other
| 54,370 | 0.95 | 0.021776 | 0.008333 |
react-lib
| 694 |
2025-01-20T21:31:08.701562
|
BSD-3-Clause
| false |
a6539914d578c277e1af7ee9e32d1ddd
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\classifyTools.cpython-313.pyc
|
classifyTools.cpython-313.pyc
|
Other
| 6,582 | 0.95 | 0.096 | 0.009091 |
node-utils
| 621 |
2024-07-23T13:34:28.478360
|
BSD-3-Clause
| false |
c37ad685a97c0efaa613c3fef5a1741d
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\cliTools.cpython-313.pyc
|
cliTools.cpython-313.pyc
|
Other
| 2,534 | 0.95 | 0.162791 | 0 |
node-utils
| 732 |
2023-08-07T23:44:50.988067
|
MIT
| false |
ec7773d089c8d1d4a275b17448dc0417
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\configTools.cpython-313.pyc
|
configTools.cpython-313.pyc
|
Other
| 15,932 | 0.95 | 0.112426 | 0.034483 |
awesome-app
| 609 |
2024-02-06T21:31:05.948170
|
GPL-3.0
| false |
47104d2d618864bbd93227402d778d55
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\cython.cpython-313.pyc
|
cython.cpython-313.pyc
|
Other
| 1,173 | 0.95 | 0 | 0 |
react-lib
| 827 |
2024-09-09T14:20:28.187331
|
GPL-3.0
| false |
1e45164dd213eb205da1c5f3ac791169
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\dictTools.cpython-313.pyc
|
dictTools.cpython-313.pyc
|
Other
| 3,922 | 0.8 | 0.02 | 0 |
python-kit
| 148 |
2024-12-04T17:38:23.317216
|
GPL-3.0
| false |
a5351108e4d763a5551b80f644e6a5db
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\eexec.cpython-313.pyc
|
eexec.cpython-313.pyc
|
Other
| 4,286 | 0.95 | 0.047059 | 0 |
awesome-app
| 433 |
2025-02-21T15:02:22.802108
|
GPL-3.0
| false |
d4322de9c3483ef36db688453e8740a2
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\encodingTools.cpython-313.pyc
|
encodingTools.cpython-313.pyc
|
Other
| 1,839 | 0.8 | 0.142857 | 0 |
vue-tools
| 786 |
2024-01-29T15:18:01.917361
|
Apache-2.0
| false |
3beb04573317e123582c396f4057afc3
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\etree.cpython-313.pyc
|
etree.cpython-313.pyc
|
Other
| 15,943 | 0.95 | 0.107438 | 0 |
python-kit
| 288 |
2024-04-23T21:25:10.768958
|
BSD-3-Clause
| false |
731f9ce058f845721d647f332fc79ff4
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\filenames.cpython-313.pyc
|
filenames.cpython-313.pyc
|
Other
| 8,330 | 0.8 | 0.028902 | 0 |
python-kit
| 926 |
2024-01-21T02:25:31.474277
|
GPL-3.0
| false |
fdf83e417308541d24f40f90eea1ebd6
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\fixedTools.cpython-313.pyc
|
fixedTools.cpython-313.pyc
|
Other
| 8,028 | 0.95 | 0.025 | 0 |
python-kit
| 985 |
2024-02-26T08:50:08.534431
|
BSD-3-Clause
| false |
067ed138bf131b40fa905466b2f9b289
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\intTools.cpython-313.pyc
|
intTools.cpython-313.pyc
|
Other
| 992 | 0.8 | 0 | 0 |
node-utils
| 855 |
2024-04-17T22:58:42.900524
|
BSD-3-Clause
| false |
a6857c33dba0552d79213e80e57554ea
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\iterTools.cpython-313.pyc
|
iterTools.cpython-313.pyc
|
Other
| 738 | 0.8 | 0 | 0 |
awesome-app
| 852 |
2023-07-18T06:24:41.439770
|
MIT
| false |
8ec633e0cdde83f24f80a22253b0759b
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\lazyTools.cpython-313.pyc
|
lazyTools.cpython-313.pyc
|
Other
| 2,418 | 0.7 | 0 | 0 |
node-utils
| 410 |
2024-12-06T11:47:34.936159
|
MIT
| false |
b7674ed273396e2e2dc71c925f542f41
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\loggingTools.cpython-313.pyc
|
loggingTools.cpython-313.pyc
|
Other
| 24,533 | 0.95 | 0.10596 | 0 |
vue-tools
| 773 |
2024-06-14T21:40:17.684276
|
MIT
| false |
90a00d5275f8ba33c595e9096565eba7
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\macCreatorType.cpython-313.pyc
|
macCreatorType.cpython-313.pyc
|
Other
| 2,434 | 0.8 | 0.114286 | 0 |
awesome-app
| 197 |
2025-01-11T09:06:40.357320
|
BSD-3-Clause
| false |
91a53953aa4046b830f05d26b300ba19
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\macRes.cpython-313.pyc
|
macRes.cpython-313.pyc
|
Other
| 13,014 | 0.8 | 0.029197 | 0 |
awesome-app
| 194 |
2024-10-13T04:42:16.080307
|
Apache-2.0
| false |
3cc13d06c5c566a49ab698ca31a52f2f
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\psCharStrings.cpython-313.pyc
|
psCharStrings.cpython-313.pyc
|
Other
| 62,224 | 0.6 | 0.002123 | 0.015837 |
vue-tools
| 924 |
2025-03-02T05:29:46.592866
|
BSD-3-Clause
| false |
c422214e4ea21e2dfd6c0e1fd676298f
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\psLib.cpython-313.pyc
|
psLib.cpython-313.pyc
|
Other
| 17,198 | 0.95 | 0.011696 | 0.012195 |
vue-tools
| 374 |
2024-03-10T23:32:59.360108
|
GPL-3.0
| false |
2c240f3e69e70a092fc853664d79e94d
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\psOperators.cpython-313.pyc
|
psOperators.cpython-313.pyc
|
Other
| 31,860 | 0.95 | 0.016129 | 0.00578 |
vue-tools
| 106 |
2024-04-12T01:07:08.267745
|
GPL-3.0
| false |
dbce4d85df57ea104835c18a8f29c4ce
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\py23.cpython-313.pyc
|
py23.cpython-313.pyc
|
Other
| 2,909 | 0.95 | 0.05 | 0 |
python-kit
| 788 |
2023-10-31T09:59:37.198363
|
GPL-3.0
| false |
34d95ab21c72c2e55807beb898785c90
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\roundTools.cpython-313.pyc
|
roundTools.cpython-313.pyc
|
Other
| 3,811 | 0.95 | 0.088235 | 0 |
react-lib
| 665 |
2025-07-09T17:49:31.282645
|
MIT
| false |
27b29d39dbf9bfc29edb1cad1b7734c1
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\sstruct.cpython-313.pyc
|
sstruct.cpython-313.pyc
|
Other
| 9,025 | 0.95 | 0.048611 | 0.02963 |
node-utils
| 114 |
2024-08-09T15:23:18.431770
|
Apache-2.0
| false |
c846f2af968db14a85d34229e78913fc
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\symfont.cpython-313.pyc
|
symfont.cpython-313.pyc
|
Other
| 13,010 | 0.95 | 0.06701 | 0.016575 |
vue-tools
| 269 |
2024-08-30T12:41:57.185776
|
Apache-2.0
| false |
6c9e13b29a59b94c469c0a0b0eeccdbc
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\testTools.cpython-313.pyc
|
testTools.cpython-313.pyc
|
Other
| 13,339 | 0.95 | 0.021978 | 0 |
node-utils
| 250 |
2025-06-17T14:40:28.372650
|
GPL-3.0
| true |
d53d88b970fdadcf8d5fdf6d1141136f
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\textTools.cpython-313.pyc
|
textTools.cpython-313.pyc
|
Other
| 7,010 | 0.8 | 0 | 0 |
node-utils
| 561 |
2023-12-20T08:26:26.214300
|
GPL-3.0
| false |
3e42fefb00c7f97735f47732ddf1a6f8
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\timeTools.cpython-313.pyc
|
timeTools.cpython-313.pyc
|
Other
| 3,636 | 0.8 | 0.019231 | 0.022727 |
python-kit
| 487 |
2023-10-13T08:17:54.196336
|
MIT
| false |
d11b0d7acd24323831c577efea4e5bdb
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\transform.cpython-313.pyc
|
transform.cpython-313.pyc
|
Other
| 19,135 | 0.95 | 0.024876 | 0.002849 |
vue-tools
| 167 |
2023-09-11T14:18:21.565444
|
Apache-2.0
| false |
9c3ad719266150f6f2d159baa7fd369d
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\treeTools.cpython-313.pyc
|
treeTools.cpython-313.pyc
|
Other
| 1,614 | 0.8 | 0.083333 | 0 |
node-utils
| 414 |
2023-07-31T10:27:40.031161
|
GPL-3.0
| false |
73df73a931ebe4da9dee81d1fdbb0389
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\vector.cpython-313.pyc
|
vector.cpython-313.pyc
|
Other
| 9,107 | 0.8 | 0.02 | 0 |
react-lib
| 357 |
2025-03-04T03:44:39.789108
|
BSD-3-Clause
| false |
59de5da1403ea58ade7994d0673c1100
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\visitor.cpython-313.pyc
|
visitor.cpython-313.pyc
|
Other
| 7,314 | 0.95 | 0.175676 | 0.014493 |
node-utils
| 711 |
2025-05-10T13:54:29.682322
|
MIT
| false |
fac7e748bf42003b89c94d055393cfbd
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\xmlReader.cpython-313.pyc
|
xmlReader.cpython-313.pyc
|
Other
| 9,209 | 0.8 | 0 | 0 |
awesome-app
| 690 |
2023-09-12T06:37:45.960388
|
MIT
| false |
80d80cce3a24d80711ce77b6e76d453c
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\xmlWriter.cpython-313.pyc
|
xmlWriter.cpython-313.pyc
|
Other
| 10,060 | 0.8 | 0.014286 | 0 |
awesome-app
| 730 |
2024-10-27T09:14:05.171703
|
GPL-3.0
| false |
7a6ca36c5853c9ce54b81d802cc3bc13
|
\n\n
|
.venv\Lib\site-packages\fontTools\misc\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 272 | 0.7 | 0 | 0 |
awesome-app
| 294 |
2024-03-17T17:39:42.747752
|
GPL-3.0
| false |
1d19d59ccc125c7669eb7f1465f9cd83
|
# FontDame-to-FontTools for OpenType Layout tables
#
# Source language spec is available at:
# http://monotype.github.io/OpenType_Table_Source/otl_source.html
# https://github.com/Monotype/OpenType_Table_Source/

from fontTools import ttLib
from fontTools.ttLib.tables._c_m_a_p import cmap_classes
from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otBase import ValueRecord, valueRecordFormatDict
from fontTools.otlLib import builder as otl
from contextlib import contextmanager
from fontTools.ttLib import newTable
from fontTools.feaLib.lookupDebugInfo import LOOKUP_DEBUG_ENV_VAR, LOOKUP_DEBUG_INFO_KEY
from operator import setitem
import os
import logging


# Error hierarchy: all parser errors derive from MtiLibError.
class MtiLibError(Exception):
    pass


class ReferenceNotFoundError(MtiLibError):
    pass


class FeatureNotFoundError(ReferenceNotFoundError):
    pass


class LookupNotFoundError(ReferenceNotFoundError):
    pass


log = logging.getLogger("fontTools.mtiLib")


def makeGlyph(s):
    # Resolve a FontDame glyph reference: "U xxxx" (hex Unicode),
    # "# nnn" (glyph index), or a literal glyph name.
    if s[:2] in ["U ", "u "]:
        return ttLib.TTFont._makeGlyphName(int(s[2:], 16))
    elif s[:2] == "# ":
        return "glyph%.5d" % int(s[2:])
    assert s.find(" ") < 0, "Space found in glyph name: %s" % s
    assert s, "Glyph name is empty"
    return s


def makeGlyphs(l):
    # Vectorized makeGlyph() over a sequence of references.
    return [makeGlyph(g) for g in l]


def mapLookup(sym, mapping):
    # Lookups are addressed by name. So resolved them using a map if available.
    # Fallback to parsing as lookup index if a map isn't provided.
    if mapping is not None:
        try:
            idx = mapping[sym]
        except KeyError:
            raise LookupNotFoundError(sym)
    else:
        idx = int(sym)
    return idx


def mapFeature(sym, mapping):
    # Features are referenced by index according the spec. So, if symbol is an
    # integer, use it directly. Otherwise look up in the map if provided.
    try:
        idx = int(sym)
    except ValueError:
        try:
            idx = mapping[sym]
        except KeyError:
            raise FeatureNotFoundError(sym)
    return idx


def setReference(mapper, mapping, sym, setter, collection, key):
    # Resolve `sym` via `mapper` and store it with setter(collection, key, _).
    # If the target isn't defined yet and `mapping` supports deferral
    # (DeferredMapping), queue the assignment to run once it is.
    try:
        mapped = mapper(sym, mapping)
    except ReferenceNotFoundError as e:
        try:
            if mapping is not None:
                mapping.addDeferredMapping(
                    lambda ref: setter(collection, key, ref), sym, e
                )
                return
        except AttributeError:
            pass
        raise
    setter(collection, key, mapped)


class DeferredMapping(dict):
    # A symbol->index dict that can queue assignments for symbols defined
    # later in the source; applyDeferredMappings() flushes the queue.

    def __init__(self):
        self._deferredMappings = []

    def addDeferredMapping(self, setter, sym, e):
        log.debug("Adding deferred mapping for symbol '%s' %s", sym, type(e).__name__)
        self._deferredMappings.append((setter, sym, e))

    def applyDeferredMappings(self):
        for setter, sym, e in self._deferredMappings:
            log.debug(
                "Applying deferred mapping for symbol '%s' %s", sym, type(e).__name__
            )
            try:
                mapped = self[sym]
            except KeyError:
                # Still undefined: re-raise the original not-found error.
                raise e
            setter(mapped)
            log.debug("Set to %s", mapped)
        self._deferredMappings = []


def parseScriptList(lines, featureMap=None):
    """Parse a FontDame 'script table' section into an ot.ScriptList."""
    self = ot.ScriptList()
    records = []
    with lines.between("script table"):
        for line in lines:
            # Pad short rows so the 4-field unpack below always succeeds.
            while len(line) < 4:
                line.append("")
            scriptTag, langSysTag, defaultFeature, features = line
            log.debug("Adding script %s language-system %s", scriptTag, langSysTag)

            langSys = ot.LangSys()
            langSys.LookupOrder = None
            if defaultFeature:
                setReference(
                    mapFeature,
                    featureMap,
                    defaultFeature,
                    setattr,
                    langSys,
                    "ReqFeatureIndex",
                )
            else:
                langSys.ReqFeatureIndex = 0xFFFF
            syms = stripSplitComma(features)
            # Placeholder indices; overwritten by setReference (possibly
            # deferred until the feature table has been parsed).
            langSys.FeatureIndex = theList = [3] * len(syms)
            for i, sym in enumerate(syms):
                setReference(mapFeature, featureMap, sym, setitem, theList, i)
            langSys.FeatureCount = len(langSys.FeatureIndex)

            # Reuse the record for a script tag seen before.
            script = [s for s in records if s.ScriptTag == scriptTag]
            if script:
                script = script[0].Script
            else:
                scriptRec = ot.ScriptRecord()
                # Tags are space-padded to exactly 4 characters.
                scriptRec.ScriptTag = scriptTag + " " * (4 - len(scriptTag))
                scriptRec.Script = ot.Script()
                records.append(scriptRec)
                script = scriptRec.Script
                script.DefaultLangSys = None
                script.LangSysRecord = []
                script.LangSysCount = 0

            if langSysTag == "default":
                script.DefaultLangSys = langSys
            else:
                langSysRec = ot.LangSysRecord()
                langSysRec.LangSysTag = langSysTag + " " * (4 - len(langSysTag))
                langSysRec.LangSys = langSys
                script.LangSysRecord.append(langSysRec)
                script.LangSysCount = len(script.LangSysRecord)

    # OpenType requires records sorted by tag.
    for script in records:
        script.Script.LangSysRecord = sorted(
            script.Script.LangSysRecord, key=lambda rec: rec.LangSysTag
        )
    self.ScriptRecord = sorted(records, key=lambda rec: rec.ScriptTag)
    self.ScriptCount = len(self.ScriptRecord)
    return self


def parseFeatureList(lines, lookupMap=None, featureMap=None):
    """Parse a FontDame 'feature table' section into an ot.FeatureList."""
    self = ot.FeatureList()
    self.FeatureRecord = []
    with lines.between("feature table"):
        for line in lines:
            name, featureTag, lookups = line
            if featureMap is not None:
                assert name not in featureMap, "Duplicate feature name: %s" % name
                featureMap[name] = len(self.FeatureRecord)
            # If feature name is integer, make sure it matches its index.
            try:
                assert int(name) == len(self.FeatureRecord), "%d %d" % (
                    name,
                    len(self.FeatureRecord),
                )
            except ValueError:
                pass
            featureRec = ot.FeatureRecord()
            featureRec.FeatureTag = featureTag
            featureRec.Feature = ot.Feature()
            self.FeatureRecord.append(featureRec)
            feature = featureRec.Feature
            feature.FeatureParams = None
            syms = stripSplitComma(lookups)
            # Placeholders; filled in by setReference (possibly deferred).
            feature.LookupListIndex = theList = [None] * len(syms)
            for i, sym in enumerate(syms):
                setReference(mapLookup, lookupMap, sym, setitem, theList, i)
            feature.LookupCount = len(feature.LookupListIndex)

    self.FeatureCount = len(self.FeatureRecord)
    return self


def parseLookupFlags(lines):
    """Consume leading lookup-flag lines; return (flags, markFilteringSet)."""
    flags = 0
    filterset = None
    allFlags = [
        "righttoleft",
        "ignorebaseglyphs",
        "ignoreligatures",
        "ignoremarks",
        "markattachmenttype",
        "markfiltertype",
    ]
    while lines.peeks()[0].lower() in allFlags:
        line = next(lines)
        flag = {
            "righttoleft": 0x0001,
            "ignorebaseglyphs": 0x0002,
            "ignoreligatures": 0x0004,
            "ignoremarks": 0x0008,
        }.get(line[0].lower())
        if flag:
            assert line[1].lower() in ["yes", "no"], line[1]
            if line[1].lower() == "yes":
                flags |= flag
            continue
        if line[0].lower() == "markattachmenttype":
            # Mark attachment class lives in the high byte of the flag word.
            flags |= int(line[1]) << 8
            continue
        if line[0].lower() == "markfiltertype":
            # 0x10 == USE_MARK_FILTERING_SET.
            flags |= 0x10
            filterset = int(line[1])
    return flags, filterset


def parseSingleSubst(lines, font, _lookupMap=None):
    # Each line: <input glyph> <output glyph>.
    mapping = {}
    for line in lines:
        assert len(line) == 2, line
        line = makeGlyphs(line)
        mapping[line[0]] = line[1]
    return otl.buildSingleSubstSubtable(mapping)


def parseMultiple(lines, font, _lookupMap=None):
    # Each line: <input glyph> <output glyph sequence...>.
    mapping = {}
    for line in lines:
        line = makeGlyphs(line)
        mapping[line[0]] = line[1:]
    return otl.buildMultipleSubstSubtable(mapping)


def parseAlternate(lines, font, _lookupMap=None):
    # Each line: <input glyph> <alternate glyphs...>.
    mapping = {}
    for line in lines:
        line = makeGlyphs(line)
        mapping[line[0]] = line[1:]
    return otl.buildAlternateSubstSubtable(mapping)


def parseLigature(lines, font, _lookupMap=None):
    # Each line: <ligature glyph> <component glyphs...>.
    mapping = {}
    for line in lines:
        assert len(line) >= 2, line
        line = makeGlyphs(line)
        mapping[tuple(line[1:])] = line[0]
    return otl.buildLigatureSubstSubtable(mapping)


def parseSinglePos(lines, font, _lookupMap=None):
    # Each line: <value-record field name> <glyph> <value>.
    values = {}
    for line in lines:
        assert len(line) == 3, line
        w = line[0].title().replace(" ", "")
        assert w in valueRecordFormatDict
        g = makeGlyph(line[1])
        v = int(line[2])
        if g not in values:
            values[g] = ValueRecord()
        assert not hasattr(values[g], w), (g, w)
        setattr(values[g], w, v)
    return otl.buildSinglePosSubtable(values, font.getReverseGlyphMap())


def parsePair(lines, font, _lookupMap=None):
    """Parse pair positioning, either glyph-pair (Format 1) or
    class-pair (Format 2) flavor."""
    self = ot.PairPos()
    self.ValueFormat1 = self.ValueFormat2 = 0
    typ = lines.peeks()[0].split()[0].lower()
    if typ in ("left", "right"):
        # Glyph-pair kerning: lines like "left x advance  g1 g2  value".
        self.Format = 1
        values = {}
        for line in lines:
            assert len(line) == 4, line
            side = line[0].split()[0].lower()
            assert side in ("left", "right"), side
            what = line[0][len(side) :].title().replace(" ", "")
            mask = valueRecordFormatDict[what][0]
            glyph1, glyph2 = makeGlyphs(line[1:3])
            value = int(line[3])
            if not glyph1 in values:
                values[glyph1] = {}
            if not glyph2 in values[glyph1]:
                values[glyph1][glyph2] = (ValueRecord(), ValueRecord())
            rec2 = values[glyph1][glyph2]
            if side == "left":
                self.ValueFormat1 |= mask
                vr = rec2[0]
            else:
                self.ValueFormat2 |= mask
                vr = rec2[1]
            assert not hasattr(vr, what), (vr, what)
            setattr(vr, what, value)
        self.Coverage = makeCoverage(set(values.keys()), font)
        self.PairSet = []
        for glyph1 in self.Coverage.glyphs:
            values1 = values[glyph1]
            pairset = ot.PairSet()
            records = pairset.PairValueRecord = []
            for glyph2 in sorted(values1.keys(), key=font.getGlyphID):
                values2 = values1[glyph2]
                pair = ot.PairValueRecord()
                pair.SecondGlyph = glyph2
                pair.Value1 = values2[0]
                pair.Value2 = values2[1] if self.ValueFormat2 else None
                records.append(pair)
            pairset.PairValueCount = len(pairset.PairValueRecord)
            self.PairSet.append(pairset)
        self.PairSetCount = len(self.PairSet)
    elif typ.endswith("class"):
        # Class-pair kerning with first/second class definitions.
        self.Format = 2
        classDefs = [None, None]
        while lines.peeks()[0].endswith("class definition begin"):
            typ = lines.peek()[0][: -len("class definition begin")].lower()
            idx, klass = {
                "first": (0, ot.ClassDef1),
                "second": (1, ot.ClassDef2),
            }[typ]
            assert classDefs[idx] is None
            classDefs[idx] = parseClassDef(lines, font, klass=klass)
        self.ClassDef1, self.ClassDef2 = classDefs
        self.Class1Count, self.Class2Count = (
            1 + max(c.classDefs.values()) for c in classDefs
        )
        # Pre-populate the full Class1Count x Class2Count matrix.
        self.Class1Record = [ot.Class1Record() for i in range(self.Class1Count)]
        for rec1 in self.Class1Record:
            rec1.Class2Record = [ot.Class2Record() for j in range(self.Class2Count)]
            for rec2 in rec1.Class2Record:
                rec2.Value1 = ValueRecord()
                rec2.Value2 = ValueRecord()
        for line in lines:
            assert len(line) == 4, line
            side = line[0].split()[0].lower()
            assert side in ("left", "right"), side
            what = line[0][len(side) :].title().replace(" ", "")
            mask = valueRecordFormatDict[what][0]
            class1, class2, value = (int(x) for x in line[1:4])
            rec2 = self.Class1Record[class1].Class2Record[class2]
            if side == "left":
                self.ValueFormat1 |= mask
                vr = rec2.Value1
            else:
                self.ValueFormat2 |= mask
                vr = rec2.Value2
            assert not hasattr(vr, what), (vr, what)
            setattr(vr, what, value)
        # Re-wrap with the final value formats now that they are known.
        for rec1 in self.Class1Record:
            for rec2 in rec1.Class2Record:
                rec2.Value1 = ValueRecord(self.ValueFormat1, rec2.Value1)
                rec2.Value2 = (
                    ValueRecord(self.ValueFormat2, rec2.Value2)
                    if self.ValueFormat2
                    else None
                )

        self.Coverage = makeCoverage(set(self.ClassDef1.classDefs.keys()), font)
    else:
        assert 0, typ
    return self


def parseKernset(lines, font, _lookupMap=None):
    # Glyph-pair kern data may be followed by class definitions belonging to
    # the next subtable; stop reading at those markers in that case.
    typ = lines.peeks()[0].split()[0].lower()
    if typ in ("left", "right"):
        with lines.until(
            ("firstclass definition begin", "secondclass definition begin")
        ):
            return parsePair(lines, font)
    return parsePair(lines, font)


def makeAnchor(data, klass=ot.Anchor):
    # data: ["x,y"] or ["x,y", contourPoint]; a point makes it Format 2.
    assert len(data) <= 2
    anchor = klass()
    anchor.Format = 1
    anchor.XCoordinate, anchor.YCoordinate = intSplitComma(data[0])
    if len(data) > 1 and data[1] != "":
        anchor.Format = 2
        anchor.AnchorPoint = int(data[1])
    return anchor


def parseCursive(lines, font, _lookupMap=None):
    # Each line: entry/exit  glyph  anchor [contour point].
    records = {}
    for line in lines:
        assert len(line) in [3, 4], line
        idx, klass = {
            "entry": (0, ot.EntryAnchor),
            "exit": (1, ot.ExitAnchor),
        }[line[0]]
        glyph = makeGlyph(line[1])
        if glyph not in records:
            records[glyph] = [None, None]
        assert records[glyph][idx] is None, (glyph, idx)
        records[glyph][idx]
= makeAnchor(line[2:], klass)\n return otl.buildCursivePosSubtable(records, font.getReverseGlyphMap())\n\n\ndef makeMarkRecords(data, coverage, c):\n records = []\n for glyph in coverage.glyphs:\n klass, anchor = data[glyph]\n record = c.MarkRecordClass()\n record.Class = klass\n setattr(record, c.MarkAnchor, anchor)\n records.append(record)\n return records\n\n\ndef makeBaseRecords(data, coverage, c, classCount):\n records = []\n idx = {}\n for glyph in coverage.glyphs:\n idx[glyph] = len(records)\n record = c.BaseRecordClass()\n anchors = [None] * classCount\n setattr(record, c.BaseAnchor, anchors)\n records.append(record)\n for (glyph, klass), anchor in data.items():\n record = records[idx[glyph]]\n anchors = getattr(record, c.BaseAnchor)\n assert anchors[klass] is None, (glyph, klass)\n anchors[klass] = anchor\n return records\n\n\ndef makeLigatureRecords(data, coverage, c, classCount):\n records = [None] * len(coverage.glyphs)\n idx = {g: i for i, g in enumerate(coverage.glyphs)}\n\n for (glyph, klass, compIdx, compCount), anchor in data.items():\n record = records[idx[glyph]]\n if record is None:\n record = records[idx[glyph]] = ot.LigatureAttach()\n record.ComponentCount = compCount\n record.ComponentRecord = [ot.ComponentRecord() for i in range(compCount)]\n for compRec in record.ComponentRecord:\n compRec.LigatureAnchor = [None] * classCount\n assert record.ComponentCount == compCount, (\n glyph,\n record.ComponentCount,\n compCount,\n )\n\n anchors = record.ComponentRecord[compIdx - 1].LigatureAnchor\n assert anchors[klass] is None, (glyph, compIdx, klass)\n anchors[klass] = anchor\n return records\n\n\ndef parseMarkToSomething(lines, font, c):\n self = c.Type()\n self.Format = 1\n markData = {}\n baseData = {}\n Data = {\n "mark": (markData, c.MarkAnchorClass),\n "base": (baseData, c.BaseAnchorClass),\n "ligature": (baseData, c.BaseAnchorClass),\n }\n maxKlass = 0\n for line in lines:\n typ = line[0]\n assert typ in ("mark", "base", "ligature")\n glyph = 
makeGlyph(line[1])\n data, anchorClass = Data[typ]\n extraItems = 2 if typ == "ligature" else 0\n extras = tuple(int(i) for i in line[2 : 2 + extraItems])\n klass = int(line[2 + extraItems])\n anchor = makeAnchor(line[3 + extraItems :], anchorClass)\n if typ == "mark":\n key, value = glyph, (klass, anchor)\n else:\n key, value = ((glyph, klass) + extras), anchor\n assert key not in data, key\n data[key] = value\n maxKlass = max(maxKlass, klass)\n\n # Mark\n markCoverage = makeCoverage(set(markData.keys()), font, c.MarkCoverageClass)\n markArray = c.MarkArrayClass()\n markRecords = makeMarkRecords(markData, markCoverage, c)\n setattr(markArray, c.MarkRecord, markRecords)\n setattr(markArray, c.MarkCount, len(markRecords))\n setattr(self, c.MarkCoverage, markCoverage)\n setattr(self, c.MarkArray, markArray)\n self.ClassCount = maxKlass + 1\n\n # Base\n self.classCount = 0 if not baseData else 1 + max(k[1] for k, v in baseData.items())\n baseCoverage = makeCoverage(\n set([k[0] for k in baseData.keys()]), font, c.BaseCoverageClass\n )\n baseArray = c.BaseArrayClass()\n if c.Base == "Ligature":\n baseRecords = makeLigatureRecords(baseData, baseCoverage, c, self.classCount)\n else:\n baseRecords = makeBaseRecords(baseData, baseCoverage, c, self.classCount)\n setattr(baseArray, c.BaseRecord, baseRecords)\n setattr(baseArray, c.BaseCount, len(baseRecords))\n setattr(self, c.BaseCoverage, baseCoverage)\n setattr(self, c.BaseArray, baseArray)\n\n return self\n\n\nclass MarkHelper(object):\n def __init__(self):\n for Which in ("Mark", "Base"):\n for What in ("Coverage", "Array", "Count", "Record", "Anchor"):\n key = Which + What\n if Which == "Mark" and What in ("Count", "Record", "Anchor"):\n value = key\n else:\n value = getattr(self, Which) + What\n if value == "LigatureRecord":\n value = "LigatureAttach"\n setattr(self, key, value)\n if What != "Count":\n klass = getattr(ot, value)\n setattr(self, key + "Class", klass)\n\n\nclass MarkToBaseHelper(MarkHelper):\n Mark = 
"Mark"\n Base = "Base"\n Type = ot.MarkBasePos\n\n\nclass MarkToMarkHelper(MarkHelper):\n Mark = "Mark1"\n Base = "Mark2"\n Type = ot.MarkMarkPos\n\n\nclass MarkToLigatureHelper(MarkHelper):\n Mark = "Mark"\n Base = "Ligature"\n Type = ot.MarkLigPos\n\n\ndef parseMarkToBase(lines, font, _lookupMap=None):\n return parseMarkToSomething(lines, font, MarkToBaseHelper())\n\n\ndef parseMarkToMark(lines, font, _lookupMap=None):\n return parseMarkToSomething(lines, font, MarkToMarkHelper())\n\n\ndef parseMarkToLigature(lines, font, _lookupMap=None):\n return parseMarkToSomething(lines, font, MarkToLigatureHelper())\n\n\ndef stripSplitComma(line):\n return [s.strip() for s in line.split(",")] if line else []\n\n\ndef intSplitComma(line):\n return [int(i) for i in line.split(",")] if line else []\n\n\n# Copied from fontTools.subset\nclass ContextHelper(object):\n def __init__(self, klassName, Format):\n if klassName.endswith("Subst"):\n Typ = "Sub"\n Type = "Subst"\n else:\n Typ = "Pos"\n Type = "Pos"\n if klassName.startswith("Chain"):\n Chain = "Chain"\n InputIdx = 1\n DataLen = 3\n else:\n Chain = ""\n InputIdx = 0\n DataLen = 1\n ChainTyp = Chain + Typ\n\n self.Typ = Typ\n self.Type = Type\n self.Chain = Chain\n self.ChainTyp = ChainTyp\n self.InputIdx = InputIdx\n self.DataLen = DataLen\n\n self.LookupRecord = Type + "LookupRecord"\n\n if Format == 1:\n Coverage = lambda r: r.Coverage\n ChainCoverage = lambda r: r.Coverage\n ContextData = lambda r: (None,)\n ChainContextData = lambda r: (None, None, None)\n SetContextData = None\n SetChainContextData = None\n RuleData = lambda r: (r.Input,)\n ChainRuleData = lambda r: (r.Backtrack, r.Input, r.LookAhead)\n\n def SetRuleData(r, d):\n (r.Input,) = d\n (r.GlyphCount,) = (len(x) + 1 for x in d)\n\n def ChainSetRuleData(r, d):\n (r.Backtrack, r.Input, r.LookAhead) = d\n (\n r.BacktrackGlyphCount,\n r.InputGlyphCount,\n r.LookAheadGlyphCount,\n ) = (len(d[0]), len(d[1]) + 1, len(d[2]))\n\n elif Format == 2:\n Coverage = lambda 
r: r.Coverage\n ChainCoverage = lambda r: r.Coverage\n ContextData = lambda r: (r.ClassDef,)\n ChainContextData = lambda r: (\n r.BacktrackClassDef,\n r.InputClassDef,\n r.LookAheadClassDef,\n )\n\n def SetContextData(r, d):\n (r.ClassDef,) = d\n\n def SetChainContextData(r, d):\n (r.BacktrackClassDef, r.InputClassDef, r.LookAheadClassDef) = d\n\n RuleData = lambda r: (r.Class,)\n ChainRuleData = lambda r: (r.Backtrack, r.Input, r.LookAhead)\n\n def SetRuleData(r, d):\n (r.Class,) = d\n (r.GlyphCount,) = (len(x) + 1 for x in d)\n\n def ChainSetRuleData(r, d):\n (r.Backtrack, r.Input, r.LookAhead) = d\n (\n r.BacktrackGlyphCount,\n r.InputGlyphCount,\n r.LookAheadGlyphCount,\n ) = (len(d[0]), len(d[1]) + 1, len(d[2]))\n\n elif Format == 3:\n Coverage = lambda r: r.Coverage[0]\n ChainCoverage = lambda r: r.InputCoverage[0]\n ContextData = None\n ChainContextData = None\n SetContextData = None\n SetChainContextData = None\n RuleData = lambda r: r.Coverage\n ChainRuleData = lambda r: (\n r.BacktrackCoverage + r.InputCoverage + r.LookAheadCoverage\n )\n\n def SetRuleData(r, d):\n (r.Coverage,) = d\n (r.GlyphCount,) = (len(x) for x in d)\n\n def ChainSetRuleData(r, d):\n (r.BacktrackCoverage, r.InputCoverage, r.LookAheadCoverage) = d\n (\n r.BacktrackGlyphCount,\n r.InputGlyphCount,\n r.LookAheadGlyphCount,\n ) = (len(x) for x in d)\n\n else:\n assert 0, "unknown format: %s" % Format\n\n if Chain:\n self.Coverage = ChainCoverage\n self.ContextData = ChainContextData\n self.SetContextData = SetChainContextData\n self.RuleData = ChainRuleData\n self.SetRuleData = ChainSetRuleData\n else:\n self.Coverage = Coverage\n self.ContextData = ContextData\n self.SetContextData = SetContextData\n self.RuleData = RuleData\n self.SetRuleData = SetRuleData\n\n if Format == 1:\n self.Rule = ChainTyp + "Rule"\n self.RuleCount = ChainTyp + "RuleCount"\n self.RuleSet = ChainTyp + "RuleSet"\n self.RuleSetCount = ChainTyp + "RuleSetCount"\n self.Intersect = lambda glyphs, c, r: [r] if r in 
glyphs else []\n elif Format == 2:\n self.Rule = ChainTyp + "ClassRule"\n self.RuleCount = ChainTyp + "ClassRuleCount"\n self.RuleSet = ChainTyp + "ClassSet"\n self.RuleSetCount = ChainTyp + "ClassSetCount"\n self.Intersect = lambda glyphs, c, r: (\n c.intersect_class(glyphs, r)\n if c\n else (set(glyphs) if r == 0 else set())\n )\n\n self.ClassDef = "InputClassDef" if Chain else "ClassDef"\n self.ClassDefIndex = 1 if Chain else 0\n self.Input = "Input" if Chain else "Class"\n\n\ndef parseLookupRecords(items, klassName, lookupMap=None):\n klass = getattr(ot, klassName)\n lst = []\n for item in items:\n rec = klass()\n item = stripSplitComma(item)\n assert len(item) == 2, item\n idx = int(item[0])\n assert idx > 0, idx\n rec.SequenceIndex = idx - 1\n setReference(mapLookup, lookupMap, item[1], setattr, rec, "LookupListIndex")\n lst.append(rec)\n return lst\n\n\ndef makeClassDef(classDefs, font, klass=ot.Coverage):\n if not classDefs:\n return None\n self = klass()\n self.classDefs = dict(classDefs)\n return self\n\n\ndef parseClassDef(lines, font, klass=ot.ClassDef):\n classDefs = {}\n with lines.between("class definition"):\n for line in lines:\n glyph = makeGlyph(line[0])\n assert glyph not in classDefs, glyph\n classDefs[glyph] = int(line[1])\n return makeClassDef(classDefs, font, klass)\n\n\ndef makeCoverage(glyphs, font, klass=ot.Coverage):\n if not glyphs:\n return None\n if isinstance(glyphs, set):\n glyphs = sorted(glyphs)\n coverage = klass()\n coverage.glyphs = sorted(set(glyphs), key=font.getGlyphID)\n return coverage\n\n\ndef parseCoverage(lines, font, klass=ot.Coverage):\n glyphs = []\n with lines.between("coverage definition"):\n for line in lines:\n glyphs.append(makeGlyph(line[0]))\n return makeCoverage(glyphs, font, klass)\n\n\ndef bucketizeRules(self, c, rules, bucketKeys):\n buckets = {}\n for seq, recs in rules:\n buckets.setdefault(seq[c.InputIdx][0], []).append(\n (tuple(s[1 if i == c.InputIdx else 0 :] for i, s in enumerate(seq)), recs)\n 
)\n\n rulesets = []\n for firstGlyph in bucketKeys:\n if firstGlyph not in buckets:\n rulesets.append(None)\n continue\n thisRules = []\n for seq, recs in buckets[firstGlyph]:\n rule = getattr(ot, c.Rule)()\n c.SetRuleData(rule, seq)\n setattr(rule, c.Type + "Count", len(recs))\n setattr(rule, c.LookupRecord, recs)\n thisRules.append(rule)\n\n ruleset = getattr(ot, c.RuleSet)()\n setattr(ruleset, c.Rule, thisRules)\n setattr(ruleset, c.RuleCount, len(thisRules))\n rulesets.append(ruleset)\n\n setattr(self, c.RuleSet, rulesets)\n setattr(self, c.RuleSetCount, len(rulesets))\n\n\ndef parseContext(lines, font, Type, lookupMap=None):\n self = getattr(ot, Type)()\n typ = lines.peeks()[0].split()[0].lower()\n if typ == "glyph":\n self.Format = 1\n log.debug("Parsing %s format %s", Type, self.Format)\n c = ContextHelper(Type, self.Format)\n rules = []\n for line in lines:\n assert line[0].lower() == "glyph", line[0]\n while len(line) < 1 + c.DataLen:\n line.append("")\n seq = tuple(makeGlyphs(stripSplitComma(i)) for i in line[1 : 1 + c.DataLen])\n recs = parseLookupRecords(line[1 + c.DataLen :], c.LookupRecord, lookupMap)\n rules.append((seq, recs))\n\n firstGlyphs = set(seq[c.InputIdx][0] for seq, recs in rules)\n self.Coverage = makeCoverage(firstGlyphs, font)\n bucketizeRules(self, c, rules, self.Coverage.glyphs)\n elif typ.endswith("class"):\n self.Format = 2\n log.debug("Parsing %s format %s", Type, self.Format)\n c = ContextHelper(Type, self.Format)\n classDefs = [None] * c.DataLen\n while lines.peeks()[0].endswith("class definition begin"):\n typ = lines.peek()[0][: -len("class definition begin")].lower()\n idx, klass = {\n 1: {\n "": (0, ot.ClassDef),\n },\n 3: {\n "backtrack": (0, ot.BacktrackClassDef),\n "": (1, ot.InputClassDef),\n "lookahead": (2, ot.LookAheadClassDef),\n },\n }[c.DataLen][typ]\n assert classDefs[idx] is None, idx\n classDefs[idx] = parseClassDef(lines, font, klass=klass)\n c.SetContextData(self, classDefs)\n rules = []\n for line in lines:\n 
assert line[0].lower().startswith("class"), line[0]\n while len(line) < 1 + c.DataLen:\n line.append("")\n seq = tuple(intSplitComma(i) for i in line[1 : 1 + c.DataLen])\n recs = parseLookupRecords(line[1 + c.DataLen :], c.LookupRecord, lookupMap)\n rules.append((seq, recs))\n firstClasses = set(seq[c.InputIdx][0] for seq, recs in rules)\n firstGlyphs = set(\n g for g, c in classDefs[c.InputIdx].classDefs.items() if c in firstClasses\n )\n self.Coverage = makeCoverage(firstGlyphs, font)\n bucketizeRules(self, c, rules, range(max(firstClasses) + 1))\n elif typ.endswith("coverage"):\n self.Format = 3\n log.debug("Parsing %s format %s", Type, self.Format)\n c = ContextHelper(Type, self.Format)\n coverages = tuple([] for i in range(c.DataLen))\n while lines.peeks()[0].endswith("coverage definition begin"):\n typ = lines.peek()[0][: -len("coverage definition begin")].lower()\n idx, klass = {\n 1: {\n "": (0, ot.Coverage),\n },\n 3: {\n "backtrack": (0, ot.BacktrackCoverage),\n "input": (1, ot.InputCoverage),\n "lookahead": (2, ot.LookAheadCoverage),\n },\n }[c.DataLen][typ]\n coverages[idx].append(parseCoverage(lines, font, klass=klass))\n c.SetRuleData(self, coverages)\n lines = list(lines)\n assert len(lines) == 1\n line = lines[0]\n assert line[0].lower() == "coverage", line[0]\n recs = parseLookupRecords(line[1:], c.LookupRecord, lookupMap)\n setattr(self, c.Type + "Count", len(recs))\n setattr(self, c.LookupRecord, recs)\n else:\n assert 0, typ\n return self\n\n\ndef parseContextSubst(lines, font, lookupMap=None):\n return parseContext(lines, font, "ContextSubst", lookupMap=lookupMap)\n\n\ndef parseContextPos(lines, font, lookupMap=None):\n return parseContext(lines, font, "ContextPos", lookupMap=lookupMap)\n\n\ndef parseChainedSubst(lines, font, lookupMap=None):\n return parseContext(lines, font, "ChainContextSubst", lookupMap=lookupMap)\n\n\ndef parseChainedPos(lines, font, lookupMap=None):\n return parseContext(lines, font, "ChainContextPos", 
lookupMap=lookupMap)\n\n\ndef parseReverseChainedSubst(lines, font, _lookupMap=None):\n self = ot.ReverseChainSingleSubst()\n self.Format = 1\n coverages = ([], [])\n while lines.peeks()[0].endswith("coverage definition begin"):\n typ = lines.peek()[0][: -len("coverage definition begin")].lower()\n idx, klass = {\n "backtrack": (0, ot.BacktrackCoverage),\n "lookahead": (1, ot.LookAheadCoverage),\n }[typ]\n coverages[idx].append(parseCoverage(lines, font, klass=klass))\n self.BacktrackCoverage = coverages[0]\n self.BacktrackGlyphCount = len(self.BacktrackCoverage)\n self.LookAheadCoverage = coverages[1]\n self.LookAheadGlyphCount = len(self.LookAheadCoverage)\n mapping = {}\n for line in lines:\n assert len(line) == 2, line\n line = makeGlyphs(line)\n mapping[line[0]] = line[1]\n self.Coverage = makeCoverage(set(mapping.keys()), font)\n self.Substitute = [mapping[k] for k in self.Coverage.glyphs]\n self.GlyphCount = len(self.Substitute)\n return self\n\n\ndef parseLookup(lines, tableTag, font, lookupMap=None):\n line = lines.expect("lookup")\n _, name, typ = line\n log.debug("Parsing lookup type %s %s", typ, name)\n lookup = ot.Lookup()\n lookup.LookupFlag, filterset = parseLookupFlags(lines)\n if filterset is not None:\n lookup.MarkFilteringSet = filterset\n lookup.LookupType, parseLookupSubTable = {\n "GSUB": {\n "single": (1, parseSingleSubst),\n "multiple": (2, parseMultiple),\n "alternate": (3, parseAlternate),\n "ligature": (4, parseLigature),\n "context": (5, parseContextSubst),\n "chained": (6, parseChainedSubst),\n "reversechained": (8, parseReverseChainedSubst),\n },\n "GPOS": {\n "single": (1, parseSinglePos),\n "pair": (2, parsePair),\n "kernset": (2, parseKernset),\n "cursive": (3, parseCursive),\n "mark to base": (4, parseMarkToBase),\n "mark to ligature": (5, parseMarkToLigature),\n "mark to mark": (6, parseMarkToMark),\n "context": (7, parseContextPos),\n "chained": (8, parseChainedPos),\n },\n }[tableTag][typ]\n\n with lines.until("lookup end"):\n 
subtables = []\n\n while lines.peek():\n with lines.until(("% subtable", "subtable end")):\n while lines.peek():\n subtable = parseLookupSubTable(lines, font, lookupMap)\n assert lookup.LookupType == subtable.LookupType\n subtables.append(subtable)\n if lines.peeks()[0] in ("% subtable", "subtable end"):\n next(lines)\n lines.expect("lookup end")\n\n lookup.SubTable = subtables\n lookup.SubTableCount = len(lookup.SubTable)\n if lookup.SubTableCount == 0:\n # Remove this return when following is fixed:\n # https://github.com/fonttools/fonttools/issues/789\n return None\n return lookup\n\n\ndef parseGSUBGPOS(lines, font, tableTag):\n container = ttLib.getTableClass(tableTag)()\n lookupMap = DeferredMapping()\n featureMap = DeferredMapping()\n assert tableTag in ("GSUB", "GPOS")\n log.debug("Parsing %s", tableTag)\n self = getattr(ot, tableTag)()\n self.Version = 0x00010000\n fields = {\n "script table begin": (\n "ScriptList",\n lambda lines: parseScriptList(lines, featureMap),\n ),\n "feature table begin": (\n "FeatureList",\n lambda lines: parseFeatureList(lines, lookupMap, featureMap),\n ),\n "lookup": ("LookupList", None),\n }\n for attr, parser in fields.values():\n setattr(self, attr, None)\n while lines.peek() is not None:\n typ = lines.peek()[0].lower()\n if typ not in fields:\n log.debug("Skipping %s", lines.peek())\n next(lines)\n continue\n attr, parser = fields[typ]\n if typ == "lookup":\n if self.LookupList is None:\n self.LookupList = ot.LookupList()\n self.LookupList.Lookup = []\n _, name, _ = lines.peek()\n lookup = parseLookup(lines, tableTag, font, lookupMap)\n if lookupMap is not None:\n assert name not in lookupMap, "Duplicate lookup name: %s" % name\n lookupMap[name] = len(self.LookupList.Lookup)\n else:\n assert int(name) == len(self.LookupList.Lookup), "%d %d" % (\n name,\n len(self.Lookup),\n )\n self.LookupList.Lookup.append(lookup)\n else:\n assert getattr(self, attr) is None, attr\n setattr(self, attr, parser(lines))\n if self.LookupList:\n 
self.LookupList.LookupCount = len(self.LookupList.Lookup)\n if lookupMap is not None:\n lookupMap.applyDeferredMappings()\n if os.environ.get(LOOKUP_DEBUG_ENV_VAR):\n if "Debg" not in font:\n font["Debg"] = newTable("Debg")\n font["Debg"].data = {}\n debug = (\n font["Debg"]\n .data.setdefault(LOOKUP_DEBUG_INFO_KEY, {})\n .setdefault(tableTag, {})\n )\n for name, lookup in lookupMap.items():\n debug[str(lookup)] = ["", name, ""]\n\n featureMap.applyDeferredMappings()\n container.table = self\n return container\n\n\ndef parseGSUB(lines, font):\n return parseGSUBGPOS(lines, font, "GSUB")\n\n\ndef parseGPOS(lines, font):\n return parseGSUBGPOS(lines, font, "GPOS")\n\n\ndef parseAttachList(lines, font):\n points = {}\n with lines.between("attachment list"):\n for line in lines:\n glyph = makeGlyph(line[0])\n assert glyph not in points, glyph\n points[glyph] = [int(i) for i in line[1:]]\n return otl.buildAttachList(points, font.getReverseGlyphMap())\n\n\ndef parseCaretList(lines, font):\n carets = {}\n with lines.between("carets"):\n for line in lines:\n glyph = makeGlyph(line[0])\n assert glyph not in carets, glyph\n num = int(line[1])\n thisCarets = [int(i) for i in line[2:]]\n assert num == len(thisCarets), line\n carets[glyph] = thisCarets\n return otl.buildLigCaretList(carets, {}, font.getReverseGlyphMap())\n\n\ndef makeMarkFilteringSets(sets, font):\n self = ot.MarkGlyphSetsDef()\n self.MarkSetTableFormat = 1\n self.MarkSetCount = 1 + max(sets.keys())\n self.Coverage = [None] * self.MarkSetCount\n for k, v in sorted(sets.items()):\n self.Coverage[k] = makeCoverage(set(v), font)\n return self\n\n\ndef parseMarkFilteringSets(lines, font):\n sets = {}\n with lines.between("set definition"):\n for line in lines:\n assert len(line) == 2, line\n glyph = makeGlyph(line[0])\n # TODO accept set names\n st = int(line[1])\n if st not in sets:\n sets[st] = []\n sets[st].append(glyph)\n return makeMarkFilteringSets(sets, font)\n\n\ndef parseGDEF(lines, font):\n container = 
ttLib.getTableClass("GDEF")()\n log.debug("Parsing GDEF")\n self = ot.GDEF()\n fields = {\n "class definition begin": (\n "GlyphClassDef",\n lambda lines, font: parseClassDef(lines, font, klass=ot.GlyphClassDef),\n ),\n "attachment list begin": ("AttachList", parseAttachList),\n "carets begin": ("LigCaretList", parseCaretList),\n "mark attachment class definition begin": (\n "MarkAttachClassDef",\n lambda lines, font: parseClassDef(lines, font, klass=ot.MarkAttachClassDef),\n ),\n "markfilter set definition begin": ("MarkGlyphSetsDef", parseMarkFilteringSets),\n }\n for attr, parser in fields.values():\n setattr(self, attr, None)\n while lines.peek() is not None:\n typ = lines.peek()[0].lower()\n if typ not in fields:\n log.debug("Skipping %s", typ)\n next(lines)\n continue\n attr, parser = fields[typ]\n assert getattr(self, attr) is None, attr\n setattr(self, attr, parser(lines, font))\n self.Version = 0x00010000 if self.MarkGlyphSetsDef is None else 0x00010002\n container.table = self\n return container\n\n\ndef parseCmap(lines, font):\n container = ttLib.getTableClass("cmap")()\n log.debug("Parsing cmap")\n tables = []\n while lines.peek() is not None:\n lines.expect("cmap subtable %d" % len(tables))\n platId, encId, fmt, lang = [\n parseCmapId(lines, field)\n for field in ("platformID", "encodingID", "format", "language")\n ]\n table = cmap_classes[fmt](fmt)\n table.platformID = platId\n table.platEncID = encId\n table.language = lang\n table.cmap = {}\n line = next(lines)\n while line[0] != "end subtable":\n table.cmap[int(line[0], 16)] = line[1]\n line = next(lines)\n tables.append(table)\n container.tableVersion = 0\n container.tables = tables\n return container\n\n\ndef parseCmapId(lines, field):\n line = next(lines)\n assert field == line[0]\n return int(line[1])\n\n\ndef parseTable(lines, font, tableTag=None):\n log.debug("Parsing table")\n line = lines.peeks()\n tag = None\n if line[0].split()[0] == "FontDame":\n tag = line[0].split()[1]\n elif " 
".join(line[0].split()[:3]) == "Font Chef Table":\n tag = line[0].split()[3]\n if tag is not None:\n next(lines)\n tag = tag.ljust(4)\n if tableTag is None:\n tableTag = tag\n else:\n assert tableTag == tag, (tableTag, tag)\n\n assert (\n tableTag is not None\n ), "Don't know what table to parse and data doesn't specify"\n\n return {\n "GSUB": parseGSUB,\n "GPOS": parseGPOS,\n "GDEF": parseGDEF,\n "cmap": parseCmap,\n }[tableTag](lines, font)\n\n\nclass Tokenizer(object):\n def __init__(self, f):\n # TODO BytesIO / StringIO as needed? also, figure out whether we work on bytes or unicode\n lines = iter(f)\n try:\n self.filename = f.name\n except:\n self.filename = None\n self.lines = iter(lines)\n self.line = ""\n self.lineno = 0\n self.stoppers = []\n self.buffer = None\n\n def __iter__(self):\n return self\n\n def _next_line(self):\n self.lineno += 1\n line = self.line = next(self.lines)\n line = [s.strip() for s in line.split("\t")]\n if len(line) == 1 and not line[0]:\n del line[0]\n if line and not line[-1]:\n log.warning("trailing tab found on line %d: %s" % (self.lineno, self.line))\n while line and not line[-1]:\n del line[-1]\n return line\n\n def _next_nonempty(self):\n while True:\n line = self._next_line()\n # Skip comments and empty lines\n if line and line[0] and (line[0][0] != "%" or line[0] == "% subtable"):\n return line\n\n def _next_buffered(self):\n if self.buffer:\n ret = self.buffer\n self.buffer = None\n return ret\n else:\n return self._next_nonempty()\n\n def __next__(self):\n line = self._next_buffered()\n if line[0].lower() in self.stoppers:\n self.buffer = line\n raise StopIteration\n return line\n\n def next(self):\n return self.__next__()\n\n def peek(self):\n if not self.buffer:\n try:\n self.buffer = self._next_nonempty()\n except StopIteration:\n return None\n if self.buffer[0].lower() in self.stoppers:\n return None\n return self.buffer\n\n def peeks(self):\n ret = self.peek()\n return ret if ret is not None else ("",)\n\n 
@contextmanager\n def between(self, tag):\n start = tag + " begin"\n end = tag + " end"\n self.expectendswith(start)\n self.stoppers.append(end)\n yield\n del self.stoppers[-1]\n self.expect(tag + " end")\n\n @contextmanager\n def until(self, tags):\n if type(tags) is not tuple:\n tags = (tags,)\n self.stoppers.extend(tags)\n yield\n del self.stoppers[-len(tags) :]\n\n def expect(self, s):\n line = next(self)\n tag = line[0].lower()\n assert tag == s, "Expected '%s', got '%s'" % (s, tag)\n return line\n\n def expectendswith(self, s):\n line = next(self)\n tag = line[0].lower()\n assert tag.endswith(s), "Expected '*%s', got '%s'" % (s, tag)\n return line\n\n\ndef build(f, font, tableTag=None):\n """Convert a Monotype font layout file to an OpenType layout object\n\n A font object must be passed, but this may be a "dummy" font; it is only\n used for sorting glyph sets when making coverage tables and to hold the\n OpenType layout table while it is being built.\n\n Args:\n f: A file object.\n font (TTFont): A font object.\n tableTag (string): If provided, asserts that the file contains data for the\n given OpenType table.\n\n Returns:\n An object representing the table. (e.g. 
``table_G_S_U_B_``)\n """\n lines = Tokenizer(f)\n return parseTable(lines, font, tableTag=tableTag)\n\n\ndef main(args=None, font=None):\n """Convert a FontDame OTL file to TTX XML\n\n Writes XML output to stdout.\n\n Args:\n args: Command line arguments (``--font``, ``--table``, input files).\n """\n import sys\n from fontTools import configLogger\n from fontTools.misc.testTools import MockFont\n\n if args is None:\n args = sys.argv[1:]\n\n # configure the library logger (for >= WARNING)\n configLogger()\n # comment this out to enable debug messages from mtiLib's logger\n # log.setLevel(logging.DEBUG)\n\n import argparse\n\n parser = argparse.ArgumentParser(\n "fonttools mtiLib",\n description=main.__doc__,\n )\n\n parser.add_argument(\n "--font",\n "-f",\n metavar="FILE",\n dest="font",\n help="Input TTF files (used for glyph classes and sorting coverage tables)",\n )\n parser.add_argument(\n "--table",\n "-t",\n metavar="TABLE",\n dest="tableTag",\n help="Table to fill (sniffed from input file if not provided)",\n )\n parser.add_argument(\n "inputs", metavar="FILE", type=str, nargs="+", help="Input FontDame .txt files"\n )\n\n args = parser.parse_args(args)\n\n if font is None:\n if args.font:\n font = ttLib.TTFont(args.font)\n else:\n font = MockFont()\n\n for f in args.inputs:\n log.debug("Processing %s", f)\n with open(f, "rt", encoding="utf-8-sig") as f:\n table = build(f, font, tableTag=args.tableTag)\n blob = table.compile(font) # Make sure it compiles\n decompiled = table.__class__()\n decompiled.decompile(blob, font) # Make sure it decompiles!\n\n # continue\n from fontTools.misc import xmlWriter\n\n tag = table.tableTag\n writer = xmlWriter.XMLWriter(sys.stdout)\n writer.begintag(tag)\n writer.newline()\n # table.toXML(writer, font)\n decompiled.toXML(writer, font)\n writer.endtag(tag)\n writer.newline()\n\n\nif __name__ == "__main__":\n import sys\n\n sys.exit(main())\n
|
.venv\Lib\site-packages\fontTools\mtiLib\__init__.py
|
__init__.py
|
Python
| 48,002 | 0.95 | 0.208571 | 0.019167 |
node-utils
| 607 |
2023-09-12T02:21:57.558868
|
MIT
| false |
09fef9544cac38af5bd5a10831d54fe9
|
import sys\nfrom fontTools.mtiLib import main\n\nif __name__ == "__main__":\n sys.exit(main())\n
|
.venv\Lib\site-packages\fontTools\mtiLib\__main__.py
|
__main__.py
|
Python
| 99 | 0.65 | 0.2 | 0 |
react-lib
| 813 |
2025-05-12T23:02:17.546464
|
Apache-2.0
| false |
d5d808327792f9ffaa8328433884ca44
|
\n\n
|
.venv\Lib\site-packages\fontTools\mtiLib\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 72,028 | 0.75 | 0.016367 | 0.013913 |
python-kit
| 806 |
2024-08-07T18:55:20.882831
|
MIT
| false |
2e1b2f6d19071e20a02b076867786b0f
|
\n\n
|
.venv\Lib\site-packages\fontTools\mtiLib\__pycache__\__main__.cpython-313.pyc
|
__main__.cpython-313.pyc
|
Other
| 368 | 0.7 | 0 | 0 |
node-utils
| 341 |
2023-07-17T14:37:14.926116
|
Apache-2.0
| false |
23d0ec5564658fb11ce81e2d73672671
|
class OpenTypeLibError(Exception):\n def __init__(self, message, location):\n Exception.__init__(self, message)\n self.location = location\n\n def __str__(self):\n message = Exception.__str__(self)\n if self.location:\n return f"{self.location}: {message}"\n else:\n return message\n
|
.venv\Lib\site-packages\fontTools\otlLib\error.py
|
error.py
|
Python
| 346 | 0.85 | 0.363636 | 0 |
node-utils
| 842 |
2024-08-01T07:19:51.994556
|
GPL-3.0
| false |
a63db3c8af54d2f33970fa38f4bf770e
|
__all__ = ["maxCtxFont"]\n\n\ndef maxCtxFont(font):\n """Calculate the usMaxContext value for an entire font."""\n\n maxCtx = 0\n for tag in ("GSUB", "GPOS"):\n if tag not in font:\n continue\n table = font[tag].table\n if not table.LookupList:\n continue\n for lookup in table.LookupList.Lookup:\n for st in lookup.SubTable:\n maxCtx = maxCtxSubtable(maxCtx, tag, lookup.LookupType, st)\n return maxCtx\n\n\ndef maxCtxSubtable(maxCtx, tag, lookupType, st):\n """Calculate usMaxContext based on a single lookup table (and an existing\n max value).\n """\n\n # single positioning, single / multiple substitution\n if (tag == "GPOS" and lookupType == 1) or (\n tag == "GSUB" and lookupType in (1, 2, 3)\n ):\n maxCtx = max(maxCtx, 1)\n\n # pair positioning\n elif tag == "GPOS" and lookupType == 2:\n maxCtx = max(maxCtx, 2)\n\n # ligatures\n elif tag == "GSUB" and lookupType == 4:\n for ligatures in st.ligatures.values():\n for ligature in ligatures:\n maxCtx = max(maxCtx, ligature.CompCount)\n\n # context\n elif (tag == "GPOS" and lookupType == 7) or (tag == "GSUB" and lookupType == 5):\n maxCtx = maxCtxContextualSubtable(maxCtx, st, "Pos" if tag == "GPOS" else "Sub")\n\n # chained context\n elif (tag == "GPOS" and lookupType == 8) or (tag == "GSUB" and lookupType == 6):\n maxCtx = maxCtxContextualSubtable(\n maxCtx, st, "Pos" if tag == "GPOS" else "Sub", "Chain"\n )\n\n # extensions\n elif (tag == "GPOS" and lookupType == 9) or (tag == "GSUB" and lookupType == 7):\n maxCtx = maxCtxSubtable(maxCtx, tag, st.ExtensionLookupType, st.ExtSubTable)\n\n # reverse-chained context\n elif tag == "GSUB" and lookupType == 8:\n maxCtx = maxCtxContextualRule(maxCtx, st, "Reverse")\n\n return maxCtx\n\n\ndef maxCtxContextualSubtable(maxCtx, st, ruleType, chain=""):\n """Calculate usMaxContext based on a contextual feature subtable."""\n\n if st.Format == 1:\n for ruleset in getattr(st, "%s%sRuleSet" % (chain, ruleType)):\n if ruleset is None:\n continue\n for rule in getattr(ruleset, 
"%s%sRule" % (chain, ruleType)):\n if rule is None:\n continue\n maxCtx = maxCtxContextualRule(maxCtx, rule, chain)\n\n elif st.Format == 2:\n for ruleset in getattr(st, "%s%sClassSet" % (chain, ruleType)):\n if ruleset is None:\n continue\n for rule in getattr(ruleset, "%s%sClassRule" % (chain, ruleType)):\n if rule is None:\n continue\n maxCtx = maxCtxContextualRule(maxCtx, rule, chain)\n\n elif st.Format == 3:\n maxCtx = maxCtxContextualRule(maxCtx, st, chain)\n\n return maxCtx\n\n\ndef maxCtxContextualRule(maxCtx, st, chain):\n """Calculate usMaxContext based on a contextual feature rule."""\n\n if not chain:\n return max(maxCtx, st.GlyphCount)\n elif chain == "Reverse":\n return max(maxCtx, 1 + st.LookAheadGlyphCount)\n return max(maxCtx, st.InputGlyphCount + st.LookAheadGlyphCount)\n
|
.venv\Lib\site-packages\fontTools\otlLib\maxContextCalc.py
|
maxContextCalc.py
|
Python
| 3,271 | 0.95 | 0.260417 | 0.094595 |
python-kit
| 146 |
2023-07-16T20:19:42.171217
|
Apache-2.0
| false |
c943731dd22895008ad1977861f0fbe6
|
"""OpenType Layout-related functionality."""\n
|
.venv\Lib\site-packages\fontTools\otlLib\__init__.py
|
__init__.py
|
Python
| 46 | 0.65 | 0 | 0 |
react-lib
| 916 |
2023-10-08T23:42:39.932465
|
Apache-2.0
| false |
477940433b8bb95f268306b837c9df42
|
from argparse import RawTextHelpFormatter\nfrom fontTools.otlLib.optimize.gpos import COMPRESSION_LEVEL, compact\nfrom fontTools.ttLib import TTFont\n\n\ndef main(args=None):\n """Optimize the layout tables of an existing font"""\n from argparse import ArgumentParser\n\n from fontTools import configLogger\n\n parser = ArgumentParser(\n prog="otlLib.optimize",\n description=main.__doc__,\n formatter_class=RawTextHelpFormatter,\n )\n parser.add_argument("font")\n parser.add_argument(\n "-o", metavar="OUTPUTFILE", dest="outfile", default=None, help="output file"\n )\n parser.add_argument(\n "--gpos-compression-level",\n help=COMPRESSION_LEVEL.help,\n default=COMPRESSION_LEVEL.default,\n choices=list(range(10)),\n type=int,\n )\n logging_group = parser.add_mutually_exclusive_group(required=False)\n logging_group.add_argument(\n "-v", "--verbose", action="store_true", help="Run more verbosely."\n )\n logging_group.add_argument(\n "-q", "--quiet", action="store_true", help="Turn verbosity off."\n )\n options = parser.parse_args(args)\n\n configLogger(\n level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")\n )\n\n font = TTFont(options.font)\n compact(font, options.gpos_compression_level)\n font.save(options.outfile or options.font)\n\n\nif __name__ == "__main__":\n import sys\n\n if len(sys.argv) > 1:\n sys.exit(main())\n import doctest\n\n sys.exit(doctest.testmod().failed)\n
|
.venv\Lib\site-packages\fontTools\otlLib\optimize\__init__.py
|
__init__.py
|
Python
| 1,583 | 0.85 | 0.09434 | 0 |
node-utils
| 361 |
2024-09-06T12:52:08.111121
|
BSD-3-Clause
| false |
f1cbaf462880156375658c8b310eb26f
|
import sys\nfrom fontTools.otlLib.optimize import main\n\n\nif __name__ == "__main__":\n sys.exit(main())\n
|
.venv\Lib\site-packages\fontTools\otlLib\optimize\__main__.py
|
__main__.py
|
Python
| 110 | 0.85 | 0.166667 | 0 |
python-kit
| 275 |
2025-05-06T16:18:41.944528
|
GPL-3.0
| false |
65f535411e5e4e8b47f045f37487c0f7
|
\n\n
|
.venv\Lib\site-packages\fontTools\otlLib\optimize\__pycache__\gpos.cpython-313.pyc
|
gpos.cpython-313.pyc
|
Other
| 18,183 | 0.8 | 0 | 0.025316 |
python-kit
| 540 |
2025-04-25T07:35:09.516093
|
GPL-3.0
| false |
a3d7c56ee554a76effeb633bd154050c
|
\n\n
|
.venv\Lib\site-packages\fontTools\otlLib\optimize\__pycache__\__init__.cpython-313.pyc
|
__init__.cpython-313.pyc
|
Other
| 2,520 | 0.95 | 0 | 0.074074 |
awesome-app
| 511 |
2025-02-18T07:53:35.583432
|
BSD-3-Clause
| false |
5186f4087bc22363ea7ea2717101b4b2
|
\n\n
|
.venv\Lib\site-packages\fontTools\otlLib\optimize\__pycache__\__main__.cpython-313.pyc
|
__main__.cpython-313.pyc
|
Other
| 388 | 0.7 | 0 | 0 |
react-lib
| 194 |
2023-12-07T12:39:53.373327
|
MIT
| false |
308b1d5406f10b34a728e130d01ee69f
|
\n\n
|
.venv\Lib\site-packages\fontTools\otlLib\__pycache__\error.cpython-313.pyc
|
error.cpython-313.pyc
|
Other
| 976 | 0.8 | 0 | 0 |
vue-tools
| 119 |
2024-05-07T04:02:58.168660
|
Apache-2.0
| false |
f972ffa6b51ad34c12a76ccc25416661
|
\n\n
|
.venv\Lib\site-packages\fontTools\otlLib\__pycache__\maxContextCalc.cpython-313.pyc
|
maxContextCalc.cpython-313.pyc
|
Other
| 3,697 | 0.8 | 0.035714 | 0.083333 |
react-lib
| 543 |
2024-08-19T03:10:43.532791
|
GPL-3.0
| false |
cd23936e9ae4f9987b11f71db79e7ba2
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.