diff --git a/.venv/bin/fonttools b/.venv/bin/fonttools new file mode 100644 index 00000000..aa7f8487 --- /dev/null +++ b/.venv/bin/fonttools @@ -0,0 +1,8 @@ +#!/media/HardDrive/Pyhton/School/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from fontTools.__main__ import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/pyftmerge b/.venv/bin/pyftmerge new file mode 100644 index 00000000..47adbc13 --- /dev/null +++ b/.venv/bin/pyftmerge @@ -0,0 +1,8 @@ +#!/media/HardDrive/Pyhton/School/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from fontTools.merge import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/pyftsubset b/.venv/bin/pyftsubset new file mode 100644 index 00000000..99d8ff45 --- /dev/null +++ b/.venv/bin/pyftsubset @@ -0,0 +1,8 @@ +#!/media/HardDrive/Pyhton/School/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from fontTools.subset import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/bin/ttx b/.venv/bin/ttx new file mode 100644 index 00000000..172b9823 --- /dev/null +++ b/.venv/bin/ttx @@ -0,0 +1,8 @@ +#!/media/HardDrive/Pyhton/School/.venv/bin/python +# -*- coding: utf-8 -*- +import re +import sys +from fontTools.ttx import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/.venv/lib/python3.9/site-packages/PIL/BdfFontFile.py b/.venv/lib/python3.9/site-packages/PIL/BdfFontFile.py new file mode 100644 index 00000000..102b72e1 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/BdfFontFile.py @@ -0,0 +1,110 @@ +# +# The Python Imaging Library +# $Id$ +# +# bitmap distribution font (bdf) file parser +# +# history: +# 1996-05-16 fl created (as 
bdf2pil) +# 1997-08-25 fl converted to FontFile driver +# 2001-05-25 fl removed bogus __init__ call +# 2002-11-20 fl robustification (from Kevin Cazabon, Dmitry Vasiliev) +# 2003-04-22 fl more robustification (from Graham Dumpleton) +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1997-2003 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +""" +Parse X Bitmap Distribution Format (BDF) +""" + + +from . import FontFile, Image + +bdf_slant = { + "R": "Roman", + "I": "Italic", + "O": "Oblique", + "RI": "Reverse Italic", + "RO": "Reverse Oblique", + "OT": "Other", +} + +bdf_spacing = {"P": "Proportional", "M": "Monospaced", "C": "Cell"} + + +def bdf_char(f): + # skip to STARTCHAR + while True: + s = f.readline() + if not s: + return None + if s[:9] == b"STARTCHAR": + break + id = s[9:].strip().decode("ascii") + + # load symbol properties + props = {} + while True: + s = f.readline() + if not s or s[:6] == b"BITMAP": + break + i = s.find(b" ") + props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii") + + # load bitmap + bitmap = [] + while True: + s = f.readline() + if not s or s[:7] == b"ENDCHAR": + break + bitmap.append(s[:-1]) + bitmap = b"".join(bitmap) + + [x, y, l, d] = [int(p) for p in props["BBX"].split()] + [dx, dy] = [int(p) for p in props["DWIDTH"].split()] + + bbox = (dx, dy), (l, -d - y, x + l, -d), (0, 0, x, y) + + try: + im = Image.frombytes("1", (x, y), bitmap, "hex", "1") + except ValueError: + # deal with zero-width characters + im = Image.new("1", (x, y)) + + return id, int(props["ENCODING"]), bbox, im + + +class BdfFontFile(FontFile.FontFile): + """Font file plugin for the X11 BDF format.""" + + def __init__(self, fp): + super().__init__() + + s = fp.readline() + if s[:13] != b"STARTFONT 2.1": + raise SyntaxError("not a valid BDF file") + + props = {} + comments = [] + + while True: + s = fp.readline() + if not s or s[:13] == b"ENDPROPERTIES": + break + i = s.find(b" ") + 
props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii") + if s[:i] in [b"COMMENT", b"COPYRIGHT"]: + if s.find(b"LogicalFontDescription") < 0: + comments.append(s[i + 1 : -1].decode("ascii")) + + while True: + c = bdf_char(fp) + if not c: + break + id, ch, (xy, dst, src), im = c + if 0 <= ch < len(self.glyph): + self.glyph[ch] = xy, dst, src, im diff --git a/.venv/lib/python3.9/site-packages/PIL/BlpImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/BlpImagePlugin.py new file mode 100644 index 00000000..7b78597b --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/BlpImagePlugin.py @@ -0,0 +1,428 @@ +""" +Blizzard Mipmap Format (.blp) +Jerome Leclanche + +The contents of this file are hereby released in the public domain (CC0) +Full text of the CC0 license: + https://creativecommons.org/publicdomain/zero/1.0/ + +BLP1 files, used mostly in Warcraft III, are not fully supported. +All types of BLP2 files used in World of Warcraft are supported. + +The BLP file structure consists of a header, up to 16 mipmaps of the +texture + +Texture sizes must be powers of two, though the two dimensions do +not have to be equal; 512x256 is valid, but 512x200 is not. +The first mipmap (mipmap #0) is the full size image; each subsequent +mipmap halves both dimensions. The final mipmap should be 1x1. + +BLP files come in many different flavours: +* JPEG-compressed (type == 0) - only supported for BLP1. +* RAW images (type == 1, encoding == 1). Each mipmap is stored as an + array of 8-bit values, one per pixel, left to right, top to bottom. + Each value is an index to the palette. +* DXT-compressed (type == 1, encoding == 2): +- DXT1 compression is used if alpha_encoding == 0. + - An additional alpha bit is used if alpha_depth == 1. + - DXT3 compression is used if alpha_encoding == 1. + - DXT5 compression is used if alpha_encoding == 7. +""" + +import struct +from io import BytesIO + +from . 
import Image, ImageFile + +BLP_FORMAT_JPEG = 0 + +BLP_ENCODING_UNCOMPRESSED = 1 +BLP_ENCODING_DXT = 2 +BLP_ENCODING_UNCOMPRESSED_RAW_BGRA = 3 + +BLP_ALPHA_ENCODING_DXT1 = 0 +BLP_ALPHA_ENCODING_DXT3 = 1 +BLP_ALPHA_ENCODING_DXT5 = 7 + + +def unpack_565(i): + return (((i >> 11) & 0x1F) << 3, ((i >> 5) & 0x3F) << 2, (i & 0x1F) << 3) + + +def decode_dxt1(data, alpha=False): + """ + input: one "row" of data (i.e. will produce 4*width pixels) + """ + + blocks = len(data) // 8 # number of blocks in row + ret = (bytearray(), bytearray(), bytearray(), bytearray()) + + for block in range(blocks): + # Decode next 8-byte block. + idx = block * 8 + color0, color1, bits = struct.unpack_from("> 2 + + a = 0xFF + if control == 0: + r, g, b = r0, g0, b0 + elif control == 1: + r, g, b = r1, g1, b1 + elif control == 2: + if color0 > color1: + r = (2 * r0 + r1) // 3 + g = (2 * g0 + g1) // 3 + b = (2 * b0 + b1) // 3 + else: + r = (r0 + r1) // 2 + g = (g0 + g1) // 2 + b = (b0 + b1) // 2 + elif control == 3: + if color0 > color1: + r = (2 * r1 + r0) // 3 + g = (2 * g1 + g0) // 3 + b = (2 * b1 + b0) // 3 + else: + r, g, b, a = 0, 0, 0, 0 + + if alpha: + ret[j].extend([r, g, b, a]) + else: + ret[j].extend([r, g, b]) + + return ret + + +def decode_dxt3(data): + """ + input: one "row" of data (i.e. will produce 4*width pixels) + """ + + blocks = len(data) // 16 # number of blocks in row + ret = (bytearray(), bytearray(), bytearray(), bytearray()) + + for block in range(blocks): + idx = block * 16 + block = data[idx : idx + 16] + # Decode next 16-byte block. 
+ bits = struct.unpack_from("<8B", block) + color0, color1 = struct.unpack_from(">= 4 + else: + high = True + a &= 0xF + a *= 17 # We get a value between 0 and 15 + + color_code = (code >> 2 * (4 * j + i)) & 0x03 + + if color_code == 0: + r, g, b = r0, g0, b0 + elif color_code == 1: + r, g, b = r1, g1, b1 + elif color_code == 2: + r = (2 * r0 + r1) // 3 + g = (2 * g0 + g1) // 3 + b = (2 * b0 + b1) // 3 + elif color_code == 3: + r = (2 * r1 + r0) // 3 + g = (2 * g1 + g0) // 3 + b = (2 * b1 + b0) // 3 + + ret[j].extend([r, g, b, a]) + + return ret + + +def decode_dxt5(data): + """ + input: one "row" of data (i.e. will produce 4 * width pixels) + """ + + blocks = len(data) // 16 # number of blocks in row + ret = (bytearray(), bytearray(), bytearray(), bytearray()) + + for block in range(blocks): + idx = block * 16 + block = data[idx : idx + 16] + # Decode next 16-byte block. + a0, a1 = struct.unpack_from("> alphacode_index) & 0x07 + elif alphacode_index == 15: + alphacode = (alphacode2 >> 15) | ((alphacode1 << 1) & 0x06) + else: # alphacode_index >= 18 and alphacode_index <= 45 + alphacode = (alphacode1 >> (alphacode_index - 16)) & 0x07 + + if alphacode == 0: + a = a0 + elif alphacode == 1: + a = a1 + elif a0 > a1: + a = ((8 - alphacode) * a0 + (alphacode - 1) * a1) // 7 + elif alphacode == 6: + a = 0 + elif alphacode == 7: + a = 255 + else: + a = ((6 - alphacode) * a0 + (alphacode - 1) * a1) // 5 + + color_code = (code >> 2 * (4 * j + i)) & 0x03 + + if color_code == 0: + r, g, b = r0, g0, b0 + elif color_code == 1: + r, g, b = r1, g1, b1 + elif color_code == 2: + r = (2 * r0 + r1) // 3 + g = (2 * g0 + g1) // 3 + b = (2 * b0 + b1) // 3 + elif color_code == 3: + r = (2 * r1 + r0) // 3 + g = (2 * g1 + g0) // 3 + b = (2 * b1 + b0) // 3 + + ret[j].extend([r, g, b, a]) + + return ret + + +class BLPFormatError(NotImplementedError): + pass + + +class BlpImageFile(ImageFile.ImageFile): + """ + Blizzard Mipmap Format + """ + + format = "BLP" + format_description = "Blizzard 
Mipmap Format" + + def _open(self): + self.magic = self.fp.read(4) + self._read_blp_header() + + if self.magic == b"BLP1": + decoder = "BLP1" + self.mode = "RGB" + elif self.magic == b"BLP2": + decoder = "BLP2" + self.mode = "RGBA" if self._blp_alpha_depth else "RGB" + else: + raise BLPFormatError(f"Bad BLP magic {repr(self.magic)}") + + self.tile = [(decoder, (0, 0) + self.size, 0, (self.mode, 0, 1))] + + def _read_blp_header(self): + (self._blp_compression,) = struct.unpack(" mode, rawmode + 1: ("P", "P;1"), + 4: ("P", "P;4"), + 8: ("P", "P"), + 16: ("RGB", "BGR;15"), + 24: ("RGB", "BGR"), + 32: ("RGB", "BGRX"), +} + + +def _accept(prefix): + return prefix[:2] == b"BM" + + +def _dib_accept(prefix): + return i32(prefix) in [12, 40, 64, 108, 124] + + +# ============================================================================= +# Image plugin for the Windows BMP format. +# ============================================================================= +class BmpImageFile(ImageFile.ImageFile): + """Image plugin for the Windows Bitmap format (BMP)""" + + # ------------------------------------------------------------- Description + format_description = "Windows Bitmap" + format = "BMP" + + # -------------------------------------------------- BMP Compression values + COMPRESSIONS = {"RAW": 0, "RLE8": 1, "RLE4": 2, "BITFIELDS": 3, "JPEG": 4, "PNG": 5} + for k, v in COMPRESSIONS.items(): + vars()[k] = v + + def _bitmap(self, header=0, offset=0): + """Read relevant info about the BMP""" + read, seek = self.fp.read, self.fp.seek + if header: + seek(header) + file_info = {} + # read bmp header size @offset 14 (this is part of the header size) + file_info["header_size"] = i32(read(4)) + file_info["direction"] = -1 + + # -------------------- If requested, read header at a specific position + # read the rest of the bmp header, without its size + header_data = ImageFile._safe_read(self.fp, file_info["header_size"] - 4) + + # -------------------------------------------------- 
IBM OS/2 Bitmap v1 + # ----- This format has different offsets because of width/height types + if file_info["header_size"] == 12: + file_info["width"] = i16(header_data, 0) + file_info["height"] = i16(header_data, 2) + file_info["planes"] = i16(header_data, 4) + file_info["bits"] = i16(header_data, 6) + file_info["compression"] = self.RAW + file_info["palette_padding"] = 3 + + # --------------------------------------------- Windows Bitmap v2 to v5 + # v3, OS/2 v2, v4, v5 + elif file_info["header_size"] in (40, 64, 108, 124): + file_info["y_flip"] = header_data[7] == 0xFF + file_info["direction"] = 1 if file_info["y_flip"] else -1 + file_info["width"] = i32(header_data, 0) + file_info["height"] = ( + i32(header_data, 4) + if not file_info["y_flip"] + else 2 ** 32 - i32(header_data, 4) + ) + file_info["planes"] = i16(header_data, 8) + file_info["bits"] = i16(header_data, 10) + file_info["compression"] = i32(header_data, 12) + # byte size of pixel data + file_info["data_size"] = i32(header_data, 16) + file_info["pixels_per_meter"] = ( + i32(header_data, 20), + i32(header_data, 24), + ) + file_info["colors"] = i32(header_data, 28) + file_info["palette_padding"] = 4 + self.info["dpi"] = tuple(x / 39.3701 for x in file_info["pixels_per_meter"]) + if file_info["compression"] == self.BITFIELDS: + if len(header_data) >= 52: + for idx, mask in enumerate( + ["r_mask", "g_mask", "b_mask", "a_mask"] + ): + file_info[mask] = i32(header_data, 36 + idx * 4) + else: + # 40 byte headers only have the three components in the + # bitfields masks, ref: + # https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx + # See also + # https://github.com/python-pillow/Pillow/issues/1293 + # There is a 4th component in the RGBQuad, in the alpha + # location, but it is listed as a reserved component, + # and it is not generally an alpha channel + file_info["a_mask"] = 0x0 + for mask in ["r_mask", "g_mask", "b_mask"]: + file_info[mask] = i32(read(4)) + 
file_info["rgb_mask"] = ( + file_info["r_mask"], + file_info["g_mask"], + file_info["b_mask"], + ) + file_info["rgba_mask"] = ( + file_info["r_mask"], + file_info["g_mask"], + file_info["b_mask"], + file_info["a_mask"], + ) + else: + raise OSError(f"Unsupported BMP header type ({file_info['header_size']})") + + # ------------------ Special case : header is reported 40, which + # ---------------------- is shorter than real size for bpp >= 16 + self._size = file_info["width"], file_info["height"] + + # ------- If color count was not found in the header, compute from bits + file_info["colors"] = ( + file_info["colors"] + if file_info.get("colors", 0) + else (1 << file_info["bits"]) + ) + + # ---------------------- Check bit depth for unusual unsupported values + self.mode, raw_mode = BIT2MODE.get(file_info["bits"], (None, None)) + if self.mode is None: + raise OSError(f"Unsupported BMP pixel depth ({file_info['bits']})") + + # ---------------- Process BMP with Bitfields compression (not palette) + if file_info["compression"] == self.BITFIELDS: + SUPPORTED = { + 32: [ + (0xFF0000, 0xFF00, 0xFF, 0x0), + (0xFF0000, 0xFF00, 0xFF, 0xFF000000), + (0xFF, 0xFF00, 0xFF0000, 0xFF000000), + (0x0, 0x0, 0x0, 0x0), + (0xFF000000, 0xFF0000, 0xFF00, 0x0), + ], + 24: [(0xFF0000, 0xFF00, 0xFF)], + 16: [(0xF800, 0x7E0, 0x1F), (0x7C00, 0x3E0, 0x1F)], + } + MASK_MODES = { + (32, (0xFF0000, 0xFF00, 0xFF, 0x0)): "BGRX", + (32, (0xFF000000, 0xFF0000, 0xFF00, 0x0)): "XBGR", + (32, (0xFF, 0xFF00, 0xFF0000, 0xFF000000)): "RGBA", + (32, (0xFF0000, 0xFF00, 0xFF, 0xFF000000)): "BGRA", + (32, (0x0, 0x0, 0x0, 0x0)): "BGRA", + (24, (0xFF0000, 0xFF00, 0xFF)): "BGR", + (16, (0xF800, 0x7E0, 0x1F)): "BGR;16", + (16, (0x7C00, 0x3E0, 0x1F)): "BGR;15", + } + if file_info["bits"] in SUPPORTED: + if ( + file_info["bits"] == 32 + and file_info["rgba_mask"] in SUPPORTED[file_info["bits"]] + ): + raw_mode = MASK_MODES[(file_info["bits"], file_info["rgba_mask"])] + self.mode = "RGBA" if "A" in raw_mode else 
self.mode + elif ( + file_info["bits"] in (24, 16) + and file_info["rgb_mask"] in SUPPORTED[file_info["bits"]] + ): + raw_mode = MASK_MODES[(file_info["bits"], file_info["rgb_mask"])] + else: + raise OSError("Unsupported BMP bitfields layout") + else: + raise OSError("Unsupported BMP bitfields layout") + elif file_info["compression"] == self.RAW: + if file_info["bits"] == 32 and header == 22: # 32-bit .cur offset + raw_mode, self.mode = "BGRA", "RGBA" + else: + raise OSError(f"Unsupported BMP compression ({file_info['compression']})") + + # --------------- Once the header is processed, process the palette/LUT + if self.mode == "P": # Paletted for 1, 4 and 8 bit images + + # ---------------------------------------------------- 1-bit images + if not (0 < file_info["colors"] <= 65536): + raise OSError(f"Unsupported BMP Palette size ({file_info['colors']})") + else: + padding = file_info["palette_padding"] + palette = read(padding * file_info["colors"]) + greyscale = True + indices = ( + (0, 255) + if file_info["colors"] == 2 + else list(range(file_info["colors"])) + ) + + # ----------------- Check if greyscale and ignore palette if so + for ind, val in enumerate(indices): + rgb = palette[ind * padding : ind * padding + 3] + if rgb != o8(val) * 3: + greyscale = False + + # ------- If all colors are grey, white or black, ditch palette + if greyscale: + self.mode = "1" if file_info["colors"] == 2 else "L" + raw_mode = self.mode + else: + self.mode = "P" + self.palette = ImagePalette.raw( + "BGRX" if padding == 4 else "BGR", palette + ) + + # ---------------------------- Finally set the tile data for the plugin + self.info["compression"] = file_info["compression"] + self.tile = [ + ( + "raw", + (0, 0, file_info["width"], file_info["height"]), + offset or self.fp.tell(), + ( + raw_mode, + ((file_info["width"] * file_info["bits"] + 31) >> 3) & (~3), + file_info["direction"], + ), + ) + ] + + def _open(self): + """Open file, check magic number and read header""" + # read 14 
bytes: magic number, filesize, reserved, header final offset + head_data = self.fp.read(14) + # choke if the file does not have the required magic bytes + if not _accept(head_data): + raise SyntaxError("Not a BMP file") + # read the start position of the BMP image data (u32) + offset = i32(head_data, 10) + # load bitmap information (offset=raster info) + self._bitmap(offset=offset) + + +# ============================================================================= +# Image plugin for the DIB format (BMP alias) +# ============================================================================= +class DibImageFile(BmpImageFile): + + format = "DIB" + format_description = "Windows Bitmap" + + def _open(self): + self._bitmap() + + +# +# -------------------------------------------------------------------- +# Write BMP file + + +SAVE = { + "1": ("1", 1, 2), + "L": ("L", 8, 256), + "P": ("P", 8, 256), + "RGB": ("BGR", 24, 0), + "RGBA": ("BGRA", 32, 0), +} + + +def _dib_save(im, fp, filename): + _save(im, fp, filename, False) + + +def _save(im, fp, filename, bitmap_header=True): + try: + rawmode, bits, colors = SAVE[im.mode] + except KeyError as e: + raise OSError(f"cannot write mode {im.mode} as BMP") from e + + info = im.encoderinfo + + dpi = info.get("dpi", (96, 96)) + + # 1 meter == 39.3701 inches + ppm = tuple(map(lambda x: int(x * 39.3701 + 0.5), dpi)) + + stride = ((im.size[0] * bits + 7) // 8 + 3) & (~3) + header = 40 # or 64 for OS/2 version 2 + image = stride * im.size[1] + + # bitmap header + if bitmap_header: + offset = 14 + header + colors * 4 + file_size = offset + image + if file_size > 2 ** 32 - 1: + raise ValueError("File size is too large for the BMP format") + fp.write( + b"BM" # file type (magic) + + o32(file_size) # file size + + o32(0) # reserved + + o32(offset) # image data offset + ) + + # bitmap info header + fp.write( + o32(header) # info header size + + o32(im.size[0]) # width + + o32(im.size[1]) # height + + o16(1) # planes + + o16(bits) # depth + 
+ o32(0) # compression (0=uncompressed) + + o32(image) # size of bitmap + + o32(ppm[0]) # resolution + + o32(ppm[1]) # resolution + + o32(colors) # colors used + + o32(colors) # colors important + ) + + fp.write(b"\0" * (header - 40)) # padding (for OS/2 format) + + if im.mode == "1": + for i in (0, 255): + fp.write(o8(i) * 4) + elif im.mode == "L": + for i in range(256): + fp.write(o8(i) * 4) + elif im.mode == "P": + fp.write(im.im.getpalette("RGB", "BGRX")) + + ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, stride, -1))]) + + +# +# -------------------------------------------------------------------- +# Registry + + +Image.register_open(BmpImageFile.format, BmpImageFile, _accept) +Image.register_save(BmpImageFile.format, _save) + +Image.register_extension(BmpImageFile.format, ".bmp") + +Image.register_mime(BmpImageFile.format, "image/bmp") + +Image.register_open(DibImageFile.format, DibImageFile, _dib_accept) +Image.register_save(DibImageFile.format, _dib_save) + +Image.register_extension(DibImageFile.format, ".dib") + +Image.register_mime(DibImageFile.format, "image/bmp") diff --git a/.venv/lib/python3.9/site-packages/PIL/BufrStubImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/BufrStubImagePlugin.py new file mode 100644 index 00000000..48f21e1b --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/BufrStubImagePlugin.py @@ -0,0 +1,73 @@ +# +# The Python Imaging Library +# $Id$ +# +# BUFR stub adapter +# +# Copyright (c) 1996-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile + +_handler = None + + +def register_handler(handler): + """ + Install application-specific BUFR image handler. + + :param handler: Handler object. 
+ """ + global _handler + _handler = handler + + +# -------------------------------------------------------------------- +# Image adapter + + +def _accept(prefix): + return prefix[:4] == b"BUFR" or prefix[:4] == b"ZCZC" + + +class BufrStubImageFile(ImageFile.StubImageFile): + + format = "BUFR" + format_description = "BUFR" + + def _open(self): + + offset = self.fp.tell() + + if not _accept(self.fp.read(4)): + raise SyntaxError("Not a BUFR file") + + self.fp.seek(offset) + + # make something up + self.mode = "F" + self._size = 1, 1 + + loader = self._load() + if loader: + loader.open(self) + + def _load(self): + return _handler + + +def _save(im, fp, filename): + if _handler is None or not hasattr("_handler", "save"): + raise OSError("BUFR save handler not installed") + _handler.save(im, fp, filename) + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(BufrStubImageFile.format, BufrStubImageFile, _accept) +Image.register_save(BufrStubImageFile.format, _save) + +Image.register_extension(BufrStubImageFile.format, ".bufr") diff --git a/.venv/lib/python3.9/site-packages/PIL/ContainerIO.py b/.venv/lib/python3.9/site-packages/PIL/ContainerIO.py new file mode 100644 index 00000000..45e80b39 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ContainerIO.py @@ -0,0 +1,120 @@ +# +# The Python Imaging Library. +# $Id$ +# +# a class to read from a container file +# +# History: +# 1995-06-18 fl Created +# 1995-09-07 fl Added readline(), readlines() +# +# Copyright (c) 1997-2001 by Secret Labs AB +# Copyright (c) 1995 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + + +import io + + +class ContainerIO: + """ + A file object that provides read access to a part of an existing + file (for example a TAR file). + """ + + def __init__(self, file, offset, length): + """ + Create file object. + + :param file: Existing file. + :param offset: Start of region, in bytes. 
+ :param length: Size of region, in bytes. + """ + self.fh = file + self.pos = 0 + self.offset = offset + self.length = length + self.fh.seek(offset) + + ## + # Always false. + + def isatty(self): + return False + + def seek(self, offset, mode=io.SEEK_SET): + """ + Move file pointer. + + :param offset: Offset in bytes. + :param mode: Starting position. Use 0 for beginning of region, 1 + for current offset, and 2 for end of region. You cannot move + the pointer outside the defined region. + """ + if mode == 1: + self.pos = self.pos + offset + elif mode == 2: + self.pos = self.length + offset + else: + self.pos = offset + # clamp + self.pos = max(0, min(self.pos, self.length)) + self.fh.seek(self.offset + self.pos) + + def tell(self): + """ + Get current file pointer. + + :returns: Offset from start of region, in bytes. + """ + return self.pos + + def read(self, n=0): + """ + Read data. + + :param n: Number of bytes to read. If omitted or zero, + read until end of region. + :returns: An 8-bit string. + """ + if n: + n = min(n, self.length - self.pos) + else: + n = self.length - self.pos + if not n: # EOF + return b"" if "b" in self.fh.mode else "" + self.pos = self.pos + n + return self.fh.read(n) + + def readline(self): + """ + Read a line of text. + + :returns: An 8-bit string. + """ + s = b"" if "b" in self.fh.mode else "" + newline_character = b"\n" if "b" in self.fh.mode else "\n" + while True: + c = self.read(1) + if not c: + break + s = s + c + if c == newline_character: + break + return s + + def readlines(self): + """ + Read multiple lines of text. + + :returns: A list of 8-bit strings. 
+ """ + lines = [] + while True: + s = self.readline() + if not s: + break + lines.append(s) + return lines diff --git a/.venv/lib/python3.9/site-packages/PIL/CurImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/CurImagePlugin.py new file mode 100644 index 00000000..42af5caf --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/CurImagePlugin.py @@ -0,0 +1,75 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Windows Cursor support for PIL +# +# notes: +# uses BmpImagePlugin.py to read the bitmap data. +# +# history: +# 96-05-27 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# +from . import BmpImagePlugin, Image +from ._binary import i16le as i16 +from ._binary import i32le as i32 + +# +# -------------------------------------------------------------------- + + +def _accept(prefix): + return prefix[:4] == b"\0\0\2\0" + + +## +# Image plugin for Windows Cursor files. 
+ + +class CurImageFile(BmpImagePlugin.BmpImageFile): + + format = "CUR" + format_description = "Windows Cursor" + + def _open(self): + + offset = self.fp.tell() + + # check magic + s = self.fp.read(6) + if not _accept(s): + raise SyntaxError("not a CUR file") + + # pick the largest cursor in the file + m = b"" + for i in range(i16(s, 4)): + s = self.fp.read(16) + if not m: + m = s + elif s[0] > m[0] and s[1] > m[1]: + m = s + if not m: + raise TypeError("No cursors were found") + + # load as bitmap + self._bitmap(i32(m, 12) + offset) + + # patch up the bitmap height + self._size = self.size[0], self.size[1] // 2 + d, e, o, a = self.tile[0] + self.tile[0] = d, (0, 0) + self.size, o, a + + return + + +# +# -------------------------------------------------------------------- + +Image.register_open(CurImageFile.format, CurImageFile, _accept) + +Image.register_extension(CurImageFile.format, ".cur") diff --git a/.venv/lib/python3.9/site-packages/PIL/DcxImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/DcxImagePlugin.py new file mode 100644 index 00000000..de21db8f --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/DcxImagePlugin.py @@ -0,0 +1,89 @@ +# +# The Python Imaging Library. +# $Id$ +# +# DCX file handling +# +# DCX is a container file format defined by Intel, commonly used +# for fax applications. Each DCX file consists of a directory +# (a list of file offsets) followed by a set of (usually 1-bit) +# PCX files. +# +# History: +# 1995-09-09 fl Created +# 1996-03-20 fl Properly derived from PcxImageFile. +# 1998-07-15 fl Renamed offset attribute to avoid name clash +# 2002-07-30 fl Fixed file handling +# +# Copyright (c) 1997-98 by Secret Labs AB. +# Copyright (c) 1995-96 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +from . import Image +from ._binary import i32le as i32 +from .PcxImagePlugin import PcxImageFile + +MAGIC = 0x3ADE68B1 # QUIZ: what's this value, then? 
+ + +def _accept(prefix): + return len(prefix) >= 4 and i32(prefix) == MAGIC + + +## +# Image plugin for the Intel DCX format. + + +class DcxImageFile(PcxImageFile): + + format = "DCX" + format_description = "Intel DCX" + _close_exclusive_fp_after_loading = False + + def _open(self): + + # Header + s = self.fp.read(4) + if not _accept(s): + raise SyntaxError("not a DCX file") + + # Component directory + self._offset = [] + for i in range(1024): + offset = i32(self.fp.read(4)) + if not offset: + break + self._offset.append(offset) + + self.__fp = self.fp + self.frame = None + self.n_frames = len(self._offset) + self.is_animated = self.n_frames > 1 + self.seek(0) + + def seek(self, frame): + if not self._seek_check(frame): + return + self.frame = frame + self.fp = self.__fp + self.fp.seek(self._offset[frame]) + PcxImageFile._open(self) + + def tell(self): + return self.frame + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +Image.register_open(DcxImageFile.format, DcxImageFile, _accept) + +Image.register_extension(DcxImageFile.format, ".dcx") diff --git a/.venv/lib/python3.9/site-packages/PIL/DdsImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/DdsImagePlugin.py new file mode 100644 index 00000000..260924fc --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/DdsImagePlugin.py @@ -0,0 +1,247 @@ +""" +A Pillow loader for .dds files (S3TC-compressed aka DXTC) +Jerome Leclanche + +Documentation: + https://web.archive.org/web/20170802060935/http://oss.sgi.com/projects/ogl-sample/registry/EXT/texture_compression_s3tc.txt + +The contents of this file are hereby released in the public domain (CC0) +Full text of the CC0 license: + https://creativecommons.org/publicdomain/zero/1.0/ +""" + +import struct +from io import BytesIO + +from . 
import Image, ImageFile +from ._binary import o32le as o32 + +# Magic ("DDS ") +DDS_MAGIC = 0x20534444 + +# DDS flags +DDSD_CAPS = 0x1 +DDSD_HEIGHT = 0x2 +DDSD_WIDTH = 0x4 +DDSD_PITCH = 0x8 +DDSD_PIXELFORMAT = 0x1000 +DDSD_MIPMAPCOUNT = 0x20000 +DDSD_LINEARSIZE = 0x80000 +DDSD_DEPTH = 0x800000 + +# DDS caps +DDSCAPS_COMPLEX = 0x8 +DDSCAPS_TEXTURE = 0x1000 +DDSCAPS_MIPMAP = 0x400000 + +DDSCAPS2_CUBEMAP = 0x200 +DDSCAPS2_CUBEMAP_POSITIVEX = 0x400 +DDSCAPS2_CUBEMAP_NEGATIVEX = 0x800 +DDSCAPS2_CUBEMAP_POSITIVEY = 0x1000 +DDSCAPS2_CUBEMAP_NEGATIVEY = 0x2000 +DDSCAPS2_CUBEMAP_POSITIVEZ = 0x4000 +DDSCAPS2_CUBEMAP_NEGATIVEZ = 0x8000 +DDSCAPS2_VOLUME = 0x200000 + +# Pixel Format +DDPF_ALPHAPIXELS = 0x1 +DDPF_ALPHA = 0x2 +DDPF_FOURCC = 0x4 +DDPF_PALETTEINDEXED8 = 0x20 +DDPF_RGB = 0x40 +DDPF_LUMINANCE = 0x20000 + + +# dds.h + +DDS_FOURCC = DDPF_FOURCC +DDS_RGB = DDPF_RGB +DDS_RGBA = DDPF_RGB | DDPF_ALPHAPIXELS +DDS_LUMINANCE = DDPF_LUMINANCE +DDS_LUMINANCEA = DDPF_LUMINANCE | DDPF_ALPHAPIXELS +DDS_ALPHA = DDPF_ALPHA +DDS_PAL8 = DDPF_PALETTEINDEXED8 + +DDS_HEADER_FLAGS_TEXTURE = DDSD_CAPS | DDSD_HEIGHT | DDSD_WIDTH | DDSD_PIXELFORMAT +DDS_HEADER_FLAGS_MIPMAP = DDSD_MIPMAPCOUNT +DDS_HEADER_FLAGS_VOLUME = DDSD_DEPTH +DDS_HEADER_FLAGS_PITCH = DDSD_PITCH +DDS_HEADER_FLAGS_LINEARSIZE = DDSD_LINEARSIZE + +DDS_HEIGHT = DDSD_HEIGHT +DDS_WIDTH = DDSD_WIDTH + +DDS_SURFACE_FLAGS_TEXTURE = DDSCAPS_TEXTURE +DDS_SURFACE_FLAGS_MIPMAP = DDSCAPS_COMPLEX | DDSCAPS_MIPMAP +DDS_SURFACE_FLAGS_CUBEMAP = DDSCAPS_COMPLEX + +DDS_CUBEMAP_POSITIVEX = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_POSITIVEX +DDS_CUBEMAP_NEGATIVEX = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEX +DDS_CUBEMAP_POSITIVEY = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_POSITIVEY +DDS_CUBEMAP_NEGATIVEY = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEY +DDS_CUBEMAP_POSITIVEZ = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_POSITIVEZ +DDS_CUBEMAP_NEGATIVEZ = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEZ + + +# DXT1 +DXT1_FOURCC = 0x31545844 + +# DXT3 +DXT3_FOURCC 
= 0x33545844 + +# DXT5 +DXT5_FOURCC = 0x35545844 + + +# dxgiformat.h + +DXGI_FORMAT_R8G8B8A8_TYPELESS = 27 +DXGI_FORMAT_R8G8B8A8_UNORM = 28 +DXGI_FORMAT_R8G8B8A8_UNORM_SRGB = 29 +DXGI_FORMAT_BC5_TYPELESS = 82 +DXGI_FORMAT_BC5_UNORM = 83 +DXGI_FORMAT_BC5_SNORM = 84 +DXGI_FORMAT_BC7_TYPELESS = 97 +DXGI_FORMAT_BC7_UNORM = 98 +DXGI_FORMAT_BC7_UNORM_SRGB = 99 + + +class DdsImageFile(ImageFile.ImageFile): + format = "DDS" + format_description = "DirectDraw Surface" + + def _open(self): + magic, header_size = struct.unpack(" 0: + s = fp.read(min(lengthfile, 100 * 1024)) + if not s: + break + lengthfile -= len(s) + f.write(s) + + device = "pngalpha" if transparency else "ppmraw" + + # Build Ghostscript command + command = [ + "gs", + "-q", # quiet mode + "-g%dx%d" % size, # set output geometry (pixels) + "-r%fx%f" % res, # set input DPI (dots per inch) + "-dBATCH", # exit after processing + "-dNOPAUSE", # don't pause between pages + "-dSAFER", # safe mode + f"-sDEVICE={device}", + f"-sOutputFile={outfile}", # output file + # adjust for image origin + "-c", + f"{-bbox[0]} {-bbox[1]} translate", + "-f", + infile, # input file + # showpage (see https://bugs.ghostscript.com/show_bug.cgi?id=698272) + "-c", + "showpage", + ] + + if gs_windows_binary is not None: + if not gs_windows_binary: + raise OSError("Unable to locate Ghostscript on paths") + command[0] = gs_windows_binary + + # push data through Ghostscript + try: + startupinfo = None + if sys.platform.startswith("win"): + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + subprocess.check_call(command, startupinfo=startupinfo) + out_im = Image.open(outfile) + out_im.load() + finally: + try: + os.unlink(outfile) + if infile_temp: + os.unlink(infile_temp) + except OSError: + pass + + im = out_im.im.copy() + out_im.close() + return im + + +class PSFile: + """ + Wrapper for bytesio object that treats either CR or LF as end of line. 
+ """ + + def __init__(self, fp): + self.fp = fp + self.char = None + + def seek(self, offset, whence=io.SEEK_SET): + self.char = None + self.fp.seek(offset, whence) + + def readline(self): + s = [self.char or b""] + self.char = None + + c = self.fp.read(1) + while (c not in b"\r\n") and len(c): + s.append(c) + c = self.fp.read(1) + + self.char = self.fp.read(1) + # line endings can be 1 or 2 of \r \n, in either order + if self.char in b"\r\n": + self.char = None + + return b"".join(s).decode("latin-1") + + +def _accept(prefix): + return prefix[:4] == b"%!PS" or (len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5) + + +## +# Image plugin for Encapsulated PostScript. This plugin supports only +# a few variants of this format. + + +class EpsImageFile(ImageFile.ImageFile): + """EPS File Parser for the Python Imaging Library""" + + format = "EPS" + format_description = "Encapsulated Postscript" + + mode_map = {1: "L", 2: "LAB", 3: "RGB", 4: "CMYK"} + + def _open(self): + (length, offset) = self._find_offset(self.fp) + + # Rewrap the open file pointer in something that will + # convert line endings and decode to latin-1. + fp = PSFile(self.fp) + + # go to offset - start of "%!PS" + fp.seek(offset) + + box = None + + self.mode = "RGB" + self._size = 1, 1 # FIXME: huh? + + # + # Load EPS header + + s_raw = fp.readline() + s = s_raw.strip("\r\n") + + while s_raw: + if s: + if len(s) > 255: + raise SyntaxError("not an EPS file") + + try: + m = split.match(s) + except re.error as e: + raise SyntaxError("not an EPS file") from e + + if m: + k, v = m.group(1, 2) + self.info[k] = v + if k == "BoundingBox": + try: + # Note: The DSC spec says that BoundingBox + # fields should be integers, but some drivers + # put floating point values there anyway. 
+ box = [int(float(i)) for i in v.split()] + self._size = box[2] - box[0], box[3] - box[1] + self.tile = [ + ("eps", (0, 0) + self.size, offset, (length, box)) + ] + except Exception: + pass + + else: + m = field.match(s) + if m: + k = m.group(1) + + if k == "EndComments": + break + if k[:8] == "PS-Adobe": + self.info[k[:8]] = k[9:] + else: + self.info[k] = "" + elif s[0] == "%": + # handle non-DSC PostScript comments that some + # tools mistakenly put in the Comments section + pass + else: + raise OSError("bad EPS header") + + s_raw = fp.readline() + s = s_raw.strip("\r\n") + + if s and s[:1] != "%": + break + + # + # Scan for an "ImageData" descriptor + + while s[:1] == "%": + + if len(s) > 255: + raise SyntaxError("not an EPS file") + + if s[:11] == "%ImageData:": + # Encoded bitmapped image. + x, y, bi, mo = s[11:].split(None, 7)[:4] + + if int(bi) != 8: + break + try: + self.mode = self.mode_map[int(mo)] + except ValueError: + break + + self._size = int(x), int(y) + return + + s = fp.readline().strip("\r\n") + if not s: + break + + if not box: + raise OSError("cannot determine EPS bounding box") + + def _find_offset(self, fp): + + s = fp.read(160) + + if s[:4] == b"%!PS": + # for HEAD without binary preview + fp.seek(0, io.SEEK_END) + length = fp.tell() + offset = 0 + elif i32(s, 0) == 0xC6D3D0C5: + # FIX for: Some EPS file not handled correctly / issue #302 + # EPS can contain binary data + # or start directly with latin coding + # more info see: + # https://web.archive.org/web/20160528181353/http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf + offset = i32(s, 4) + length = i32(s, 8) + else: + raise SyntaxError("not an EPS file") + + return (length, offset) + + def load(self, scale=1, transparency=False): + # Load EPS via Ghostscript + if not self.tile: + return + self.im = Ghostscript(self.tile, self.size, self.fp, scale, transparency) + self.mode = self.im.mode + self._size = self.im.size + self.tile = [] + + def load_seek(self, *args, 
**kwargs): + # we can't incrementally load, so force ImageFile.parser to + # use our custom load method by defining this method. + pass + + +# +# -------------------------------------------------------------------- + + +def _save(im, fp, filename, eps=1): + """EPS Writer for the Python Imaging Library.""" + + # + # make sure image data is available + im.load() + + # + # determine PostScript image mode + if im.mode == "L": + operator = (8, 1, b"image") + elif im.mode == "RGB": + operator = (8, 3, b"false 3 colorimage") + elif im.mode == "CMYK": + operator = (8, 4, b"false 4 colorimage") + else: + raise ValueError("image mode is not supported") + + if eps: + # + # write EPS header + fp.write(b"%!PS-Adobe-3.0 EPSF-3.0\n") + fp.write(b"%%Creator: PIL 0.1 EpsEncode\n") + # fp.write("%%CreationDate: %s"...) + fp.write(b"%%%%BoundingBox: 0 0 %d %d\n" % im.size) + fp.write(b"%%Pages: 1\n") + fp.write(b"%%EndComments\n") + fp.write(b"%%Page: 1 1\n") + fp.write(b"%%ImageData: %d %d " % im.size) + fp.write(b'%d %d 0 1 1 "%s"\n' % operator) + + # + # image header + fp.write(b"gsave\n") + fp.write(b"10 dict begin\n") + fp.write(b"/buf %d string def\n" % (im.size[0] * operator[1])) + fp.write(b"%d %d scale\n" % im.size) + fp.write(b"%d %d 8\n" % im.size) # <= bits + fp.write(b"[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1])) + fp.write(b"{ currentfile buf readhexstring pop } bind\n") + fp.write(operator[2] + b"\n") + if hasattr(fp, "flush"): + fp.flush() + + ImageFile._save(im, fp, [("eps", (0, 0) + im.size, 0, None)]) + + fp.write(b"\n%%%%EndBinary\n") + fp.write(b"grestore end\n") + if hasattr(fp, "flush"): + fp.flush() + + +# +# -------------------------------------------------------------------- + + +Image.register_open(EpsImageFile.format, EpsImageFile, _accept) + +Image.register_save(EpsImageFile.format, _save) + +Image.register_extensions(EpsImageFile.format, [".ps", ".eps"]) + +Image.register_mime(EpsImageFile.format, "application/postscript") diff --git 
a/.venv/lib/python3.9/site-packages/PIL/ExifTags.py b/.venv/lib/python3.9/site-packages/PIL/ExifTags.py new file mode 100644 index 00000000..7da2ddae --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ExifTags.py @@ -0,0 +1,331 @@ +# +# The Python Imaging Library. +# $Id$ +# +# EXIF tags +# +# Copyright (c) 2003 by Secret Labs AB +# +# See the README file for information on usage and redistribution. +# + +""" +This module provides constants and clear-text names for various +well-known EXIF tags. +""" + + +TAGS = { + # possibly incomplete + 0x0001: "InteropIndex", + 0x000B: "ProcessingSoftware", + 0x00FE: "NewSubfileType", + 0x00FF: "SubfileType", + 0x0100: "ImageWidth", + 0x0101: "ImageLength", + 0x0102: "BitsPerSample", + 0x0103: "Compression", + 0x0106: "PhotometricInterpretation", + 0x0107: "Thresholding", + 0x0108: "CellWidth", + 0x0109: "CellLength", + 0x010A: "FillOrder", + 0x010D: "DocumentName", + 0x010E: "ImageDescription", + 0x010F: "Make", + 0x0110: "Model", + 0x0111: "StripOffsets", + 0x0112: "Orientation", + 0x0115: "SamplesPerPixel", + 0x0116: "RowsPerStrip", + 0x0117: "StripByteCounts", + 0x0118: "MinSampleValue", + 0x0119: "MaxSampleValue", + 0x011A: "XResolution", + 0x011B: "YResolution", + 0x011C: "PlanarConfiguration", + 0x011D: "PageName", + 0x0120: "FreeOffsets", + 0x0121: "FreeByteCounts", + 0x0122: "GrayResponseUnit", + 0x0123: "GrayResponseCurve", + 0x0124: "T4Options", + 0x0125: "T6Options", + 0x0128: "ResolutionUnit", + 0x0129: "PageNumber", + 0x012D: "TransferFunction", + 0x0131: "Software", + 0x0132: "DateTime", + 0x013B: "Artist", + 0x013C: "HostComputer", + 0x013D: "Predictor", + 0x013E: "WhitePoint", + 0x013F: "PrimaryChromaticities", + 0x0140: "ColorMap", + 0x0141: "HalftoneHints", + 0x0142: "TileWidth", + 0x0143: "TileLength", + 0x0144: "TileOffsets", + 0x0145: "TileByteCounts", + 0x014A: "SubIFDs", + 0x014C: "InkSet", + 0x014D: "InkNames", + 0x014E: "NumberOfInks", + 0x0150: "DotRange", + 0x0151: "TargetPrinter", + 0x0152: 
"ExtraSamples", + 0x0153: "SampleFormat", + 0x0154: "SMinSampleValue", + 0x0155: "SMaxSampleValue", + 0x0156: "TransferRange", + 0x0157: "ClipPath", + 0x0158: "XClipPathUnits", + 0x0159: "YClipPathUnits", + 0x015A: "Indexed", + 0x015B: "JPEGTables", + 0x015F: "OPIProxy", + 0x0200: "JPEGProc", + 0x0201: "JpegIFOffset", + 0x0202: "JpegIFByteCount", + 0x0203: "JpegRestartInterval", + 0x0205: "JpegLosslessPredictors", + 0x0206: "JpegPointTransforms", + 0x0207: "JpegQTables", + 0x0208: "JpegDCTables", + 0x0209: "JpegACTables", + 0x0211: "YCbCrCoefficients", + 0x0212: "YCbCrSubSampling", + 0x0213: "YCbCrPositioning", + 0x0214: "ReferenceBlackWhite", + 0x02BC: "XMLPacket", + 0x1000: "RelatedImageFileFormat", + 0x1001: "RelatedImageWidth", + 0x1002: "RelatedImageLength", + 0x4746: "Rating", + 0x4749: "RatingPercent", + 0x800D: "ImageID", + 0x828D: "CFARepeatPatternDim", + 0x828E: "CFAPattern", + 0x828F: "BatteryLevel", + 0x8298: "Copyright", + 0x829A: "ExposureTime", + 0x829D: "FNumber", + 0x83BB: "IPTCNAA", + 0x8649: "ImageResources", + 0x8769: "ExifOffset", + 0x8773: "InterColorProfile", + 0x8822: "ExposureProgram", + 0x8824: "SpectralSensitivity", + 0x8825: "GPSInfo", + 0x8827: "ISOSpeedRatings", + 0x8828: "OECF", + 0x8829: "Interlace", + 0x882A: "TimeZoneOffset", + 0x882B: "SelfTimerMode", + 0x8830: "SensitivityType", + 0x8831: "StandardOutputSensitivity", + 0x8832: "RecommendedExposureIndex", + 0x8833: "ISOSpeed", + 0x8834: "ISOSpeedLatitudeyyy", + 0x8835: "ISOSpeedLatitudezzz", + 0x9000: "ExifVersion", + 0x9003: "DateTimeOriginal", + 0x9004: "DateTimeDigitized", + 0x9010: "OffsetTime", + 0x9011: "OffsetTimeOriginal", + 0x9012: "OffsetTimeDigitized", + 0x9101: "ComponentsConfiguration", + 0x9102: "CompressedBitsPerPixel", + 0x9201: "ShutterSpeedValue", + 0x9202: "ApertureValue", + 0x9203: "BrightnessValue", + 0x9204: "ExposureBiasValue", + 0x9205: "MaxApertureValue", + 0x9206: "SubjectDistance", + 0x9207: "MeteringMode", + 0x9208: "LightSource", + 0x9209: "Flash", + 
0x920A: "FocalLength", + 0x920B: "FlashEnergy", + 0x920C: "SpatialFrequencyResponse", + 0x920D: "Noise", + 0x9211: "ImageNumber", + 0x9212: "SecurityClassification", + 0x9213: "ImageHistory", + 0x9214: "SubjectLocation", + 0x9215: "ExposureIndex", + 0x9216: "TIFF/EPStandardID", + 0x927C: "MakerNote", + 0x9286: "UserComment", + 0x9290: "SubsecTime", + 0x9291: "SubsecTimeOriginal", + 0x9292: "SubsecTimeDigitized", + 0x9400: "AmbientTemperature", + 0x9401: "Humidity", + 0x9402: "Pressure", + 0x9403: "WaterDepth", + 0x9404: "Acceleration", + 0x9405: "CameraElevationAngle", + 0x9C9B: "XPTitle", + 0x9C9C: "XPComment", + 0x9C9D: "XPAuthor", + 0x9C9E: "XPKeywords", + 0x9C9F: "XPSubject", + 0xA000: "FlashPixVersion", + 0xA001: "ColorSpace", + 0xA002: "ExifImageWidth", + 0xA003: "ExifImageHeight", + 0xA004: "RelatedSoundFile", + 0xA005: "ExifInteroperabilityOffset", + 0xA20B: "FlashEnergy", + 0xA20C: "SpatialFrequencyResponse", + 0xA20E: "FocalPlaneXResolution", + 0xA20F: "FocalPlaneYResolution", + 0xA210: "FocalPlaneResolutionUnit", + 0xA214: "SubjectLocation", + 0xA215: "ExposureIndex", + 0xA217: "SensingMethod", + 0xA300: "FileSource", + 0xA301: "SceneType", + 0xA302: "CFAPattern", + 0xA401: "CustomRendered", + 0xA402: "ExposureMode", + 0xA403: "WhiteBalance", + 0xA404: "DigitalZoomRatio", + 0xA405: "FocalLengthIn35mmFilm", + 0xA406: "SceneCaptureType", + 0xA407: "GainControl", + 0xA408: "Contrast", + 0xA409: "Saturation", + 0xA40A: "Sharpness", + 0xA40B: "DeviceSettingDescription", + 0xA40C: "SubjectDistanceRange", + 0xA420: "ImageUniqueID", + 0xA430: "CameraOwnerName", + 0xA431: "BodySerialNumber", + 0xA432: "LensSpecification", + 0xA433: "LensMake", + 0xA434: "LensModel", + 0xA435: "LensSerialNumber", + 0xA460: "CompositeImage", + 0xA461: "CompositeImageCount", + 0xA462: "CompositeImageExposureTimes", + 0xA500: "Gamma", + 0xC4A5: "PrintImageMatching", + 0xC612: "DNGVersion", + 0xC613: "DNGBackwardVersion", + 0xC614: "UniqueCameraModel", + 0xC615: 
"LocalizedCameraModel", + 0xC616: "CFAPlaneColor", + 0xC617: "CFALayout", + 0xC618: "LinearizationTable", + 0xC619: "BlackLevelRepeatDim", + 0xC61A: "BlackLevel", + 0xC61B: "BlackLevelDeltaH", + 0xC61C: "BlackLevelDeltaV", + 0xC61D: "WhiteLevel", + 0xC61E: "DefaultScale", + 0xC61F: "DefaultCropOrigin", + 0xC620: "DefaultCropSize", + 0xC621: "ColorMatrix1", + 0xC622: "ColorMatrix2", + 0xC623: "CameraCalibration1", + 0xC624: "CameraCalibration2", + 0xC625: "ReductionMatrix1", + 0xC626: "ReductionMatrix2", + 0xC627: "AnalogBalance", + 0xC628: "AsShotNeutral", + 0xC629: "AsShotWhiteXY", + 0xC62A: "BaselineExposure", + 0xC62B: "BaselineNoise", + 0xC62C: "BaselineSharpness", + 0xC62D: "BayerGreenSplit", + 0xC62E: "LinearResponseLimit", + 0xC62F: "CameraSerialNumber", + 0xC630: "LensInfo", + 0xC631: "ChromaBlurRadius", + 0xC632: "AntiAliasStrength", + 0xC633: "ShadowScale", + 0xC634: "DNGPrivateData", + 0xC635: "MakerNoteSafety", + 0xC65A: "CalibrationIlluminant1", + 0xC65B: "CalibrationIlluminant2", + 0xC65C: "BestQualityScale", + 0xC65D: "RawDataUniqueID", + 0xC68B: "OriginalRawFileName", + 0xC68C: "OriginalRawFileData", + 0xC68D: "ActiveArea", + 0xC68E: "MaskedAreas", + 0xC68F: "AsShotICCProfile", + 0xC690: "AsShotPreProfileMatrix", + 0xC691: "CurrentICCProfile", + 0xC692: "CurrentPreProfileMatrix", + 0xC6BF: "ColorimetricReference", + 0xC6F3: "CameraCalibrationSignature", + 0xC6F4: "ProfileCalibrationSignature", + 0xC6F6: "AsShotProfileName", + 0xC6F7: "NoiseReductionApplied", + 0xC6F8: "ProfileName", + 0xC6F9: "ProfileHueSatMapDims", + 0xC6FA: "ProfileHueSatMapData1", + 0xC6FB: "ProfileHueSatMapData2", + 0xC6FC: "ProfileToneCurve", + 0xC6FD: "ProfileEmbedPolicy", + 0xC6FE: "ProfileCopyright", + 0xC714: "ForwardMatrix1", + 0xC715: "ForwardMatrix2", + 0xC716: "PreviewApplicationName", + 0xC717: "PreviewApplicationVersion", + 0xC718: "PreviewSettingsName", + 0xC719: "PreviewSettingsDigest", + 0xC71A: "PreviewColorSpace", + 0xC71B: "PreviewDateTime", + 0xC71C: 
"RawImageDigest", + 0xC71D: "OriginalRawFileDigest", + 0xC71E: "SubTileBlockSize", + 0xC71F: "RowInterleaveFactor", + 0xC725: "ProfileLookTableDims", + 0xC726: "ProfileLookTableData", + 0xC740: "OpcodeList1", + 0xC741: "OpcodeList2", + 0xC74E: "OpcodeList3", + 0xC761: "NoiseProfile", +} +"""Maps EXIF tags to tag names.""" + + +GPSTAGS = { + 0: "GPSVersionID", + 1: "GPSLatitudeRef", + 2: "GPSLatitude", + 3: "GPSLongitudeRef", + 4: "GPSLongitude", + 5: "GPSAltitudeRef", + 6: "GPSAltitude", + 7: "GPSTimeStamp", + 8: "GPSSatellites", + 9: "GPSStatus", + 10: "GPSMeasureMode", + 11: "GPSDOP", + 12: "GPSSpeedRef", + 13: "GPSSpeed", + 14: "GPSTrackRef", + 15: "GPSTrack", + 16: "GPSImgDirectionRef", + 17: "GPSImgDirection", + 18: "GPSMapDatum", + 19: "GPSDestLatitudeRef", + 20: "GPSDestLatitude", + 21: "GPSDestLongitudeRef", + 22: "GPSDestLongitude", + 23: "GPSDestBearingRef", + 24: "GPSDestBearing", + 25: "GPSDestDistanceRef", + 26: "GPSDestDistance", + 27: "GPSProcessingMethod", + 28: "GPSAreaInformation", + 29: "GPSDateStamp", + 30: "GPSDifferential", + 31: "GPSHPositioningError", +} +"""Maps EXIF GPS tags to tag names.""" diff --git a/.venv/lib/python3.9/site-packages/PIL/FitsStubImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/FitsStubImagePlugin.py new file mode 100644 index 00000000..a3a94cf4 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/FitsStubImagePlugin.py @@ -0,0 +1,100 @@ +# +# The Python Imaging Library +# $Id$ +# +# FITS stub adapter +# +# Copyright (c) 1998-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile + +_handler = None + + +def register_handler(handler): + """ + Install application-specific FITS image handler. + + :param handler: Handler object. 
+ """ + global _handler + _handler = handler + + +# -------------------------------------------------------------------- +# Image adapter + + +def _accept(prefix): + return prefix[:6] == b"SIMPLE" + + +class FITSStubImageFile(ImageFile.StubImageFile): + + format = "FITS" + format_description = "FITS" + + def _open(self): + offset = self.fp.tell() + + headers = {} + while True: + header = self.fp.read(80) + if not header: + raise OSError("Truncated FITS file") + keyword = header[:8].strip() + if keyword == b"END": + break + value = header[8:].strip() + if value.startswith(b"="): + value = value[1:].strip() + if not headers and (not _accept(keyword) or value != b"T"): + raise SyntaxError("Not a FITS file") + headers[keyword] = value + + naxis = int(headers[b"NAXIS"]) + if naxis == 0: + raise ValueError("No image data") + elif naxis == 1: + self._size = 1, int(headers[b"NAXIS1"]) + else: + self._size = int(headers[b"NAXIS1"]), int(headers[b"NAXIS2"]) + + number_of_bits = int(headers[b"BITPIX"]) + if number_of_bits == 8: + self.mode = "L" + elif number_of_bits == 16: + self.mode = "I" + # rawmode = "I;16S" + elif number_of_bits == 32: + self.mode = "I" + elif number_of_bits in (-32, -64): + self.mode = "F" + # rawmode = "F" if number_of_bits == -32 else "F;64F" + + self.fp.seek(offset) + + loader = self._load() + if loader: + loader.open(self) + + def _load(self): + return _handler + + +def _save(im, fp, filename): + if _handler is None or not hasattr("_handler", "save"): + raise OSError("FITS save handler not installed") + _handler.save(im, fp, filename) + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(FITSStubImageFile.format, FITSStubImageFile, _accept) +Image.register_save(FITSStubImageFile.format, _save) + +Image.register_extensions(FITSStubImageFile.format, [".fit", ".fits"]) diff --git a/.venv/lib/python3.9/site-packages/PIL/FliImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/FliImagePlugin.py 
new file mode 100644 index 00000000..f2d4857f --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/FliImagePlugin.py @@ -0,0 +1,171 @@ +# +# The Python Imaging Library. +# $Id$ +# +# FLI/FLC file handling. +# +# History: +# 95-09-01 fl Created +# 97-01-03 fl Fixed parser, setup decoder tile +# 98-07-15 fl Renamed offset attribute to avoid name clash +# +# Copyright (c) Secret Labs AB 1997-98. +# Copyright (c) Fredrik Lundh 1995-97. +# +# See the README file for information on usage and redistribution. +# + + +from . import Image, ImageFile, ImagePalette +from ._binary import i16le as i16 +from ._binary import i32le as i32 +from ._binary import o8 + +# +# decoder + + +def _accept(prefix): + return len(prefix) >= 6 and i16(prefix, 4) in [0xAF11, 0xAF12] + + +## +# Image plugin for the FLI/FLC animation format. Use the seek +# method to load individual frames. + + +class FliImageFile(ImageFile.ImageFile): + + format = "FLI" + format_description = "Autodesk FLI/FLC Animation" + _close_exclusive_fp_after_loading = False + + def _open(self): + + # HEAD + s = self.fp.read(128) + if not ( + _accept(s) + and i16(s, 14) in [0, 3] # flags + and s[20:22] == b"\x00\x00" # reserved + ): + raise SyntaxError("not an FLI/FLC file") + + # frames + self.n_frames = i16(s, 6) + self.is_animated = self.n_frames > 1 + + # image characteristics + self.mode = "P" + self._size = i16(s, 8), i16(s, 10) + + # animation speed + duration = i32(s, 16) + magic = i16(s, 4) + if magic == 0xAF11: + duration = (duration * 1000) // 70 + self.info["duration"] = duration + + # look for palette + palette = [(a, a, a) for a in range(256)] + + s = self.fp.read(16) + + self.__offset = 128 + + if i16(s, 4) == 0xF100: + # prefix chunk; ignore it + self.__offset = self.__offset + i32(s) + s = self.fp.read(16) + + if i16(s, 4) == 0xF1FA: + # look for palette chunk + s = self.fp.read(6) + if i16(s, 4) == 11: + self._palette(palette, 2) + elif i16(s, 4) == 4: + self._palette(palette, 0) + + palette = [o8(r) + 
o8(g) + o8(b) for (r, g, b) in palette] + self.palette = ImagePalette.raw("RGB", b"".join(palette)) + + # set things up to decode first frame + self.__frame = -1 + self.__fp = self.fp + self.__rewind = self.fp.tell() + self.seek(0) + + def _palette(self, palette, shift): + # load palette + + i = 0 + for e in range(i16(self.fp.read(2))): + s = self.fp.read(2) + i = i + s[0] + n = s[1] + if n == 0: + n = 256 + s = self.fp.read(n * 3) + for n in range(0, len(s), 3): + r = s[n] << shift + g = s[n + 1] << shift + b = s[n + 2] << shift + palette[i] = (r, g, b) + i += 1 + + def seek(self, frame): + if not self._seek_check(frame): + return + if frame < self.__frame: + self._seek(0) + + for f in range(self.__frame + 1, frame + 1): + self._seek(f) + + def _seek(self, frame): + if frame == 0: + self.__frame = -1 + self.__fp.seek(self.__rewind) + self.__offset = 128 + else: + # ensure that the previous frame was loaded + self.load() + + if frame != self.__frame + 1: + raise ValueError(f"cannot seek to frame {frame}") + self.__frame = frame + + # move to next frame + self.fp = self.__fp + self.fp.seek(self.__offset) + + s = self.fp.read(4) + if not s: + raise EOFError + + framesize = i32(s) + + self.decodermaxblock = framesize + self.tile = [("fli", (0, 0) + self.size, self.__offset, None)] + + self.__offset += framesize + + def tell(self): + return self.__frame + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +# +# registry + +Image.register_open(FliImageFile.format, FliImageFile, _accept) + +Image.register_extensions(FliImageFile.format, [".fli", ".flc"]) diff --git a/.venv/lib/python3.9/site-packages/PIL/FontFile.py b/.venv/lib/python3.9/site-packages/PIL/FontFile.py new file mode 100644 index 00000000..c5fc80b3 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/FontFile.py @@ -0,0 +1,111 @@ +# +# The Python Imaging Library +# $Id$ +# +# base class for raster font file 
parsers +# +# history: +# 1997-06-05 fl created +# 1997-08-19 fl restrict image width +# +# Copyright (c) 1997-1998 by Secret Labs AB +# Copyright (c) 1997-1998 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + + +import os + +from . import Image, _binary + +WIDTH = 800 + + +def puti16(fp, values): + """Write network order (big-endian) 16-bit sequence""" + for v in values: + if v < 0: + v += 65536 + fp.write(_binary.o16be(v)) + + +class FontFile: + """Base class for raster font file handlers.""" + + bitmap = None + + def __init__(self): + + self.info = {} + self.glyph = [None] * 256 + + def __getitem__(self, ix): + return self.glyph[ix] + + def compile(self): + """Create metrics and bitmap""" + + if self.bitmap: + return + + # create bitmap large enough to hold all data + h = w = maxwidth = 0 + lines = 1 + for glyph in self: + if glyph: + d, dst, src, im = glyph + h = max(h, src[3] - src[1]) + w = w + (src[2] - src[0]) + if w > WIDTH: + lines += 1 + w = src[2] - src[0] + maxwidth = max(maxwidth, w) + + xsize = maxwidth + ysize = lines * h + + if xsize == 0 and ysize == 0: + return "" + + self.ysize = h + + # paste glyphs into bitmap + self.bitmap = Image.new("1", (xsize, ysize)) + self.metrics = [None] * 256 + x = y = 0 + for i in range(256): + glyph = self[i] + if glyph: + d, dst, src, im = glyph + xx = src[2] - src[0] + # yy = src[3] - src[1] + x0, y0 = x, y + x = x + xx + if x > WIDTH: + x, y = 0, y + h + x0, y0 = x, y + x = xx + s = src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0 + self.bitmap.paste(im.crop(src), s) + self.metrics[i] = d, dst, s + + def save(self, filename): + """Save font""" + + self.compile() + + # font data + self.bitmap.save(os.path.splitext(filename)[0] + ".pbm", "PNG") + + # font metrics + with open(os.path.splitext(filename)[0] + ".pil", "wb") as fp: + fp.write(b"PILfont\n") + fp.write(f";;;;;;{self.ysize};\n".encode("ascii")) # HACK!!! 
+ fp.write(b"DATA\n") + for id in range(256): + m = self.metrics[id] + if not m: + puti16(fp, [0] * 10) + else: + puti16(fp, m[0] + m[1] + m[2]) diff --git a/.venv/lib/python3.9/site-packages/PIL/FpxImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/FpxImagePlugin.py new file mode 100644 index 00000000..5e385469 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/FpxImagePlugin.py @@ -0,0 +1,242 @@ +# +# THIS IS WORK IN PROGRESS +# +# The Python Imaging Library. +# $Id$ +# +# FlashPix support for PIL +# +# History: +# 97-01-25 fl Created (reads uncompressed RGB images only) +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# +import olefile + +from . import Image, ImageFile +from ._binary import i32le as i32 + +# we map from colour field tuples to (mode, rawmode) descriptors +MODES = { + # opacity + (0x00007FFE): ("A", "L"), + # monochrome + (0x00010000,): ("L", "L"), + (0x00018000, 0x00017FFE): ("RGBA", "LA"), + # photo YCC + (0x00020000, 0x00020001, 0x00020002): ("RGB", "YCC;P"), + (0x00028000, 0x00028001, 0x00028002, 0x00027FFE): ("RGBA", "YCCA;P"), + # standard RGB (NIFRGB) + (0x00030000, 0x00030001, 0x00030002): ("RGB", "RGB"), + (0x00038000, 0x00038001, 0x00038002, 0x00037FFE): ("RGBA", "RGBA"), +} + + +# +# -------------------------------------------------------------------- + + +def _accept(prefix): + return prefix[:8] == olefile.MAGIC + + +## +# Image plugin for the FlashPix images. 
+ + +class FpxImageFile(ImageFile.ImageFile): + + format = "FPX" + format_description = "FlashPix" + + def _open(self): + # + # read the OLE directory and see if this is a likely + # to be a FlashPix file + + try: + self.ole = olefile.OleFileIO(self.fp) + except OSError as e: + raise SyntaxError("not an FPX file; invalid OLE file") from e + + if self.ole.root.clsid != "56616700-C154-11CE-8553-00AA00A1F95B": + raise SyntaxError("not an FPX file; bad root CLSID") + + self._open_index(1) + + def _open_index(self, index=1): + # + # get the Image Contents Property Set + + prop = self.ole.getproperties( + [f"Data Object Store {index:06d}", "\005Image Contents"] + ) + + # size (highest resolution) + + self._size = prop[0x1000002], prop[0x1000003] + + size = max(self.size) + i = 1 + while size > 64: + size = size / 2 + i += 1 + self.maxid = i - 1 + + # mode. instead of using a single field for this, flashpix + # requires you to specify the mode for each channel in each + # resolution subimage, and leaves it to the decoder to make + # sure that they all match. for now, we'll cheat and assume + # that this is always the case. 
+ + id = self.maxid << 16 + + s = prop[0x2000002 | id] + + colors = [] + bands = i32(s, 4) + if bands > 4: + raise OSError("Invalid number of bands") + for i in range(bands): + # note: for now, we ignore the "uncalibrated" flag + colors.append(i32(s, 8 + i * 4) & 0x7FFFFFFF) + + self.mode, self.rawmode = MODES[tuple(colors)] + + # load JPEG tables, if any + self.jpeg = {} + for i in range(256): + id = 0x3000001 | (i << 16) + if id in prop: + self.jpeg[i] = prop[id] + + self._open_subimage(1, self.maxid) + + def _open_subimage(self, index=1, subimage=0): + # + # setup tile descriptors for a given subimage + + stream = [ + f"Data Object Store {index:06d}", + f"Resolution {subimage:04d}", + "Subimage 0000 Header", + ] + + fp = self.ole.openstream(stream) + + # skip prefix + fp.read(28) + + # header stream + s = fp.read(36) + + size = i32(s, 4), i32(s, 8) + # tilecount = i32(s, 12) + tilesize = i32(s, 16), i32(s, 20) + # channels = i32(s, 24) + offset = i32(s, 28) + length = i32(s, 32) + + if size != self.size: + raise OSError("subimage mismatch") + + # get tile descriptors + fp.seek(28 + offset) + s = fp.read(i32(s, 12) * length) + + x = y = 0 + xsize, ysize = size + xtile, ytile = tilesize + self.tile = [] + + for i in range(0, len(s), length): + + compression = i32(s, i + 8) + + if compression == 0: + self.tile.append( + ( + "raw", + (x, y, x + xtile, y + ytile), + i32(s, i) + 28, + (self.rawmode), + ) + ) + + elif compression == 1: + + # FIXME: the fill decoder is not implemented + self.tile.append( + ( + "fill", + (x, y, x + xtile, y + ytile), + i32(s, i) + 28, + (self.rawmode, s[12:16]), + ) + ) + + elif compression == 2: + + internal_color_conversion = s[14] + jpeg_tables = s[15] + rawmode = self.rawmode + + if internal_color_conversion: + # The image is stored as usual (usually YCbCr). + if rawmode == "RGBA": + # For "RGBA", data is stored as YCbCrA based on + # negative RGB. 
The following trick works around + # this problem : + jpegmode, rawmode = "YCbCrK", "CMYK" + else: + jpegmode = None # let the decoder decide + + else: + # The image is stored as defined by rawmode + jpegmode = rawmode + + self.tile.append( + ( + "jpeg", + (x, y, x + xtile, y + ytile), + i32(s, i) + 28, + (rawmode, jpegmode), + ) + ) + + # FIXME: jpeg tables are tile dependent; the prefix + # data must be placed in the tile descriptor itself! + + if jpeg_tables: + self.tile_prefix = self.jpeg[jpeg_tables] + + else: + raise OSError("unknown/invalid compression") + + x = x + xtile + if x >= xsize: + x, y = 0, y + ytile + if y >= ysize: + break # isn't really required + + self.stream = stream + self.fp = None + + def load(self): + + if not self.fp: + self.fp = self.ole.openstream(self.stream[:2] + ["Subimage 0000 Data"]) + + return ImageFile.ImageFile.load(self) + + +# +# -------------------------------------------------------------------- + + +Image.register_open(FpxImageFile.format, FpxImageFile, _accept) + +Image.register_extension(FpxImageFile.format, ".fpx") diff --git a/.venv/lib/python3.9/site-packages/PIL/FtexImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/FtexImagePlugin.py new file mode 100644 index 00000000..3b169038 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/FtexImagePlugin.py @@ -0,0 +1,106 @@ +""" +A Pillow loader for .ftc and .ftu files (FTEX) +Jerome Leclanche + +The contents of this file are hereby released in the public domain (CC0) +Full text of the CC0 license: + https://creativecommons.org/publicdomain/zero/1.0/ + +Independence War 2: Edge Of Chaos - Texture File Format - 16 October 2001 + +The textures used for 3D objects in Independence War 2: Edge Of Chaos are in a +packed custom format called FTEX. This file format uses file extensions FTC +and FTU. +* FTC files are compressed textures (using standard texture compression). +* FTU files are not compressed. 
+Texture File Format +The FTC and FTU texture files both use the same format. This +has the following structure: +{header} +{format_directory} +{data} +Where: +{header} = { + u32:magic, + u32:version, + u32:width, + u32:height, + u32:mipmap_count, + u32:format_count +} + +* The "magic" number is "FTEX". +* "width" and "height" are the dimensions of the texture. +* "mipmap_count" is the number of mipmaps in the texture. +* "format_count" is the number of texture formats (different versions of the +same texture) in this file. + +{format_directory} = format_count * { u32:format, u32:where } + +The format value is 0 for DXT1 compressed textures and 1 for 24-bit RGB +uncompressed textures. +The texture data for a format starts at the position "where" in the file. + +Each set of texture data in the file has the following structure: +{data} = format_count * { u32:mipmap_size, mipmap_size * { u8 } } +* "mipmap_size" is the number of bytes in that mip level. For compressed +textures this is the size of the texture data compressed with DXT1. For 24 bit +uncompressed textures, this is 3 * width * height. Following this are the image +bytes for that mipmap level. + +Note: All data is stored in little-Endian (Intel) byte order. +""" + +import struct +from io import BytesIO + +from . import Image, ImageFile + +MAGIC = b"FTEX" +FORMAT_DXT1 = 0 +FORMAT_UNCOMPRESSED = 1 + + +class FtexImageFile(ImageFile.ImageFile): + format = "FTEX" + format_description = "Texture File Format (IW2:EOC)" + + def _open(self): + struct.unpack("= 8 and i32(prefix, 0) >= 20 and i32(prefix, 4) in (1, 2) + + +## +# Image plugin for the GIMP brush format. 
+ + +class GbrImageFile(ImageFile.ImageFile): + + format = "GBR" + format_description = "GIMP brush file" + + def _open(self): + header_size = i32(self.fp.read(4)) + version = i32(self.fp.read(4)) + if header_size < 20: + raise SyntaxError("not a GIMP brush") + if version not in (1, 2): + raise SyntaxError(f"Unsupported GIMP brush version: {version}") + + width = i32(self.fp.read(4)) + height = i32(self.fp.read(4)) + color_depth = i32(self.fp.read(4)) + if width <= 0 or height <= 0: + raise SyntaxError("not a GIMP brush") + if color_depth not in (1, 4): + raise SyntaxError(f"Unsupported GIMP brush color depth: {color_depth}") + + if version == 1: + comment_length = header_size - 20 + else: + comment_length = header_size - 28 + magic_number = self.fp.read(4) + if magic_number != b"GIMP": + raise SyntaxError("not a GIMP brush, bad magic number") + self.info["spacing"] = i32(self.fp.read(4)) + + comment = self.fp.read(comment_length)[:-1] + + if color_depth == 1: + self.mode = "L" + else: + self.mode = "RGBA" + + self._size = width, height + + self.info["comment"] = comment + + # Image might not be small + Image._decompression_bomb_check(self.size) + + # Data is an uncompressed block of w * h * bytes/pixel + self._data_size = width * height * color_depth + + def load(self): + if self.im: + # Already loaded + return + + self.im = Image.core.new(self.mode, self.size) + self.frombytes(self.fp.read(self._data_size)) + + +# +# registry + + +Image.register_open(GbrImageFile.format, GbrImageFile, _accept) +Image.register_extension(GbrImageFile.format, ".gbr") diff --git a/.venv/lib/python3.9/site-packages/PIL/GdImageFile.py b/.venv/lib/python3.9/site-packages/PIL/GdImageFile.py new file mode 100644 index 00000000..9c34adaa --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/GdImageFile.py @@ -0,0 +1,90 @@ +# +# The Python Imaging Library. +# $Id$ +# +# GD file handling +# +# History: +# 1996-04-12 fl Created +# +# Copyright (c) 1997 by Secret Labs AB. 
+# Copyright (c) 1996 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + + +""" +.. note:: + This format cannot be automatically recognized, so the + class is not registered for use with :py:func:`PIL.Image.open()`. To open a + gd file, use the :py:func:`PIL.GdImageFile.open()` function instead. + +.. warning:: + THE GD FORMAT IS NOT DESIGNED FOR DATA INTERCHANGE. This + implementation is provided for convenience and demonstrational + purposes only. +""" + + +from . import ImageFile, ImagePalette, UnidentifiedImageError +from ._binary import i16be as i16 +from ._binary import i32be as i32 + + +class GdImageFile(ImageFile.ImageFile): + """ + Image plugin for the GD uncompressed format. Note that this format + is not supported by the standard :py:func:`PIL.Image.open()` function. To use + this plugin, you have to import the :py:mod:`PIL.GdImageFile` module and + use the :py:func:`PIL.GdImageFile.open()` function. + """ + + format = "GD" + format_description = "GD uncompressed images" + + def _open(self): + + # Header + s = self.fp.read(1037) + + if not i16(s) in [65534, 65535]: + raise SyntaxError("Not a valid GD 2.x .gd file") + + self.mode = "L" # FIXME: "P" + self._size = i16(s, 2), i16(s, 4) + + trueColor = s[6] + trueColorOffset = 2 if trueColor else 0 + + # transparency index + tindex = i32(s, 7 + trueColorOffset) + if tindex < 256: + self.info["transparency"] = tindex + + self.palette = ImagePalette.raw( + "XBGR", s[7 + trueColorOffset + 4 : 7 + trueColorOffset + 4 + 256 * 4] + ) + + self.tile = [ + ("raw", (0, 0) + self.size, 7 + trueColorOffset + 4 + 256 * 4, ("L", 0, 1)) + ] + + +def open(fp, mode="r"): + """ + Load texture from a GD image file. + + :param filename: GD file name, or an opened file handle. + :param mode: Optional mode. In this version, if the mode argument + is given, it must be "r". + :returns: An image instance. + :raises OSError: If the image could not be read. 
+ """ + if mode != "r": + raise ValueError("bad mode") + + try: + return GdImageFile(fp) + except SyntaxError as e: + raise UnidentifiedImageError("cannot identify this image file") from e diff --git a/.venv/lib/python3.9/site-packages/PIL/GifImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/GifImagePlugin.py new file mode 100644 index 00000000..128afc42 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/GifImagePlugin.py @@ -0,0 +1,926 @@ +# +# The Python Imaging Library. +# $Id$ +# +# GIF file handling +# +# History: +# 1995-09-01 fl Created +# 1996-12-14 fl Added interlace support +# 1996-12-30 fl Added animation support +# 1997-01-05 fl Added write support, fixed local colour map bug +# 1997-02-23 fl Make sure to load raster data in getdata() +# 1997-07-05 fl Support external decoder (0.4) +# 1998-07-09 fl Handle all modes when saving (0.5) +# 1998-07-15 fl Renamed offset attribute to avoid name clash +# 2001-04-16 fl Added rewind support (seek to frame 0) (0.6) +# 2001-04-17 fl Added palette optimization (0.7) +# 2002-06-06 fl Added transparency support for save (0.8) +# 2004-02-24 fl Disable interlacing for small images +# +# Copyright (c) 1997-2004 by Secret Labs AB +# Copyright (c) 1995-2004 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import itertools +import math +import os +import subprocess + +from . import Image, ImageChops, ImageFile, ImagePalette, ImageSequence +from ._binary import i16le as i16 +from ._binary import o8 +from ._binary import o16le as o16 + +# -------------------------------------------------------------------- +# Identify/read GIF files + + +def _accept(prefix): + return prefix[:6] in [b"GIF87a", b"GIF89a"] + + +## +# Image plugin for GIF images. This plugin supports both GIF87 and +# GIF89 images. 
+ + +class GifImageFile(ImageFile.ImageFile): + + format = "GIF" + format_description = "Compuserve GIF" + _close_exclusive_fp_after_loading = False + + global_palette = None + + def data(self): + s = self.fp.read(1) + if s and s[0]: + return self.fp.read(s[0]) + return None + + def _open(self): + + # Screen + s = self.fp.read(13) + if not _accept(s): + raise SyntaxError("not a GIF file") + + self.info["version"] = s[:6] + self._size = i16(s, 6), i16(s, 8) + self.tile = [] + flags = s[10] + bits = (flags & 7) + 1 + + if flags & 128: + # get global palette + self.info["background"] = s[11] + # check if palette contains colour indices + p = self.fp.read(3 << bits) + for i in range(0, len(p), 3): + if not (i // 3 == p[i] == p[i + 1] == p[i + 2]): + p = ImagePalette.raw("RGB", p) + self.global_palette = self.palette = p + break + + self.__fp = self.fp # FIXME: hack + self.__rewind = self.fp.tell() + self._n_frames = None + self._is_animated = None + self._seek(0) # get ready to read first frame + + @property + def n_frames(self): + if self._n_frames is None: + current = self.tell() + try: + while True: + self.seek(self.tell() + 1) + except EOFError: + self._n_frames = self.tell() + 1 + self.seek(current) + return self._n_frames + + @property + def is_animated(self): + if self._is_animated is None: + if self._n_frames is not None: + self._is_animated = self._n_frames != 1 + else: + current = self.tell() + + try: + self.seek(1) + self._is_animated = True + except EOFError: + self._is_animated = False + + self.seek(current) + return self._is_animated + + def seek(self, frame): + if not self._seek_check(frame): + return + if frame < self.__frame: + if frame != 0: + self.im = None + self._seek(0) + + last_frame = self.__frame + for f in range(self.__frame + 1, frame + 1): + try: + self._seek(f) + except EOFError as e: + self.seek(last_frame) + raise EOFError("no more images in GIF file") from e + + def _seek(self, frame): + + if frame == 0: + # rewind + self.__offset = 0 + 
self.dispose = None + self.dispose_extent = [0, 0, 0, 0] # x0, y0, x1, y1 + self.__frame = -1 + self.__fp.seek(self.__rewind) + self.disposal_method = 0 + else: + # ensure that the previous frame was loaded + if self.tile: + self.load() + + if frame != self.__frame + 1: + raise ValueError(f"cannot seek to frame {frame}") + self.__frame = frame + + self.tile = [] + + self.fp = self.__fp + if self.__offset: + # backup to last frame + self.fp.seek(self.__offset) + while self.data(): + pass + self.__offset = 0 + + if self.dispose: + self.im.paste(self.dispose, self.dispose_extent) + + from copy import copy + + self.palette = copy(self.global_palette) + + info = {} + frame_transparency = None + interlace = None + while True: + + s = self.fp.read(1) + if not s or s == b";": + break + + elif s == b"!": + # + # extensions + # + s = self.fp.read(1) + block = self.data() + if s[0] == 249: + # + # graphic control extension + # + flags = block[0] + if flags & 1: + frame_transparency = block[3] + info["duration"] = i16(block, 1) * 10 + + # disposal method - find the value of bits 4 - 6 + dispose_bits = 0b00011100 & flags + dispose_bits = dispose_bits >> 2 + if dispose_bits: + # only set the dispose if it is not + # unspecified. 
I'm not sure if this is + # correct, but it seems to prevent the last + # frame from looking odd for some animations + self.disposal_method = dispose_bits + elif s[0] == 254: + # + # comment extension + # + while block: + if "comment" in info: + info["comment"] += block + else: + info["comment"] = block + block = self.data() + continue + elif s[0] == 255: + # + # application extension + # + info["extension"] = block, self.fp.tell() + if block[:11] == b"NETSCAPE2.0": + block = self.data() + if len(block) >= 3 and block[0] == 1: + info["loop"] = i16(block, 1) + while self.data(): + pass + + elif s == b",": + # + # local image + # + s = self.fp.read(9) + + # extent + x0, y0 = i16(s, 0), i16(s, 2) + x1, y1 = x0 + i16(s, 4), y0 + i16(s, 6) + if x1 > self.size[0] or y1 > self.size[1]: + self._size = max(x1, self.size[0]), max(y1, self.size[1]) + self.dispose_extent = x0, y0, x1, y1 + flags = s[8] + + interlace = (flags & 64) != 0 + + if flags & 128: + bits = (flags & 7) + 1 + self.palette = ImagePalette.raw("RGB", self.fp.read(3 << bits)) + + # image data + bits = self.fp.read(1)[0] + self.__offset = self.fp.tell() + break + + else: + pass + # raise OSError, "illegal GIF tag `%x`" % s[0] + + try: + if self.disposal_method < 2: + # do not dispose or none specified + self.dispose = None + elif self.disposal_method == 2: + # replace with background colour + + # only dispose the extent in this frame + x0, y0, x1, y1 = self.dispose_extent + dispose_size = (x1 - x0, y1 - y0) + + Image._decompression_bomb_check(dispose_size) + + # by convention, attempt to use transparency first + color = self.info.get("transparency", frame_transparency) + if color is None: + color = self.info.get("background", 0) + self.dispose = Image.core.fill("P", dispose_size, color) + else: + # replace with previous contents + if self.im: + # only dispose the extent in this frame + self.dispose = self._crop(self.im, self.dispose_extent) + elif frame_transparency is not None: + x0, y0, x1, y1 = 
self.dispose_extent + dispose_size = (x1 - x0, y1 - y0) + + Image._decompression_bomb_check(dispose_size) + self.dispose = Image.core.fill( + "P", dispose_size, frame_transparency + ) + except AttributeError: + pass + + if interlace is not None: + transparency = -1 + if frame_transparency is not None: + if frame == 0: + self.info["transparency"] = frame_transparency + else: + transparency = frame_transparency + self.tile = [ + ( + "gif", + (x0, y0, x1, y1), + self.__offset, + (bits, interlace, transparency), + ) + ] + else: + # self.__fp = None + raise EOFError + + for k in ["duration", "comment", "extension", "loop"]: + if k in info: + self.info[k] = info[k] + elif k in self.info: + del self.info[k] + + self.mode = "L" + if self.palette: + self.mode = "P" + + def load_prepare(self): + if not self.im and "transparency" in self.info: + self.im = Image.core.fill(self.mode, self.size, self.info["transparency"]) + + super(GifImageFile, self).load_prepare() + + def tell(self): + return self.__frame + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +# -------------------------------------------------------------------- +# Write GIF files + + +RAWMODE = {"1": "L", "L": "L", "P": "P"} + + +def _normalize_mode(im, initial_call=False): + """ + Takes an image (or frame), returns an image in a mode that is appropriate + for saving in a Gif. + + It may return the original image, or it may return an image converted to + palette or 'L' mode. + + UNDONE: What is the point of mucking with the initial call palette, for + an image that shouldn't have a palette, or it would be a mode 'P' and + get returned in the RAWMODE clause. + + :param im: Image object + :param initial_call: Default false, set to true for a single frame. 
+ :returns: Image object + """ + if im.mode in RAWMODE: + im.load() + return im + if Image.getmodebase(im.mode) == "RGB": + if initial_call: + palette_size = 256 + if im.palette: + palette_size = len(im.palette.getdata()[1]) // 3 + return im.convert("P", palette=Image.ADAPTIVE, colors=palette_size) + else: + return im.convert("P") + return im.convert("L") + + +def _normalize_palette(im, palette, info): + """ + Normalizes the palette for image. + - Sets the palette to the incoming palette, if provided. + - Ensures that there's a palette for L mode images + - Optimizes the palette if necessary/desired. + + :param im: Image object + :param palette: bytes object containing the source palette, or .... + :param info: encoderinfo + :returns: Image object + """ + source_palette = None + if palette: + # a bytes palette + if isinstance(palette, (bytes, bytearray, list)): + source_palette = bytearray(palette[:768]) + if isinstance(palette, ImagePalette.ImagePalette): + source_palette = bytearray(palette.palette) + + if im.mode == "P": + if not source_palette: + source_palette = im.im.getpalette("RGB")[:768] + else: # L-mode + if not source_palette: + source_palette = bytearray(i // 3 for i in range(768)) + im.palette = ImagePalette.ImagePalette("RGB", palette=source_palette) + + if palette: + used_palette_colors = [] + for i in range(0, len(source_palette), 3): + source_color = tuple(source_palette[i : i + 3]) + try: + index = im.palette.colors[source_color] + except KeyError: + index = None + used_palette_colors.append(index) + for i, index in enumerate(used_palette_colors): + if index is None: + for j in range(len(used_palette_colors)): + if j not in used_palette_colors: + used_palette_colors[i] = j + break + im = im.remap_palette(used_palette_colors) + else: + used_palette_colors = _get_optimize(im, info) + if used_palette_colors is not None: + return im.remap_palette(used_palette_colors, source_palette) + + im.palette.palette = source_palette + return im + + +def 
_write_single_frame(im, fp, palette): + im_out = _normalize_mode(im, True) + for k, v in im_out.info.items(): + im.encoderinfo.setdefault(k, v) + im_out = _normalize_palette(im_out, palette, im.encoderinfo) + + for s in _get_global_header(im_out, im.encoderinfo): + fp.write(s) + + # local image header + flags = 0 + if get_interlace(im): + flags = flags | 64 + _write_local_header(fp, im, (0, 0), flags) + + im_out.encoderconfig = (8, get_interlace(im)) + ImageFile._save(im_out, fp, [("gif", (0, 0) + im.size, 0, RAWMODE[im_out.mode])]) + + fp.write(b"\0") # end of image data + + +def _write_multiple_frames(im, fp, palette): + + duration = im.encoderinfo.get("duration", im.info.get("duration")) + disposal = im.encoderinfo.get("disposal", im.info.get("disposal")) + + im_frames = [] + frame_count = 0 + background_im = None + for imSequence in itertools.chain([im], im.encoderinfo.get("append_images", [])): + for im_frame in ImageSequence.Iterator(imSequence): + # a copy is required here since seek can still mutate the image + im_frame = _normalize_mode(im_frame.copy()) + if frame_count == 0: + for k, v in im_frame.info.items(): + im.encoderinfo.setdefault(k, v) + im_frame = _normalize_palette(im_frame, palette, im.encoderinfo) + + encoderinfo = im.encoderinfo.copy() + if isinstance(duration, (list, tuple)): + encoderinfo["duration"] = duration[frame_count] + if isinstance(disposal, (list, tuple)): + encoderinfo["disposal"] = disposal[frame_count] + frame_count += 1 + + if im_frames: + # delta frame + previous = im_frames[-1] + if encoderinfo.get("disposal") == 2: + if background_im is None: + color = im.encoderinfo.get( + "transparency", im.info.get("transparency", (0, 0, 0)) + ) + background = _get_background(im_frame, color) + background_im = Image.new("P", im_frame.size, background) + background_im.putpalette(im_frames[0]["im"].palette) + base_im = background_im + else: + base_im = previous["im"] + if _get_palette_bytes(im_frame) == _get_palette_bytes(base_im): + delta 
= ImageChops.subtract_modulo(im_frame, base_im) + else: + delta = ImageChops.subtract_modulo( + im_frame.convert("RGB"), base_im.convert("RGB") + ) + bbox = delta.getbbox() + if not bbox: + # This frame is identical to the previous frame + if duration: + previous["encoderinfo"]["duration"] += encoderinfo["duration"] + continue + else: + bbox = None + im_frames.append({"im": im_frame, "bbox": bbox, "encoderinfo": encoderinfo}) + + if len(im_frames) > 1: + for frame_data in im_frames: + im_frame = frame_data["im"] + if not frame_data["bbox"]: + # global header + for s in _get_global_header(im_frame, frame_data["encoderinfo"]): + fp.write(s) + offset = (0, 0) + else: + # compress difference + if not palette: + frame_data["encoderinfo"]["include_color_table"] = True + + im_frame = im_frame.crop(frame_data["bbox"]) + offset = frame_data["bbox"][:2] + _write_frame_data(fp, im_frame, offset, frame_data["encoderinfo"]) + return True + elif "duration" in im.encoderinfo and isinstance( + im.encoderinfo["duration"], (list, tuple) + ): + # Since multiple frames will not be written, add together the frame durations + im.encoderinfo["duration"] = sum(im.encoderinfo["duration"]) + + +def _save_all(im, fp, filename): + _save(im, fp, filename, save_all=True) + + +def _save(im, fp, filename, save_all=False): + # header + if "palette" in im.encoderinfo or "palette" in im.info: + palette = im.encoderinfo.get("palette", im.info.get("palette")) + else: + palette = None + im.encoderinfo["optimize"] = im.encoderinfo.get("optimize", True) + + if not save_all or not _write_multiple_frames(im, fp, palette): + _write_single_frame(im, fp, palette) + + fp.write(b";") # end of file + + if hasattr(fp, "flush"): + fp.flush() + + +def get_interlace(im): + interlace = im.encoderinfo.get("interlace", 1) + + # workaround for @PIL153 + if min(im.size) < 16: + interlace = 0 + + return interlace + + +def _write_local_header(fp, im, offset, flags): + transparent_color_exists = False + try: + transparency 
= im.encoderinfo["transparency"] + except KeyError: + pass + else: + transparency = int(transparency) + # optimize the block away if transparent color is not used + transparent_color_exists = True + + used_palette_colors = _get_optimize(im, im.encoderinfo) + if used_palette_colors is not None: + # adjust the transparency index after optimize + try: + transparency = used_palette_colors.index(transparency) + except ValueError: + transparent_color_exists = False + + if "duration" in im.encoderinfo: + duration = int(im.encoderinfo["duration"] / 10) + else: + duration = 0 + + disposal = int(im.encoderinfo.get("disposal", 0)) + + if transparent_color_exists or duration != 0 or disposal: + packed_flag = 1 if transparent_color_exists else 0 + packed_flag |= disposal << 2 + if not transparent_color_exists: + transparency = 0 + + fp.write( + b"!" + + o8(249) # extension intro + + o8(4) # length + + o8(packed_flag) # packed fields + + o16(duration) # duration + + o8(transparency) # transparency index + + o8(0) + ) + + if "comment" in im.encoderinfo and 1 <= len(im.encoderinfo["comment"]): + fp.write(b"!" + o8(254)) # extension intro + comment = im.encoderinfo["comment"] + if isinstance(comment, str): + comment = comment.encode() + for i in range(0, len(comment), 255): + subblock = comment[i : i + 255] + fp.write(o8(len(subblock)) + subblock) + fp.write(o8(0)) + if "loop" in im.encoderinfo: + number_of_loops = im.encoderinfo["loop"] + fp.write( + b"!" 
+ + o8(255) # extension intro + + o8(11) + + b"NETSCAPE2.0" + + o8(3) + + o8(1) + + o16(number_of_loops) # number of loops + + o8(0) + ) + include_color_table = im.encoderinfo.get("include_color_table") + if include_color_table: + palette_bytes = _get_palette_bytes(im) + color_table_size = _get_color_table_size(palette_bytes) + if color_table_size: + flags = flags | 128 # local color table flag + flags = flags | color_table_size + + fp.write( + b"," + + o16(offset[0]) # offset + + o16(offset[1]) + + o16(im.size[0]) # size + + o16(im.size[1]) + + o8(flags) # flags + ) + if include_color_table and color_table_size: + fp.write(_get_header_palette(palette_bytes)) + fp.write(o8(8)) # bits + + +def _save_netpbm(im, fp, filename): + + # Unused by default. + # To use, uncomment the register_save call at the end of the file. + # + # If you need real GIF compression and/or RGB quantization, you + # can use the external NETPBM/PBMPLUS utilities. See comments + # below for information on how to enable this. 
+ tempfile = im._dump() + + try: + with open(filename, "wb") as f: + if im.mode != "RGB": + subprocess.check_call( + ["ppmtogif", tempfile], stdout=f, stderr=subprocess.DEVNULL + ) + else: + # Pipe ppmquant output into ppmtogif + # "ppmquant 256 %s | ppmtogif > %s" % (tempfile, filename) + quant_cmd = ["ppmquant", "256", tempfile] + togif_cmd = ["ppmtogif"] + quant_proc = subprocess.Popen( + quant_cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL + ) + togif_proc = subprocess.Popen( + togif_cmd, + stdin=quant_proc.stdout, + stdout=f, + stderr=subprocess.DEVNULL, + ) + + # Allow ppmquant to receive SIGPIPE if ppmtogif exits + quant_proc.stdout.close() + + retcode = quant_proc.wait() + if retcode: + raise subprocess.CalledProcessError(retcode, quant_cmd) + + retcode = togif_proc.wait() + if retcode: + raise subprocess.CalledProcessError(retcode, togif_cmd) + finally: + try: + os.unlink(tempfile) + except OSError: + pass + + +# Force optimization so that we can test performance against +# cases where it took lots of memory and time previously. +_FORCE_OPTIMIZE = False + + +def _get_optimize(im, info): + """ + Palette optimization is a potentially expensive operation. + + This function determines if the palette should be optimized using + some heuristics, then returns the list of palette entries in use. + + :param im: Image object + :param info: encoderinfo + :returns: list of indexes of palette entries in use, or None + """ + if im.mode in ("P", "L") and info and info.get("optimize", 0): + # Potentially expensive operation. + + # The palette saves 3 bytes per color not used, but palette + # lengths are restricted to 3*(2**N) bytes. Max saving would + # be 768 -> 6 bytes if we went all the way down to 2 colors. + # * If we're over 128 colors, we can't save any space. + # * If there aren't any holes, it's not worth collapsing. + # * If we have a 'large' image, the palette is in the noise. 
+ + # create the new palette if not every color is used + optimise = _FORCE_OPTIMIZE or im.mode == "L" + if optimise or im.width * im.height < 512 * 512: + # check which colors are used + used_palette_colors = [] + for i, count in enumerate(im.histogram()): + if count: + used_palette_colors.append(i) + + if optimise or ( + len(used_palette_colors) <= 128 + and max(used_palette_colors) > len(used_palette_colors) + ): + return used_palette_colors + + +def _get_color_table_size(palette_bytes): + # calculate the palette size for the header + if not palette_bytes: + return 0 + elif len(palette_bytes) < 9: + return 1 + else: + return math.ceil(math.log(len(palette_bytes) // 3, 2)) - 1 + + +def _get_header_palette(palette_bytes): + """ + Returns the palette, null padded to the next power of 2 (*3) bytes + suitable for direct inclusion in the GIF header + + :param palette_bytes: Unpadded palette bytes, in RGBRGB form + :returns: Null padded palette + """ + color_table_size = _get_color_table_size(palette_bytes) + + # add the missing amount of bytes + # the palette has to be 2< 0: + palette_bytes += o8(0) * 3 * actual_target_size_diff + return palette_bytes + + +def _get_palette_bytes(im): + """ + Gets the palette for inclusion in the gif header + + :param im: Image object + :returns: Bytes, len<=768 suitable for inclusion in gif header + """ + return im.palette.palette + + +def _get_background(im, infoBackground): + background = 0 + if infoBackground: + background = infoBackground + if isinstance(background, tuple): + # WebPImagePlugin stores an RGBA value in info["background"] + # So it must be converted to the same format as GifImagePlugin's + # info["background"] - a global color table index + try: + background = im.palette.getcolor(background, im) + except ValueError as e: + if str(e) == "cannot allocate more than 256 colors": + # If all 256 colors are in use, + # then there is no need for the background color + return 0 + else: + raise + return background + + +def 
_get_global_header(im, info): + """Return a list of strings representing a GIF header""" + + # Header Block + # https://www.matthewflickinger.com/lab/whatsinagif/bits_and_bytes.asp + + version = b"87a" + for extensionKey in ["transparency", "duration", "loop", "comment"]: + if info and extensionKey in info: + if (extensionKey == "duration" and info[extensionKey] == 0) or ( + extensionKey == "comment" and not (1 <= len(info[extensionKey]) <= 255) + ): + continue + version = b"89a" + break + else: + if im.info.get("version") == b"89a": + version = b"89a" + + background = _get_background(im, info.get("background")) + + palette_bytes = _get_palette_bytes(im) + color_table_size = _get_color_table_size(palette_bytes) + + return [ + b"GIF" # signature + + version # version + + o16(im.size[0]) # canvas width + + o16(im.size[1]), # canvas height + # Logical Screen Descriptor + # size of global color table + global color table flag + o8(color_table_size + 128), # packed fields + # background + reserved/aspect + o8(background) + o8(0), + # Global Color Table + _get_header_palette(palette_bytes), + ] + + +def _write_frame_data(fp, im_frame, offset, params): + try: + im_frame.encoderinfo = params + + # local image header + _write_local_header(fp, im_frame, offset, 0) + + ImageFile._save( + im_frame, fp, [("gif", (0, 0) + im_frame.size, 0, RAWMODE[im_frame.mode])] + ) + + fp.write(b"\0") # end of image data + finally: + del im_frame.encoderinfo + + +# -------------------------------------------------------------------- +# Legacy GIF utilities + + +def getheader(im, palette=None, info=None): + """ + Legacy Method to get Gif data from image. + + Warning:: May modify image data. + + :param im: Image object + :param palette: bytes object containing the source palette, or .... 
+ :param info: encoderinfo + :returns: tuple of(list of header items, optimized palette) + + """ + used_palette_colors = _get_optimize(im, info) + + if info is None: + info = {} + + if "background" not in info and "background" in im.info: + info["background"] = im.info["background"] + + im_mod = _normalize_palette(im, palette, info) + im.palette = im_mod.palette + im.im = im_mod.im + header = _get_global_header(im, info) + + return header, used_palette_colors + + +# To specify duration, add the time in milliseconds to getdata(), +# e.g. getdata(im_frame, duration=1000) +def getdata(im, offset=(0, 0), **params): + """ + Legacy Method + + Return a list of strings representing this image. + The first string is a local image header, the rest contains + encoded image data. + + :param im: Image object + :param offset: Tuple of (x, y) pixels. Defaults to (0,0) + :param \\**params: E.g. duration or other encoder info parameters + :returns: List of Bytes containing gif encoded frame data + + """ + + class Collector: + data = [] + + def write(self, data): + self.data.append(data) + + im.load() # make sure raster data is available + + fp = Collector() + + _write_frame_data(fp, im, offset, params) + + return fp.data + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(GifImageFile.format, GifImageFile, _accept) +Image.register_save(GifImageFile.format, _save) +Image.register_save_all(GifImageFile.format, _save_all) +Image.register_extension(GifImageFile.format, ".gif") +Image.register_mime(GifImageFile.format, "image/gif") + +# +# Uncomment the following line if you wish to use NETPBM/PBMPLUS +# instead of the built-in "uncompressed" GIF encoder + +# Image.register_save(GifImageFile.format, _save_netpbm) diff --git a/.venv/lib/python3.9/site-packages/PIL/GimpGradientFile.py b/.venv/lib/python3.9/site-packages/PIL/GimpGradientFile.py new file mode 100644 index 00000000..7ab7f999 --- /dev/null +++ 
b/.venv/lib/python3.9/site-packages/PIL/GimpGradientFile.py @@ -0,0 +1,140 @@ +# +# Python Imaging Library +# $Id$ +# +# stuff to read (and render) GIMP gradient files +# +# History: +# 97-08-23 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# + +""" +Stuff to translate curve segments to palette values (derived from +the corresponding code in GIMP, written by Federico Mena Quintero. +See the GIMP distribution for more information.) +""" + + +from math import log, pi, sin, sqrt + +from ._binary import o8 + +EPSILON = 1e-10 +"""""" # Enable auto-doc for data member + + +def linear(middle, pos): + if pos <= middle: + if middle < EPSILON: + return 0.0 + else: + return 0.5 * pos / middle + else: + pos = pos - middle + middle = 1.0 - middle + if middle < EPSILON: + return 1.0 + else: + return 0.5 + 0.5 * pos / middle + + +def curved(middle, pos): + return pos ** (log(0.5) / log(max(middle, EPSILON))) + + +def sine(middle, pos): + return (sin((-pi / 2.0) + pi * linear(middle, pos)) + 1.0) / 2.0 + + +def sphere_increasing(middle, pos): + return sqrt(1.0 - (linear(middle, pos) - 1.0) ** 2) + + +def sphere_decreasing(middle, pos): + return 1.0 - sqrt(1.0 - linear(middle, pos) ** 2) + + +SEGMENTS = [linear, curved, sine, sphere_increasing, sphere_decreasing] +"""""" # Enable auto-doc for data member + + +class GradientFile: + + gradient = None + + def getpalette(self, entries=256): + + palette = [] + + ix = 0 + x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix] + + for i in range(entries): + + x = i / (entries - 1) + + while x1 < x: + ix += 1 + x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix] + + w = x1 - x0 + + if w < EPSILON: + scale = segment(0.5, 0.5) + else: + scale = segment((xm - x0) / w, (x - x0) / w) + + # expand to RGBA + r = o8(int(255 * ((rgb1[0] - rgb0[0]) * scale + rgb0[0]) + 0.5)) + g = o8(int(255 * ((rgb1[1] - rgb0[1]) * scale + rgb0[1]) + 
0.5)) + b = o8(int(255 * ((rgb1[2] - rgb0[2]) * scale + rgb0[2]) + 0.5)) + a = o8(int(255 * ((rgb1[3] - rgb0[3]) * scale + rgb0[3]) + 0.5)) + + # add to palette + palette.append(r + g + b + a) + + return b"".join(palette), "RGBA" + + +class GimpGradientFile(GradientFile): + """File handler for GIMP's gradient format.""" + + def __init__(self, fp): + + if fp.readline()[:13] != b"GIMP Gradient": + raise SyntaxError("not a GIMP gradient file") + + line = fp.readline() + + # GIMP 1.2 gradient files don't contain a name, but GIMP 1.3 files do + if line.startswith(b"Name: "): + line = fp.readline().strip() + + count = int(line) + + gradient = [] + + for i in range(count): + + s = fp.readline().split() + w = [float(x) for x in s[:11]] + + x0, x1 = w[0], w[2] + xm = w[1] + rgb0 = w[3:7] + rgb1 = w[7:11] + + segment = SEGMENTS[int(s[11])] + cspace = int(s[12]) + + if cspace != 0: + raise OSError("cannot handle HSV colour space") + + gradient.append((x0, x1, xm, rgb0, rgb1, segment)) + + self.gradient = gradient diff --git a/.venv/lib/python3.9/site-packages/PIL/GimpPaletteFile.py b/.venv/lib/python3.9/site-packages/PIL/GimpPaletteFile.py new file mode 100644 index 00000000..10fd3ad8 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/GimpPaletteFile.py @@ -0,0 +1,56 @@ +# +# Python Imaging Library +# $Id$ +# +# stuff to read GIMP palette files +# +# History: +# 1997-08-23 fl Created +# 2004-09-07 fl Support GIMP 2.0 palette files. +# +# Copyright (c) Secret Labs AB 1997-2004. All rights reserved. +# Copyright (c) Fredrik Lundh 1997-2004. +# +# See the README file for information on usage and redistribution. 
+# + +import re + +from ._binary import o8 + + +class GimpPaletteFile: + """File handler for GIMP's palette format.""" + + rawmode = "RGB" + + def __init__(self, fp): + + self.palette = [o8(i) * 3 for i in range(256)] + + if fp.readline()[:12] != b"GIMP Palette": + raise SyntaxError("not a GIMP palette file") + + for i in range(256): + + s = fp.readline() + if not s: + break + + # skip fields and comment lines + if re.match(br"\w+:|#", s): + continue + if len(s) > 100: + raise SyntaxError("bad palette file") + + v = tuple(map(int, s.split()[:3])) + if len(v) != 3: + raise ValueError("bad palette entry") + + self.palette[i] = o8(v[0]) + o8(v[1]) + o8(v[2]) + + self.palette = b"".join(self.palette) + + def getpalette(self): + + return self.palette, self.rawmode diff --git a/.venv/lib/python3.9/site-packages/PIL/GribStubImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/GribStubImagePlugin.py new file mode 100644 index 00000000..b9bdd16e --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/GribStubImagePlugin.py @@ -0,0 +1,73 @@ +# +# The Python Imaging Library +# $Id$ +# +# GRIB stub adapter +# +# Copyright (c) 1996-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile + +_handler = None + + +def register_handler(handler): + """ + Install application-specific GRIB image handler. + + :param handler: Handler object. 
+ """ + global _handler + _handler = handler + + +# -------------------------------------------------------------------- +# Image adapter + + +def _accept(prefix): + return prefix[0:4] == b"GRIB" and prefix[7] == 1 + + +class GribStubImageFile(ImageFile.StubImageFile): + + format = "GRIB" + format_description = "GRIB" + + def _open(self): + + offset = self.fp.tell() + + if not _accept(self.fp.read(8)): + raise SyntaxError("Not a GRIB file") + + self.fp.seek(offset) + + # make something up + self.mode = "F" + self._size = 1, 1 + + loader = self._load() + if loader: + loader.open(self) + + def _load(self): + return _handler + + +def _save(im, fp, filename): + if _handler is None or not hasattr("_handler", "save"): + raise OSError("GRIB save handler not installed") + _handler.save(im, fp, filename) + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(GribStubImageFile.format, GribStubImageFile, _accept) +Image.register_save(GribStubImageFile.format, _save) + +Image.register_extension(GribStubImageFile.format, ".grib") diff --git a/.venv/lib/python3.9/site-packages/PIL/Hdf5StubImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/Hdf5StubImagePlugin.py new file mode 100644 index 00000000..362f2d39 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/Hdf5StubImagePlugin.py @@ -0,0 +1,73 @@ +# +# The Python Imaging Library +# $Id$ +# +# HDF5 stub adapter +# +# Copyright (c) 2000-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile + +_handler = None + + +def register_handler(handler): + """ + Install application-specific HDF5 image handler. + + :param handler: Handler object. 
+ """ + global _handler + _handler = handler + + +# -------------------------------------------------------------------- +# Image adapter + + +def _accept(prefix): + return prefix[:8] == b"\x89HDF\r\n\x1a\n" + + +class HDF5StubImageFile(ImageFile.StubImageFile): + + format = "HDF5" + format_description = "HDF5" + + def _open(self): + + offset = self.fp.tell() + + if not _accept(self.fp.read(8)): + raise SyntaxError("Not an HDF file") + + self.fp.seek(offset) + + # make something up + self.mode = "F" + self._size = 1, 1 + + loader = self._load() + if loader: + loader.open(self) + + def _load(self): + return _handler + + +def _save(im, fp, filename): + if _handler is None or not hasattr("_handler", "save"): + raise OSError("HDF5 save handler not installed") + _handler.save(im, fp, filename) + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(HDF5StubImageFile.format, HDF5StubImageFile, _accept) +Image.register_save(HDF5StubImageFile.format, _save) + +Image.register_extensions(HDF5StubImageFile.format, [".h5", ".hdf"]) diff --git a/.venv/lib/python3.9/site-packages/PIL/IcnsImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/IcnsImagePlugin.py new file mode 100644 index 00000000..d30eaf90 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/IcnsImagePlugin.py @@ -0,0 +1,386 @@ +# +# The Python Imaging Library. +# $Id$ +# +# macOS icns file decoder, based on icns.py by Bob Ippolito. +# +# history: +# 2004-10-09 fl Turned into a PIL plugin; removed 2.3 dependencies. +# 2020-04-04 Allow saving on all operating systems. +# +# Copyright (c) 2004 by Bob Ippolito. +# Copyright (c) 2004 by Secret Labs. +# Copyright (c) 2004 by Fredrik Lundh. +# Copyright (c) 2014 by Alastair Houghton. +# Copyright (c) 2020 by Pan Jing. +# +# See the README file for information on usage and redistribution. 
+# + +import io +import os +import struct +import sys + +from PIL import Image, ImageFile, PngImagePlugin, features + +enable_jpeg2k = features.check_codec("jpg_2000") +if enable_jpeg2k: + from PIL import Jpeg2KImagePlugin + +MAGIC = b"icns" +HEADERSIZE = 8 + + +def nextheader(fobj): + return struct.unpack(">4sI", fobj.read(HEADERSIZE)) + + +def read_32t(fobj, start_length, size): + # The 128x128 icon seems to have an extra header for some reason. + (start, length) = start_length + fobj.seek(start) + sig = fobj.read(4) + if sig != b"\x00\x00\x00\x00": + raise SyntaxError("Unknown signature, expecting 0x00000000") + return read_32(fobj, (start + 4, length - 4), size) + + +def read_32(fobj, start_length, size): + """ + Read a 32bit RGB icon resource. Seems to be either uncompressed or + an RLE packbits-like scheme. + """ + (start, length) = start_length + fobj.seek(start) + pixel_size = (size[0] * size[2], size[1] * size[2]) + sizesq = pixel_size[0] * pixel_size[1] + if length == sizesq * 3: + # uncompressed ("RGBRGBGB") + indata = fobj.read(length) + im = Image.frombuffer("RGB", pixel_size, indata, "raw", "RGB", 0, 1) + else: + # decode image + im = Image.new("RGB", pixel_size, None) + for band_ix in range(3): + data = [] + bytesleft = sizesq + while bytesleft > 0: + byte = fobj.read(1) + if not byte: + break + byte = byte[0] + if byte & 0x80: + blocksize = byte - 125 + byte = fobj.read(1) + for i in range(blocksize): + data.append(byte) + else: + blocksize = byte + 1 + data.append(fobj.read(blocksize)) + bytesleft -= blocksize + if bytesleft <= 0: + break + if bytesleft != 0: + raise SyntaxError(f"Error reading channel [{repr(bytesleft)} left]") + band = Image.frombuffer("L", pixel_size, b"".join(data), "raw", "L", 0, 1) + im.im.putband(band.im, band_ix) + return {"RGB": im} + + +def read_mk(fobj, start_length, size): + # Alpha masks seem to be uncompressed + start = start_length[0] + fobj.seek(start) + pixel_size = (size[0] * size[2], size[1] * size[2]) + sizesq = 
pixel_size[0] * pixel_size[1] + band = Image.frombuffer("L", pixel_size, fobj.read(sizesq), "raw", "L", 0, 1) + return {"A": band} + + +def read_png_or_jpeg2000(fobj, start_length, size): + (start, length) = start_length + fobj.seek(start) + sig = fobj.read(12) + if sig[:8] == b"\x89PNG\x0d\x0a\x1a\x0a": + fobj.seek(start) + im = PngImagePlugin.PngImageFile(fobj) + Image._decompression_bomb_check(im.size) + return {"RGBA": im} + elif ( + sig[:4] == b"\xff\x4f\xff\x51" + or sig[:4] == b"\x0d\x0a\x87\x0a" + or sig == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a" + ): + if not enable_jpeg2k: + raise ValueError( + "Unsupported icon subimage format (rebuild PIL " + "with JPEG 2000 support to fix this)" + ) + # j2k, jpc or j2c + fobj.seek(start) + jp2kstream = fobj.read(length) + f = io.BytesIO(jp2kstream) + im = Jpeg2KImagePlugin.Jpeg2KImageFile(f) + Image._decompression_bomb_check(im.size) + if im.mode != "RGBA": + im = im.convert("RGBA") + return {"RGBA": im} + else: + raise ValueError("Unsupported icon subimage format") + + +class IcnsFile: + + SIZES = { + (512, 512, 2): [(b"ic10", read_png_or_jpeg2000)], + (512, 512, 1): [(b"ic09", read_png_or_jpeg2000)], + (256, 256, 2): [(b"ic14", read_png_or_jpeg2000)], + (256, 256, 1): [(b"ic08", read_png_or_jpeg2000)], + (128, 128, 2): [(b"ic13", read_png_or_jpeg2000)], + (128, 128, 1): [ + (b"ic07", read_png_or_jpeg2000), + (b"it32", read_32t), + (b"t8mk", read_mk), + ], + (64, 64, 1): [(b"icp6", read_png_or_jpeg2000)], + (32, 32, 2): [(b"ic12", read_png_or_jpeg2000)], + (48, 48, 1): [(b"ih32", read_32), (b"h8mk", read_mk)], + (32, 32, 1): [ + (b"icp5", read_png_or_jpeg2000), + (b"il32", read_32), + (b"l8mk", read_mk), + ], + (16, 16, 2): [(b"ic11", read_png_or_jpeg2000)], + (16, 16, 1): [ + (b"icp4", read_png_or_jpeg2000), + (b"is32", read_32), + (b"s8mk", read_mk), + ], + } + + def __init__(self, fobj): + """ + fobj is a file-like object as an icns resource + """ + # signature : (start, length) + self.dct = dct = {} + self.fobj = 
fobj + sig, filesize = nextheader(fobj) + if sig != MAGIC: + raise SyntaxError("not an icns file") + i = HEADERSIZE + while i < filesize: + sig, blocksize = nextheader(fobj) + if blocksize <= 0: + raise SyntaxError("invalid block header") + i += HEADERSIZE + blocksize -= HEADERSIZE + dct[sig] = (i, blocksize) + fobj.seek(blocksize, io.SEEK_CUR) + i += blocksize + + def itersizes(self): + sizes = [] + for size, fmts in self.SIZES.items(): + for (fmt, reader) in fmts: + if fmt in self.dct: + sizes.append(size) + break + return sizes + + def bestsize(self): + sizes = self.itersizes() + if not sizes: + raise SyntaxError("No 32bit icon resources found") + return max(sizes) + + def dataforsize(self, size): + """ + Get an icon resource as {channel: array}. Note that + the arrays are bottom-up like windows bitmaps and will likely + need to be flipped or transposed in some way. + """ + dct = {} + for code, reader in self.SIZES[size]: + desc = self.dct.get(code) + if desc is not None: + dct.update(reader(self.fobj, desc, size)) + return dct + + def getimage(self, size=None): + if size is None: + size = self.bestsize() + if len(size) == 2: + size = (size[0], size[1], 1) + channels = self.dataforsize(size) + + im = channels.get("RGBA", None) + if im: + return im + + im = channels.get("RGB").copy() + try: + im.putalpha(channels["A"]) + except KeyError: + pass + return im + + +## +# Image plugin for Mac OS icons. + + +class IcnsImageFile(ImageFile.ImageFile): + """ + PIL image support for Mac OS .icns files. + Chooses the best resolution, but will possibly load + a different size image if you mutate the size attribute + before calling 'load'. + + The info dictionary has a key 'sizes' that is a list + of sizes that the icns file has. 
+ """ + + format = "ICNS" + format_description = "Mac OS icns resource" + + def _open(self): + self.icns = IcnsFile(self.fp) + self.mode = "RGBA" + self.info["sizes"] = self.icns.itersizes() + self.best_size = self.icns.bestsize() + self.size = ( + self.best_size[0] * self.best_size[2], + self.best_size[1] * self.best_size[2], + ) + + @property + def size(self): + return self._size + + @size.setter + def size(self, value): + info_size = value + if info_size not in self.info["sizes"] and len(info_size) == 2: + info_size = (info_size[0], info_size[1], 1) + if ( + info_size not in self.info["sizes"] + and len(info_size) == 3 + and info_size[2] == 1 + ): + simple_sizes = [ + (size[0] * size[2], size[1] * size[2]) for size in self.info["sizes"] + ] + if value in simple_sizes: + info_size = self.info["sizes"][simple_sizes.index(value)] + if info_size not in self.info["sizes"]: + raise ValueError("This is not one of the allowed sizes of this image") + self._size = value + + def load(self): + if len(self.size) == 3: + self.best_size = self.size + self.size = ( + self.best_size[0] * self.best_size[2], + self.best_size[1] * self.best_size[2], + ) + + Image.Image.load(self) + if self.im and self.im.size == self.size: + # Already loaded + return + self.load_prepare() + # This is likely NOT the best way to do it, but whatever. + im = self.icns.getimage(self.best_size) + + # If this is a PNG or JPEG 2000, it won't be loaded yet + im.load() + + self.im = im.im + self.mode = im.mode + self.size = im.size + self.load_end() + + +def _save(im, fp, filename): + """ + Saves the image as a series of PNG files, + that are then combined into a .icns file. 
+ """ + if hasattr(fp, "flush"): + fp.flush() + + sizes = { + b"ic07": 128, + b"ic08": 256, + b"ic09": 512, + b"ic10": 1024, + b"ic11": 32, + b"ic12": 64, + b"ic13": 256, + b"ic14": 512, + } + provided_images = {im.width: im for im in im.encoderinfo.get("append_images", [])} + size_streams = {} + for size in set(sizes.values()): + image = ( + provided_images[size] + if size in provided_images + else im.resize((size, size)) + ) + + temp = io.BytesIO() + image.save(temp, "png") + size_streams[size] = temp.getvalue() + + entries = [] + for type, size in sizes.items(): + stream = size_streams[size] + entries.append({"type": type, "size": len(stream), "stream": stream}) + + # Header + fp.write(MAGIC) + fp.write(struct.pack(">i", sum(entry["size"] for entry in entries))) + + # TOC + fp.write(b"TOC ") + fp.write(struct.pack(">i", HEADERSIZE + len(entries) * HEADERSIZE)) + for entry in entries: + fp.write(entry["type"]) + fp.write(struct.pack(">i", HEADERSIZE + entry["size"])) + + # Data + for entry in entries: + fp.write(entry["type"]) + fp.write(struct.pack(">i", HEADERSIZE + entry["size"])) + fp.write(entry["stream"]) + + if hasattr(fp, "flush"): + fp.flush() + + +def _accept(prefix): + return prefix[:4] == MAGIC + + +Image.register_open(IcnsImageFile.format, IcnsImageFile, _accept) +Image.register_extension(IcnsImageFile.format, ".icns") + +Image.register_save(IcnsImageFile.format, _save) +Image.register_mime(IcnsImageFile.format, "image/icns") + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("Syntax: python3 IcnsImagePlugin.py [file]") + sys.exit() + + with open(sys.argv[1], "rb") as fp: + imf = IcnsImageFile(fp) + for size in imf.info["sizes"]: + imf.size = size + imf.save("out-%s-%s-%s.png" % size) + with Image.open(sys.argv[1]) as im: + im.save("out.png") + if sys.platform == "windows": + os.startfile("out.png") diff --git a/.venv/lib/python3.9/site-packages/PIL/IcoImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/IcoImagePlugin.py new file mode 
100644 index 00000000..d9ff9b5e --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/IcoImagePlugin.py @@ -0,0 +1,339 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Windows Icon support for PIL +# +# History: +# 96-05-27 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# + +# This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis +# . +# https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki +# +# Icon format references: +# * https://en.wikipedia.org/wiki/ICO_(file_format) +# * https://msdn.microsoft.com/en-us/library/ms997538.aspx + + +import struct +import warnings +from io import BytesIO +from math import ceil, log + +from . import BmpImagePlugin, Image, ImageFile, PngImagePlugin +from ._binary import i16le as i16 +from ._binary import i32le as i32 +from ._binary import o32le as o32 + +# +# -------------------------------------------------------------------- + +_MAGIC = b"\0\0\1\0" + + +def _save(im, fp, filename): + fp.write(_MAGIC) # (2+2) + sizes = im.encoderinfo.get( + "sizes", + [(16, 16), (24, 24), (32, 32), (48, 48), (64, 64), (128, 128), (256, 256)], + ) + width, height = im.size + sizes = filter( + lambda x: False + if (x[0] > width or x[1] > height or x[0] > 256 or x[1] > 256) + else True, + sizes, + ) + sizes = list(sizes) + fp.write(struct.pack("=8bpp) + "reserved": s[3], + "planes": i16(s, 4), + "bpp": i16(s, 6), + "size": i32(s, 8), + "offset": i32(s, 12), + } + + # See Wikipedia + for j in ("width", "height"): + if not icon_header[j]: + icon_header[j] = 256 + + # See Wikipedia notes about color depth. 
+ # We need this just to differ images with equal sizes + icon_header["color_depth"] = ( + icon_header["bpp"] + or ( + icon_header["nb_color"] != 0 + and ceil(log(icon_header["nb_color"], 2)) + ) + or 256 + ) + + icon_header["dim"] = (icon_header["width"], icon_header["height"]) + icon_header["square"] = icon_header["width"] * icon_header["height"] + + self.entry.append(icon_header) + + self.entry = sorted(self.entry, key=lambda x: x["color_depth"]) + # ICO images are usually squares + # self.entry = sorted(self.entry, key=lambda x: x['width']) + self.entry = sorted(self.entry, key=lambda x: x["square"]) + self.entry.reverse() + + def sizes(self): + """ + Get a list of all available icon sizes and color depths. + """ + return {(h["width"], h["height"]) for h in self.entry} + + def getentryindex(self, size, bpp=False): + for (i, h) in enumerate(self.entry): + if size == h["dim"] and (bpp is False or bpp == h["color_depth"]): + return i + return 0 + + def getimage(self, size, bpp=False): + """ + Get an image from the icon + """ + return self.frame(self.getentryindex(size, bpp)) + + def frame(self, idx): + """ + Get an image from frame idx + """ + + header = self.entry[idx] + + self.buf.seek(header["offset"]) + data = self.buf.read(8) + self.buf.seek(header["offset"]) + + if data[:8] == PngImagePlugin._MAGIC: + # png frame + im = PngImagePlugin.PngImageFile(self.buf) + Image._decompression_bomb_check(im.size) + else: + # XOR + AND mask bmp frame + im = BmpImagePlugin.DibImageFile(self.buf) + Image._decompression_bomb_check(im.size) + + # change tile dimension to only encompass XOR image + im._size = (im.size[0], int(im.size[1] / 2)) + d, e, o, a = im.tile[0] + im.tile[0] = d, (0, 0) + im.size, o, a + + # figure out where AND mask image starts + bpp = header["bpp"] + if 32 == bpp: + # 32-bit color depth icon image allows semitransparent areas + # PIL's DIB format ignores transparency bits, recover them. 
+ # The DIB is packed in BGRX byte order where X is the alpha + # channel. + + # Back up to start of bmp data + self.buf.seek(o) + # extract every 4th byte (eg. 3,7,11,15,...) + alpha_bytes = self.buf.read(im.size[0] * im.size[1] * 4)[3::4] + + # convert to an 8bpp grayscale image + mask = Image.frombuffer( + "L", # 8bpp + im.size, # (w, h) + alpha_bytes, # source chars + "raw", # raw decoder + ("L", 0, -1), # 8bpp inverted, unpadded, reversed + ) + else: + # get AND image from end of bitmap + w = im.size[0] + if (w % 32) > 0: + # bitmap row data is aligned to word boundaries + w += 32 - (im.size[0] % 32) + + # the total mask data is + # padded row size * height / bits per char + + total_bytes = int((w * im.size[1]) / 8) + and_mask_offset = header["offset"] + header["size"] - total_bytes + + self.buf.seek(and_mask_offset) + mask_data = self.buf.read(total_bytes) + + # convert raw data to image + mask = Image.frombuffer( + "1", # 1 bpp + im.size, # (w, h) + mask_data, # source chars + "raw", # raw decoder + ("1;I", int(w / 8), -1), # 1bpp inverted, padded, reversed + ) + + # now we have two images, im is XOR image and mask is AND image + + # apply mask image as alpha channel + im = im.convert("RGBA") + im.putalpha(mask) + + return im + + +## +# Image plugin for Windows Icon files. + + +class IcoImageFile(ImageFile.ImageFile): + """ + PIL read-only image support for Microsoft Windows .ico files. + + By default the largest resolution image in the file will be loaded. This + can be changed by altering the 'size' attribute before calling 'load'. + + The info dictionary has a key 'sizes' that is a list of the sizes available + in the icon file. + + Handles classic, XP and Vista icon formats. + + When saving, PNG compression is used. Support for this was only added in + Windows Vista. If you are unable to view the icon in Windows, convert the + image to "RGBA" mode before saving. + + This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis + . 
+ https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki + """ + + format = "ICO" + format_description = "Windows Icon" + + def _open(self): + self.ico = IcoFile(self.fp) + self.info["sizes"] = self.ico.sizes() + self.size = self.ico.entry[0]["dim"] + self.load() + + @property + def size(self): + return self._size + + @size.setter + def size(self, value): + if value not in self.info["sizes"]: + raise ValueError("This is not one of the allowed sizes of this image") + self._size = value + + def load(self): + if self.im and self.im.size == self.size: + # Already loaded + return + im = self.ico.getimage(self.size) + # if tile is PNG, it won't really be loaded yet + im.load() + self.im = im.im + self.mode = im.mode + if im.size != self.size: + warnings.warn("Image was not the expected size") + + index = self.ico.getentryindex(self.size) + sizes = list(self.info["sizes"]) + sizes[index] = im.size + self.info["sizes"] = set(sizes) + + self.size = im.size + + def load_seek(self): + # Flag the ImageFile.Parser so that it + # just does all the decode at the end. + pass + + +# +# -------------------------------------------------------------------- + + +Image.register_open(IcoImageFile.format, IcoImageFile, _accept) +Image.register_save(IcoImageFile.format, _save) +Image.register_extension(IcoImageFile.format, ".ico") + +Image.register_mime(IcoImageFile.format, "image/x-icon") diff --git a/.venv/lib/python3.9/site-packages/PIL/ImImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/ImImagePlugin.py new file mode 100644 index 00000000..1dfc808c --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImImagePlugin.py @@ -0,0 +1,376 @@ +# +# The Python Imaging Library. +# $Id$ +# +# IFUNC IM file handling for PIL +# +# history: +# 1995-09-01 fl Created. 
+# 1997-01-03 fl Save palette images +# 1997-01-08 fl Added sequence support +# 1997-01-23 fl Added P and RGB save support +# 1997-05-31 fl Read floating point images +# 1997-06-22 fl Save floating point images +# 1997-08-27 fl Read and save 1-bit images +# 1998-06-25 fl Added support for RGB+LUT images +# 1998-07-02 fl Added support for YCC images +# 1998-07-15 fl Renamed offset attribute to avoid name clash +# 1998-12-29 fl Added I;16 support +# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7) +# 2003-09-26 fl Added LA/PA support +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1995-2001 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + + +import os +import re + +from . import Image, ImageFile, ImagePalette + +# -------------------------------------------------------------------- +# Standard tags + +COMMENT = "Comment" +DATE = "Date" +EQUIPMENT = "Digitalization equipment" +FRAMES = "File size (no of images)" +LUT = "Lut" +NAME = "Name" +SCALE = "Scale (x,y)" +SIZE = "Image size (x*y)" +MODE = "Image type" + +TAGS = { + COMMENT: 0, + DATE: 0, + EQUIPMENT: 0, + FRAMES: 0, + LUT: 0, + NAME: 0, + SCALE: 0, + SIZE: 0, + MODE: 0, +} + +OPEN = { + # ifunc93/p3cfunc formats + "0 1 image": ("1", "1"), + "L 1 image": ("1", "1"), + "Greyscale image": ("L", "L"), + "Grayscale image": ("L", "L"), + "RGB image": ("RGB", "RGB;L"), + "RLB image": ("RGB", "RLB"), + "RYB image": ("RGB", "RLB"), + "B1 image": ("1", "1"), + "B2 image": ("P", "P;2"), + "B4 image": ("P", "P;4"), + "X 24 image": ("RGB", "RGB"), + "L 32 S image": ("I", "I;32"), + "L 32 F image": ("F", "F;32"), + # old p3cfunc formats + "RGB3 image": ("RGB", "RGB;T"), + "RYB3 image": ("RGB", "RYB;T"), + # extensions + "LA image": ("LA", "LA;L"), + "PA image": ("LA", "PA;L"), + "RGBA image": ("RGBA", "RGBA;L"), + "RGBX image": ("RGBX", "RGBX;L"), + "CMYK image": ("CMYK", "CMYK;L"), + "YCC image": ("YCbCr", "YCbCr;L"), +} + +# ifunc95 extensions +for 
i in ["8", "8S", "16", "16S", "32", "32F"]: + OPEN[f"L {i} image"] = ("F", f"F;{i}") + OPEN[f"L*{i} image"] = ("F", f"F;{i}") +for i in ["16", "16L", "16B"]: + OPEN[f"L {i} image"] = (f"I;{i}", f"I;{i}") + OPEN[f"L*{i} image"] = (f"I;{i}", f"I;{i}") +for i in ["32S"]: + OPEN[f"L {i} image"] = ("I", f"I;{i}") + OPEN[f"L*{i} image"] = ("I", f"I;{i}") +for i in range(2, 33): + OPEN[f"L*{i} image"] = ("F", f"F;{i}") + + +# -------------------------------------------------------------------- +# Read IM directory + +split = re.compile(br"^([A-Za-z][^:]*):[ \t]*(.*)[ \t]*$") + + +def number(s): + try: + return int(s) + except ValueError: + return float(s) + + +## +# Image plugin for the IFUNC IM file format. + + +class ImImageFile(ImageFile.ImageFile): + + format = "IM" + format_description = "IFUNC Image Memory" + _close_exclusive_fp_after_loading = False + + def _open(self): + + # Quick rejection: if there's not an LF among the first + # 100 bytes, this is (probably) not a text header. + + if b"\n" not in self.fp.read(100): + raise SyntaxError("not an IM file") + self.fp.seek(0) + + n = 0 + + # Default values + self.info[MODE] = "L" + self.info[SIZE] = (512, 512) + self.info[FRAMES] = 1 + + self.rawmode = "L" + + while True: + + s = self.fp.read(1) + + # Some versions of IFUNC uses \n\r instead of \r\n... 
+ if s == b"\r": + continue + + if not s or s == b"\0" or s == b"\x1A": + break + + # FIXME: this may read whole file if not a text file + s = s + self.fp.readline() + + if len(s) > 100: + raise SyntaxError("not an IM file") + + if s[-2:] == b"\r\n": + s = s[:-2] + elif s[-1:] == b"\n": + s = s[:-1] + + try: + m = split.match(s) + except re.error as e: + raise SyntaxError("not an IM file") from e + + if m: + + k, v = m.group(1, 2) + + # Don't know if this is the correct encoding, + # but a decent guess (I guess) + k = k.decode("latin-1", "replace") + v = v.decode("latin-1", "replace") + + # Convert value as appropriate + if k in [FRAMES, SCALE, SIZE]: + v = v.replace("*", ",") + v = tuple(map(number, v.split(","))) + if len(v) == 1: + v = v[0] + elif k == MODE and v in OPEN: + v, self.rawmode = OPEN[v] + + # Add to dictionary. Note that COMMENT tags are + # combined into a list of strings. + if k == COMMENT: + if k in self.info: + self.info[k].append(v) + else: + self.info[k] = [v] + else: + self.info[k] = v + + if k in TAGS: + n += 1 + + else: + + raise SyntaxError( + "Syntax error in IM header: " + s.decode("ascii", "replace") + ) + + if not n: + raise SyntaxError("Not an IM file") + + # Basic attributes + self._size = self.info[SIZE] + self.mode = self.info[MODE] + + # Skip forward to start of image data + while s and s[0:1] != b"\x1A": + s = self.fp.read(1) + if not s: + raise SyntaxError("File truncated") + + if LUT in self.info: + # convert lookup table to palette or lut attribute + palette = self.fp.read(768) + greyscale = 1 # greyscale palette + linear = 1 # linear greyscale palette + for i in range(256): + if palette[i] == palette[i + 256] == palette[i + 512]: + if palette[i] != i: + linear = 0 + else: + greyscale = 0 + if self.mode in ["L", "LA", "P", "PA"]: + if greyscale: + if not linear: + self.lut = list(palette[:256]) + else: + if self.mode in ["L", "P"]: + self.mode = self.rawmode = "P" + elif self.mode in ["LA", "PA"]: + self.mode = "PA" + 
self.rawmode = "PA;L" + self.palette = ImagePalette.raw("RGB;L", palette) + elif self.mode == "RGB": + if not greyscale or not linear: + self.lut = list(palette) + + self.frame = 0 + + self.__offset = offs = self.fp.tell() + + self.__fp = self.fp # FIXME: hack + + if self.rawmode[:2] == "F;": + + # ifunc95 formats + try: + # use bit decoder (if necessary) + bits = int(self.rawmode[2:]) + if bits not in [8, 16, 32]: + self.tile = [("bit", (0, 0) + self.size, offs, (bits, 8, 3, 0, -1))] + return + except ValueError: + pass + + if self.rawmode in ["RGB;T", "RYB;T"]: + # Old LabEye/3PC files. Would be very surprised if anyone + # ever stumbled upon such a file ;-) + size = self.size[0] * self.size[1] + self.tile = [ + ("raw", (0, 0) + self.size, offs, ("G", 0, -1)), + ("raw", (0, 0) + self.size, offs + size, ("R", 0, -1)), + ("raw", (0, 0) + self.size, offs + 2 * size, ("B", 0, -1)), + ] + else: + # LabEye/IFUNC files + self.tile = [("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))] + + @property + def n_frames(self): + return self.info[FRAMES] + + @property + def is_animated(self): + return self.info[FRAMES] > 1 + + def seek(self, frame): + if not self._seek_check(frame): + return + + self.frame = frame + + if self.mode == "1": + bits = 1 + else: + bits = 8 * len(self.mode) + + size = ((self.size[0] * bits + 7) // 8) * self.size[1] + offs = self.__offset + frame * size + + self.fp = self.__fp + + self.tile = [("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))] + + def tell(self): + return self.frame + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +# +# -------------------------------------------------------------------- +# Save IM files + + +SAVE = { + # mode: (im type, raw mode) + "1": ("0 1", "1"), + "L": ("Greyscale", "L"), + "LA": ("LA", "LA;L"), + "P": ("Greyscale", "P"), + "PA": ("LA", "PA;L"), + "I": ("L 32S", "I;32S"), + "I;16": ("L 16", "I;16"), + 
"I;16L": ("L 16L", "I;16L"), + "I;16B": ("L 16B", "I;16B"), + "F": ("L 32F", "F;32F"), + "RGB": ("RGB", "RGB;L"), + "RGBA": ("RGBA", "RGBA;L"), + "RGBX": ("RGBX", "RGBX;L"), + "CMYK": ("CMYK", "CMYK;L"), + "YCbCr": ("YCC", "YCbCr;L"), +} + + +def _save(im, fp, filename): + + try: + image_type, rawmode = SAVE[im.mode] + except KeyError as e: + raise ValueError(f"Cannot save {im.mode} images as IM") from e + + frames = im.encoderinfo.get("frames", 1) + + fp.write(f"Image type: {image_type} image\r\n".encode("ascii")) + if filename: + # Each line must be 100 characters or less, + # or: SyntaxError("not an IM file") + # 8 characters are used for "Name: " and "\r\n" + # Keep just the filename, ditch the potentially overlong path + name, ext = os.path.splitext(os.path.basename(filename)) + name = "".join([name[: 92 - len(ext)], ext]) + + fp.write(f"Name: {name}\r\n".encode("ascii")) + fp.write(("Image size (x*y): %d*%d\r\n" % im.size).encode("ascii")) + fp.write(f"File size (no of images): {frames}\r\n".encode("ascii")) + if im.mode in ["P", "PA"]: + fp.write(b"Lut: 1\r\n") + fp.write(b"\000" * (511 - fp.tell()) + b"\032") + if im.mode in ["P", "PA"]: + fp.write(im.im.getpalette("RGB", "RGB;L")) # 768 bytes + ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, -1))]) + + +# +# -------------------------------------------------------------------- +# Registry + + +Image.register_open(ImImageFile.format, ImImageFile) +Image.register_save(ImImageFile.format, _save) + +Image.register_extension(ImImageFile.format, ".im") diff --git a/.venv/lib/python3.9/site-packages/PIL/Image.py b/.venv/lib/python3.9/site-packages/PIL/Image.py new file mode 100644 index 00000000..7dd5b35b --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/Image.py @@ -0,0 +1,3611 @@ +# +# The Python Imaging Library. 
+# $Id$ +# +# the Image class wrapper +# +# partial release history: +# 1995-09-09 fl Created +# 1996-03-11 fl PIL release 0.0 (proof of concept) +# 1996-04-30 fl PIL release 0.1b1 +# 1999-07-28 fl PIL release 1.0 final +# 2000-06-07 fl PIL release 1.1 +# 2000-10-20 fl PIL release 1.1.1 +# 2001-05-07 fl PIL release 1.1.2 +# 2002-03-15 fl PIL release 1.1.3 +# 2003-05-10 fl PIL release 1.1.4 +# 2005-03-28 fl PIL release 1.1.5 +# 2006-12-02 fl PIL release 1.1.6 +# 2009-11-15 fl PIL release 1.1.7 +# +# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved. +# Copyright (c) 1995-2009 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +import atexit +import builtins +import io +import logging +import math +import numbers +import os +import re +import struct +import sys +import tempfile +import warnings +from collections.abc import Callable, MutableMapping +from pathlib import Path + +try: + import defusedxml.ElementTree as ElementTree +except ImportError: + ElementTree = None + +# VERSION was removed in Pillow 6.0.0. +# PILLOW_VERSION is deprecated and will be removed in a future release. +# Use __version__ instead. +from . import ( + ImageMode, + TiffTags, + UnidentifiedImageError, + __version__, + _plugins, + _raise_version_warning, +) +from ._binary import i32le +from ._util import deferred_error, isPath + +if sys.version_info >= (3, 7): + + def __getattr__(name): + if name == "PILLOW_VERSION": + _raise_version_warning() + return __version__ + else: + categories = {"NORMAL": 0, "SEQUENCE": 1, "CONTAINER": 2} + if name in categories: + warnings.warn( + "Image categories are deprecated and will be removed in Pillow 10 " + "(2023-01-02). Use is_animated instead.", + DeprecationWarning, + stacklevel=2, + ) + return categories[name] + raise AttributeError(f"module '{__name__}' has no attribute '{name}'") + + +else: + + from . 
import PILLOW_VERSION + + # Silence warning + assert PILLOW_VERSION + + # categories + NORMAL = 0 + SEQUENCE = 1 + CONTAINER = 2 + + +logger = logging.getLogger(__name__) + + +class DecompressionBombWarning(RuntimeWarning): + pass + + +class DecompressionBombError(Exception): + pass + + +# Limit to around a quarter gigabyte for a 24-bit (3 bpp) image +MAX_IMAGE_PIXELS = int(1024 * 1024 * 1024 // 4 // 3) + + +try: + # If the _imaging C module is not present, Pillow will not load. + # Note that other modules should not refer to _imaging directly; + # import Image and use the Image.core variable instead. + # Also note that Image.core is not a publicly documented interface, + # and should be considered private and subject to change. + from . import _imaging as core + + if __version__ != getattr(core, "PILLOW_VERSION", None): + raise ImportError( + "The _imaging extension was built for another version of Pillow or PIL:\n" + f"Core version: {getattr(core, 'PILLOW_VERSION', None)}\n" + f"Pillow version: {__version__}" + ) + +except ImportError as v: + core = deferred_error(ImportError("The _imaging C module is not installed.")) + # Explanations for ways that we know we might have an import error + if str(v).startswith("Module use of python"): + # The _imaging C module is present, but not compiled for + # the right version (windows only). Print a warning, if + # possible. + warnings.warn( + "The _imaging extension was built for another version of Python.", + RuntimeWarning, + ) + elif str(v).startswith("The _imaging extension"): + warnings.warn(str(v), RuntimeWarning) + # Fail here anyway. Don't let people run with a mostly broken Pillow. + # see docs/porting.rst + raise + + +# works everywhere, win for pypy, not cpython +USE_CFFI_ACCESS = hasattr(sys, "pypy_version_info") +try: + import cffi +except ImportError: + cffi = None + + +def isImageType(t): + """ + Checks if an object is an image object. + + .. warning:: + + This function is for internal use only. 
+ + :param t: object to check if it's an image + :returns: True if the object is an image + """ + return hasattr(t, "im") + + +# +# Constants + +NONE = 0 + +# transpose +FLIP_LEFT_RIGHT = 0 +FLIP_TOP_BOTTOM = 1 +ROTATE_90 = 2 +ROTATE_180 = 3 +ROTATE_270 = 4 +TRANSPOSE = 5 +TRANSVERSE = 6 + +# transforms (also defined in Imaging.h) +AFFINE = 0 +EXTENT = 1 +PERSPECTIVE = 2 +QUAD = 3 +MESH = 4 + +# resampling filters (also defined in Imaging.h) +NEAREST = NONE = 0 +BOX = 4 +BILINEAR = LINEAR = 2 +HAMMING = 5 +BICUBIC = CUBIC = 3 +LANCZOS = ANTIALIAS = 1 + +_filters_support = {BOX: 0.5, BILINEAR: 1.0, HAMMING: 1.0, BICUBIC: 2.0, LANCZOS: 3.0} + + +# dithers +NEAREST = NONE = 0 +ORDERED = 1 # Not yet implemented +RASTERIZE = 2 # Not yet implemented +FLOYDSTEINBERG = 3 # default + +# palettes/quantizers +WEB = 0 +ADAPTIVE = 1 + +MEDIANCUT = 0 +MAXCOVERAGE = 1 +FASTOCTREE = 2 +LIBIMAGEQUANT = 3 + +if hasattr(core, "DEFAULT_STRATEGY"): + DEFAULT_STRATEGY = core.DEFAULT_STRATEGY + FILTERED = core.FILTERED + HUFFMAN_ONLY = core.HUFFMAN_ONLY + RLE = core.RLE + FIXED = core.FIXED + + +# -------------------------------------------------------------------- +# Registries + +ID = [] +OPEN = {} +MIME = {} +SAVE = {} +SAVE_ALL = {} +EXTENSION = {} +DECODERS = {} +ENCODERS = {} + +# -------------------------------------------------------------------- +# Modes + +if sys.byteorder == "little": + _ENDIAN = "<" +else: + _ENDIAN = ">" + +_MODE_CONV = { + # official modes + "1": ("|b1", None), # Bits need to be extended to bytes + "L": ("|u1", None), + "LA": ("|u1", 2), + "I": (_ENDIAN + "i4", None), + "F": (_ENDIAN + "f4", None), + "P": ("|u1", None), + "RGB": ("|u1", 3), + "RGBX": ("|u1", 4), + "RGBA": ("|u1", 4), + "CMYK": ("|u1", 4), + "YCbCr": ("|u1", 3), + "LAB": ("|u1", 3), # UNDONE - unsigned |u1i1i1 + "HSV": ("|u1", 3), + # I;16 == I;16L, and I;32 == I;32L + "I;16": ("u2", None), + "I;16L": ("i2", None), + "I;16LS": ("u4", None), + "I;32L": ("i4", None), + "I;32LS": ("= 1: + 
return + + try: + from . import BmpImagePlugin + + assert BmpImagePlugin + except ImportError: + pass + try: + from . import GifImagePlugin + + assert GifImagePlugin + except ImportError: + pass + try: + from . import JpegImagePlugin + + assert JpegImagePlugin + except ImportError: + pass + try: + from . import PpmImagePlugin + + assert PpmImagePlugin + except ImportError: + pass + try: + from . import PngImagePlugin + + assert PngImagePlugin + except ImportError: + pass + # try: + # import TiffImagePlugin + # assert TiffImagePlugin + # except ImportError: + # pass + + _initialized = 1 + + +def init(): + """ + Explicitly initializes the Python Imaging Library. This function + loads all available file format drivers. + """ + + global _initialized + if _initialized >= 2: + return 0 + + for plugin in _plugins: + try: + logger.debug("Importing %s", plugin) + __import__(f"PIL.{plugin}", globals(), locals(), []) + except ImportError as e: + logger.debug("Image: failed to import %s: %s", plugin, e) + + if OPEN or SAVE: + _initialized = 2 + return 1 + + +# -------------------------------------------------------------------- +# Codec factories (used by tobytes/frombytes and ImageFile.load) + + +def _getdecoder(mode, decoder_name, args, extra=()): + + # tweak arguments + if args is None: + args = () + elif not isinstance(args, tuple): + args = (args,) + + try: + decoder = DECODERS[decoder_name] + except KeyError: + pass + else: + return decoder(mode, *args + extra) + + try: + # get decoder + decoder = getattr(core, decoder_name + "_decoder") + except AttributeError as e: + raise OSError(f"decoder {decoder_name} not available") from e + return decoder(mode, *args + extra) + + +def _getencoder(mode, encoder_name, args, extra=()): + + # tweak arguments + if args is None: + args = () + elif not isinstance(args, tuple): + args = (args,) + + try: + encoder = ENCODERS[encoder_name] + except KeyError: + pass + else: + return encoder(mode, *args + extra) + + try: + # get encoder + 
encoder = getattr(core, encoder_name + "_encoder") + except AttributeError as e: + raise OSError(f"encoder {encoder_name} not available") from e + return encoder(mode, *args + extra) + + +# -------------------------------------------------------------------- +# Simple expression analyzer + + +def coerce_e(value): + return value if isinstance(value, _E) else _E(value) + + +class _E: + def __init__(self, data): + self.data = data + + def __add__(self, other): + return _E((self.data, "__add__", coerce_e(other).data)) + + def __mul__(self, other): + return _E((self.data, "__mul__", coerce_e(other).data)) + + +def _getscaleoffset(expr): + stub = ["stub"] + data = expr(_E(stub)).data + try: + (a, b, c) = data # simplified syntax + if a is stub and b == "__mul__" and isinstance(c, numbers.Number): + return c, 0.0 + if a is stub and b == "__add__" and isinstance(c, numbers.Number): + return 1.0, c + except TypeError: + pass + try: + ((a, b, c), d, e) = data # full syntax + if ( + a is stub + and b == "__mul__" + and isinstance(c, numbers.Number) + and d == "__add__" + and isinstance(e, numbers.Number) + ): + return c, e + except TypeError: + pass + raise ValueError("illegal expression") + + +# -------------------------------------------------------------------- +# Implementation wrapper + + +class Image: + """ + This class represents an image object. To create + :py:class:`~PIL.Image.Image` objects, use the appropriate factory + functions. There's hardly ever any reason to call the Image constructor + directly. + + * :py:func:`~PIL.Image.open` + * :py:func:`~PIL.Image.new` + * :py:func:`~PIL.Image.frombytes` + """ + + format = None + format_description = None + _close_exclusive_fp_after_loading = True + + def __init__(self): + # FIXME: take "new" parameters / other image? + # FIXME: turn mode and size into delegating properties? 
+ self.im = None + self.mode = "" + self._size = (0, 0) + self.palette = None + self.info = {} + self._category = 0 + self.readonly = 0 + self.pyaccess = None + self._exif = None + + def __getattr__(self, name): + if name == "category": + warnings.warn( + "Image categories are deprecated and will be removed in Pillow 10 " + "(2023-01-02). Use is_animated instead.", + DeprecationWarning, + stacklevel=2, + ) + return self._category + raise AttributeError(name) + + @property + def width(self): + return self.size[0] + + @property + def height(self): + return self.size[1] + + @property + def size(self): + return self._size + + def _new(self, im): + new = Image() + new.im = im + new.mode = im.mode + new._size = im.size + if im.mode in ("P", "PA"): + if self.palette: + new.palette = self.palette.copy() + else: + from . import ImagePalette + + new.palette = ImagePalette.ImagePalette() + new.info = self.info.copy() + return new + + # Context manager support + def __enter__(self): + return self + + def __exit__(self, *args): + if hasattr(self, "fp") and getattr(self, "_exclusive_fp", False): + if hasattr(self, "_close__fp"): + self._close__fp() + if self.fp: + self.fp.close() + self.fp = None + + def close(self): + """ + Closes the file pointer, if possible. + + This operation will destroy the image core and release its memory. + The image data will be unusable afterward. + + This function is required to close images that have multiple frames or + have not had their file read and closed by the + :py:meth:`~PIL.Image.Image.load` method. See :ref:`file-handling` for + more information. + """ + try: + if hasattr(self, "_close__fp"): + self._close__fp() + if self.fp: + self.fp.close() + self.fp = None + except Exception as msg: + logger.debug("Error closing: %s", msg) + + if getattr(self, "map", None): + self.map = None + + # Instead of simply setting to None, we're setting up a + # deferred error that will better explain that the core image + # object is gone. 
+ self.im = deferred_error(ValueError("Operation on closed image")) + + def _copy(self): + self.load() + self.im = self.im.copy() + self.pyaccess = None + self.readonly = 0 + + def _ensure_mutable(self): + if self.readonly: + self._copy() + else: + self.load() + + def _dump(self, file=None, format=None, **options): + suffix = "" + if format: + suffix = "." + format + + if not file: + f, filename = tempfile.mkstemp(suffix) + os.close(f) + else: + filename = file + if not filename.endswith(suffix): + filename = filename + suffix + + self.load() + + if not format or format == "PPM": + self.im.save_ppm(filename) + else: + self.save(filename, format, **options) + + return filename + + def __eq__(self, other): + return ( + self.__class__ is other.__class__ + and self.mode == other.mode + and self.size == other.size + and self.info == other.info + and self._category == other._category + and self.readonly == other.readonly + and self.getpalette() == other.getpalette() + and self.tobytes() == other.tobytes() + ) + + def __repr__(self): + return "<%s.%s image mode=%s size=%dx%d at 0x%X>" % ( + self.__class__.__module__, + self.__class__.__name__, + self.mode, + self.size[0], + self.size[1], + id(self), + ) + + def _repr_png_(self): + """iPython display hook support + + :returns: png version of the image as bytes + """ + b = io.BytesIO() + try: + self.save(b, "PNG") + except Exception as e: + raise ValueError("Could not save to PNG for display") from e + return b.getvalue() + + def __array__(self, dtype=None): + # numpy array interface support + import numpy as np + + new = {} + shape, typestr = _conv_type_shape(self) + new["shape"] = shape + new["typestr"] = typestr + new["version"] = 3 + if self.mode == "1": + # Binary images need to be extended from bits to bytes + # See: https://github.com/python-pillow/Pillow/issues/350 + new["data"] = self.tobytes("raw", "L") + else: + new["data"] = self.tobytes() + + class ArrayData: + __array_interface__ = new + + return 
np.array(ArrayData(), dtype) + + def __getstate__(self): + return [self.info, self.mode, self.size, self.getpalette(), self.tobytes()] + + def __setstate__(self, state): + Image.__init__(self) + self.tile = [] + info, mode, size, palette, data = state + self.info = info + self.mode = mode + self._size = size + self.im = core.new(mode, size) + if mode in ("L", "LA", "P", "PA") and palette: + self.putpalette(palette) + self.frombytes(data) + + def tobytes(self, encoder_name="raw", *args): + """ + Return image as a bytes object. + + .. warning:: + + This method returns the raw image data from the internal + storage. For compressed image data (e.g. PNG, JPEG) use + :meth:`~.save`, with a BytesIO parameter for in-memory + data. + + :param encoder_name: What encoder to use. The default is to + use the standard "raw" encoder. + :param args: Extra arguments to the encoder. + :returns: A :py:class:`bytes` object. + """ + + # may pass tuple instead of argument list + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + if encoder_name == "raw" and args == (): + args = self.mode + + self.load() + + # unpack data + e = _getencoder(self.mode, encoder_name, args) + e.setimage(self.im) + + bufsize = max(65536, self.size[0] * 4) # see RawEncode.c + + data = [] + while True: + l, s, d = e.encode(bufsize) + data.append(d) + if s: + break + if s < 0: + raise RuntimeError(f"encoder error {s} in tobytes") + + return b"".join(data) + + def tobitmap(self, name="image"): + """ + Returns the image converted to an X11 bitmap. + + .. note:: This method only works for mode "1" images. + + :param name: The name prefix to use for the bitmap variables. + :returns: A string containing an X11 bitmap. 
+ :raises ValueError: If the mode is not "1" + """ + + self.load() + if self.mode != "1": + raise ValueError("not a bitmap") + data = self.tobytes("xbm") + return b"".join( + [ + f"#define {name}_width {self.size[0]}\n".encode("ascii"), + f"#define {name}_height {self.size[1]}\n".encode("ascii"), + f"static char {name}_bits[] = {{\n".encode("ascii"), + data, + b"};", + ] + ) + + def frombytes(self, data, decoder_name="raw", *args): + """ + Loads this image with pixel data from a bytes object. + + This method is similar to the :py:func:`~PIL.Image.frombytes` function, + but loads data into this image instead of creating a new image object. + """ + + # may pass tuple instead of argument list + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + # default format + if decoder_name == "raw" and args == (): + args = self.mode + + # unpack data + d = _getdecoder(self.mode, decoder_name, args) + d.setimage(self.im) + s = d.decode(data) + + if s[0] >= 0: + raise ValueError("not enough image data") + if s[1] != 0: + raise ValueError("cannot decode image data") + + def load(self): + """ + Allocates storage for the image and loads the pixel data. In + normal cases, you don't need to call this method, since the + Image class automatically loads an opened image when it is + accessed for the first time. + + If the file associated with the image was opened by Pillow, then this + method will close it. The exception to this is if the image has + multiple frames, in which case the file will be left open for seek + operations. See :ref:`file-handling` for more information. + + :returns: An image access object. 
+ :rtype: :ref:`PixelAccess` or :py:class:`PIL.PyAccess` + """ + if self.im and self.palette and self.palette.dirty: + # realize palette + mode, arr = self.palette.getdata() + if mode == "RGBA": + mode = "RGB" + self.info["transparency"] = arr[3::4] + arr = bytes( + value for (index, value) in enumerate(arr) if index % 4 != 3 + ) + palette_length = self.im.putpalette(mode, arr) + self.palette.dirty = 0 + self.palette.rawmode = None + if "transparency" in self.info and mode in ("RGBA", "LA", "PA"): + if isinstance(self.info["transparency"], int): + self.im.putpalettealpha(self.info["transparency"], 0) + else: + self.im.putpalettealphas(self.info["transparency"]) + self.palette.mode = "RGBA" + else: + self.palette.mode = "RGB" + self.palette.palette = self.im.getpalette()[: palette_length * 3] + + if self.im: + if cffi and USE_CFFI_ACCESS: + if self.pyaccess: + return self.pyaccess + from . import PyAccess + + self.pyaccess = PyAccess.new(self, self.readonly) + if self.pyaccess: + return self.pyaccess + return self.im.pixel_access(self.readonly) + + def verify(self): + """ + Verifies the contents of a file. For data read from a file, this + method attempts to determine if the file is broken, without + actually decoding the image data. If this method finds any + problems, it raises suitable exceptions. If you need to load + the image after using this method, you must reopen the image + file. + """ + pass + + def convert(self, mode=None, matrix=None, dither=None, palette=WEB, colors=256): + """ + Returns a converted copy of this image. For the "P" mode, this + method translates pixels through the palette. If mode is + omitted, a mode is chosen so that all information in the image + and the palette can be represented without a palette. + + The current version supports all possible conversions between + "L", "RGB" and "CMYK." The ``matrix`` argument only supports "L" + and "RGB". 
+ + When translating a color image to greyscale (mode "L"), + the library uses the ITU-R 601-2 luma transform:: + + L = R * 299/1000 + G * 587/1000 + B * 114/1000 + + The default method of converting a greyscale ("L") or "RGB" + image into a bilevel (mode "1") image uses Floyd-Steinberg + dither to approximate the original image luminosity levels. If + dither is :data:`NONE`, all values larger than 127 are set to 255 (white), + all other values to 0 (black). To use other thresholds, use the + :py:meth:`~PIL.Image.Image.point` method. + + When converting from "RGBA" to "P" without a ``matrix`` argument, + this passes the operation to :py:meth:`~PIL.Image.Image.quantize`, + and ``dither`` and ``palette`` are ignored. + + :param mode: The requested mode. See: :ref:`concept-modes`. + :param matrix: An optional conversion matrix. If given, this + should be 4- or 12-tuple containing floating point values. + :param dither: Dithering method, used when converting from + mode "RGB" to "P" or from "RGB" or "L" to "1". + Available methods are :data:`NONE` or :data:`FLOYDSTEINBERG` (default). + Note that this is not used when ``matrix`` is supplied. + :param palette: Palette to use when converting from mode "RGB" + to "P". Available palettes are :data:`WEB` or :data:`ADAPTIVE`. + :param colors: Number of colors to use for the :data:`ADAPTIVE` palette. + Defaults to 256. + :rtype: :py:class:`~PIL.Image.Image` + :returns: An :py:class:`~PIL.Image.Image` object. 
+ """ + + self.load() + + has_transparency = self.info.get("transparency") is not None + if not mode and self.mode == "P": + # determine default mode + if self.palette: + mode = self.palette.mode + else: + mode = "RGB" + if mode == "RGB" and has_transparency: + mode = "RGBA" + if not mode or (mode == self.mode and not matrix): + return self.copy() + + if matrix: + # matrix conversion + if mode not in ("L", "RGB"): + raise ValueError("illegal conversion") + im = self.im.convert_matrix(mode, matrix) + new = self._new(im) + if has_transparency and self.im.bands == 3: + transparency = new.info["transparency"] + + def convert_transparency(m, v): + v = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3] * 0.5 + return max(0, min(255, int(v))) + + if mode == "L": + transparency = convert_transparency(matrix, transparency) + elif len(mode) == 3: + transparency = tuple( + [ + convert_transparency( + matrix[i * 4 : i * 4 + 4], transparency + ) + for i in range(0, len(transparency)) + ] + ) + new.info["transparency"] = transparency + return new + + if mode == "P" and self.mode == "RGBA": + return self.quantize(colors) + + trns = None + delete_trns = False + # transparency handling + if has_transparency: + if self.mode in ("1", "L", "I", "RGB") and mode == "RGBA": + # Use transparent conversion to promote from transparent + # color to an alpha channel. + new_im = self._new( + self.im.convert_transparent(mode, self.info["transparency"]) + ) + del new_im.info["transparency"] + return new_im + elif self.mode in ("L", "RGB", "P") and mode in ("L", "RGB", "P"): + t = self.info["transparency"] + if isinstance(t, bytes): + # Dragons. This can't be represented by a single color + warnings.warn( + "Palette images with Transparency expressed in bytes should be " + "converted to RGBA images" + ) + delete_trns = True + else: + # get the new transparency color. 
+ # use existing conversions + trns_im = Image()._new(core.new(self.mode, (1, 1))) + if self.mode == "P": + trns_im.putpalette(self.palette) + if isinstance(t, tuple): + err = "Couldn't allocate a palette color for transparency" + try: + t = trns_im.palette.getcolor(t, self) + except ValueError as e: + if str(e) == "cannot allocate more than 256 colors": + # If all 256 colors are in use, + # then there is no need for transparency + t = None + else: + raise ValueError(err) from e + if t is None: + trns = None + else: + trns_im.putpixel((0, 0), t) + + if mode in ("L", "RGB"): + trns_im = trns_im.convert(mode) + else: + # can't just retrieve the palette number, got to do it + # after quantization. + trns_im = trns_im.convert("RGB") + trns = trns_im.getpixel((0, 0)) + + elif self.mode == "P" and mode in ("LA", "PA", "RGBA"): + t = self.info["transparency"] + delete_trns = True + + if isinstance(t, bytes): + self.im.putpalettealphas(t) + elif isinstance(t, int): + self.im.putpalettealpha(t, 0) + else: + raise ValueError("Transparency for P mode should be bytes or int") + + if mode == "P" and palette == ADAPTIVE: + im = self.im.quantize(colors) + new = self._new(im) + from . import ImagePalette + + new.palette = ImagePalette.ImagePalette("RGB", new.im.getpalette("RGB")) + if delete_trns: + # This could possibly happen if we requantize to fewer colors. + # The transparency would be totally off in that case. + del new.info["transparency"] + if trns is not None: + try: + new.info["transparency"] = new.palette.getcolor(trns, new) + except Exception: + # if we can't make a transparent color, don't leave the old + # transparency hanging around to mess us up. 
+ del new.info["transparency"] + warnings.warn("Couldn't allocate palette entry for transparency") + return new + + # colorspace conversion + if dither is None: + dither = FLOYDSTEINBERG + + try: + im = self.im.convert(mode, dither) + except ValueError: + try: + # normalize source image and try again + im = self.im.convert(getmodebase(self.mode)) + im = im.convert(mode, dither) + except KeyError as e: + raise ValueError("illegal conversion") from e + + new_im = self._new(im) + if mode == "P" and palette != ADAPTIVE: + from . import ImagePalette + + new_im.palette = ImagePalette.ImagePalette("RGB", list(range(256)) * 3) + if delete_trns: + # crash fail if we leave a bytes transparency in an rgb/l mode. + del new_im.info["transparency"] + if trns is not None: + if new_im.mode == "P": + try: + new_im.info["transparency"] = new_im.palette.getcolor(trns, new_im) + except ValueError as e: + del new_im.info["transparency"] + if str(e) != "cannot allocate more than 256 colors": + # If all 256 colors are in use, + # then there is no need for transparency + warnings.warn( + "Couldn't allocate palette entry for transparency" + ) + else: + new_im.info["transparency"] = trns + return new_im + + def quantize(self, colors=256, method=None, kmeans=0, palette=None, dither=1): + """ + Convert the image to 'P' mode with the specified number + of colors. + + :param colors: The desired number of colors, <= 256 + :param method: :data:`MEDIANCUT` (median cut), + :data:`MAXCOVERAGE` (maximum coverage), + :data:`FASTOCTREE` (fast octree), + :data:`LIBIMAGEQUANT` (libimagequant; check support using + :py:func:`PIL.features.check_feature` + with ``feature="libimagequant"``). + + By default, :data:`MEDIANCUT` will be used. + + The exception to this is RGBA images. :data:`MEDIANCUT` and + :data:`MAXCOVERAGE` do not support RGBA images, so + :data:`FASTOCTREE` is used by default instead. + :param kmeans: Integer + :param palette: Quantize to the palette of given + :py:class:`PIL.Image.Image`. 
+ :param dither: Dithering method, used when converting from + mode "RGB" to "P" or from "RGB" or "L" to "1". + Available methods are :data:`NONE` or :data:`FLOYDSTEINBERG` (default). + Default: 1 (legacy setting) + :returns: A new image + + """ + + self.load() + + if method is None: + # defaults: + method = MEDIANCUT + if self.mode == "RGBA": + method = FASTOCTREE + + if self.mode == "RGBA" and method not in (FASTOCTREE, LIBIMAGEQUANT): + # Caller specified an invalid mode. + raise ValueError( + "Fast Octree (method == 2) and libimagequant (method == 3) " + "are the only valid methods for quantizing RGBA images" + ) + + if palette: + # use palette from reference image + palette.load() + if palette.mode != "P": + raise ValueError("bad mode for palette image") + if self.mode != "RGB" and self.mode != "L": + raise ValueError( + "only RGB or L mode images can be quantized to a palette" + ) + im = self.im.convert("P", dither, palette.im) + new_im = self._new(im) + new_im.palette = palette.palette.copy() + return new_im + + im = self._new(self.im.quantize(colors, method, kmeans)) + + from . import ImagePalette + + mode = im.im.getpalettemode() + im.palette = ImagePalette.ImagePalette(mode, im.im.getpalette(mode, mode)) + + return im + + def copy(self): + """ + Copies this image. Use this method if you wish to paste things + into an image, but still retain the original. + + :rtype: :py:class:`~PIL.Image.Image` + :returns: An :py:class:`~PIL.Image.Image` object. + """ + self.load() + return self._new(self.im.copy()) + + __copy__ = copy + + def crop(self, box=None): + """ + Returns a rectangular region from this image. The box is a + 4-tuple defining the left, upper, right, and lower pixel + coordinate. See :ref:`coordinate-system`. + + Note: Prior to Pillow 3.4.0, this was a lazy operation. + + :param box: The crop rectangle, as a (left, upper, right, lower)-tuple. + :rtype: :py:class:`~PIL.Image.Image` + :returns: An :py:class:`~PIL.Image.Image` object. 
+ """ + + if box is None: + return self.copy() + + self.load() + return self._new(self._crop(self.im, box)) + + def _crop(self, im, box): + """ + Returns a rectangular region from the core image object im. + + This is equivalent to calling im.crop((x0, y0, x1, y1)), but + includes additional sanity checks. + + :param im: a core image object + :param box: The crop rectangle, as a (left, upper, right, lower)-tuple. + :returns: A core image object. + """ + + x0, y0, x1, y1 = map(int, map(round, box)) + + absolute_values = (abs(x1 - x0), abs(y1 - y0)) + + _decompression_bomb_check(absolute_values) + + return im.crop((x0, y0, x1, y1)) + + def draft(self, mode, size): + """ + Configures the image file loader so it returns a version of the + image that as closely as possible matches the given mode and + size. For example, you can use this method to convert a color + JPEG to greyscale while loading it. + + If any changes are made, returns a tuple with the chosen ``mode`` and + ``box`` with coordinates of the original image within the altered one. + + Note that this method modifies the :py:class:`~PIL.Image.Image` object + in place. If the image has already been loaded, this method has no + effect. + + Note: This method is not implemented for most images. It is + currently implemented only for JPEG and MPO images. + + :param mode: The requested mode. + :param size: The requested size. + """ + pass + + def _expand(self, xmargin, ymargin=None): + if ymargin is None: + ymargin = xmargin + self.load() + return self._new(self.im.expand(xmargin, ymargin, 0)) + + def filter(self, filter): + """ + Filters this image using the given filter. For a list of + available filters, see the :py:mod:`~PIL.ImageFilter` module. + + :param filter: Filter kernel. + :returns: An :py:class:`~PIL.Image.Image` object.""" + + from . 
import ImageFilter + + self.load() + + if isinstance(filter, Callable): + filter = filter() + if not hasattr(filter, "filter"): + raise TypeError( + "filter argument should be ImageFilter.Filter instance or class" + ) + + multiband = isinstance(filter, ImageFilter.MultibandFilter) + if self.im.bands == 1 or multiband: + return self._new(filter.filter(self.im)) + + ims = [] + for c in range(self.im.bands): + ims.append(self._new(filter.filter(self.im.getband(c)))) + return merge(self.mode, ims) + + def getbands(self): + """ + Returns a tuple containing the name of each band in this image. + For example, ``getbands`` on an RGB image returns ("R", "G", "B"). + + :returns: A tuple containing band names. + :rtype: tuple + """ + return ImageMode.getmode(self.mode).bands + + def getbbox(self): + """ + Calculates the bounding box of the non-zero regions in the + image. + + :returns: The bounding box is returned as a 4-tuple defining the + left, upper, right, and lower pixel coordinate. See + :ref:`coordinate-system`. If the image is completely empty, this + method returns None. + + """ + + self.load() + return self.im.getbbox() + + def getcolors(self, maxcolors=256): + """ + Returns a list of colors used in this image. + + The colors will be in the image's mode. For example, an RGB image will + return a tuple of (red, green, blue) color values, and a P image will + return the index of the color in the palette. + + :param maxcolors: Maximum number of colors. If this number is + exceeded, this method returns None. The default limit is + 256 colors. + :returns: An unsorted list of (count, pixel) values. + """ + + self.load() + if self.mode in ("1", "L", "P"): + h = self.im.histogram() + out = [] + for i in range(256): + if h[i]: + out.append((h[i], i)) + if len(out) > maxcolors: + return None + return out + return self.im.getcolors(maxcolors) + + def getdata(self, band=None): + """ + Returns the contents of this image as a sequence object + containing pixel values. 
The sequence object is flattened, so + that values for line one follow directly after the values of + line zero, and so on. + + Note that the sequence object returned by this method is an + internal PIL data type, which only supports certain sequence + operations. To convert it to an ordinary sequence (e.g. for + printing), use ``list(im.getdata())``. + + :param band: What band to return. The default is to return + all bands. To return a single band, pass in the index + value (e.g. 0 to get the "R" band from an "RGB" image). + :returns: A sequence-like object. + """ + + self.load() + if band is not None: + return self.im.getband(band) + return self.im # could be abused + + def getextrema(self): + """ + Gets the the minimum and maximum pixel values for each band in + the image. + + :returns: For a single-band image, a 2-tuple containing the + minimum and maximum pixel value. For a multi-band image, + a tuple containing one 2-tuple for each band. + """ + + self.load() + if self.im.bands > 1: + extrema = [] + for i in range(self.im.bands): + extrema.append(self.im.getband(i).getextrema()) + return tuple(extrema) + return self.im.getextrema() + + def _getxmp(self, xmp_tags): + def get_name(tag): + return tag.split("}")[1] + + def get_value(element): + value = {get_name(k): v for k, v in element.attrib.items()} + children = list(element) + if children: + for child in children: + name = get_name(child.tag) + child_value = get_value(child) + if name in value: + if not isinstance(value[name], list): + value[name] = [value[name]] + value[name].append(child_value) + else: + value[name] = child_value + elif value: + if element.text: + value["text"] = element.text + else: + return element.text + return value + + if ElementTree is None: + warnings.warn("XMP data cannot be read without defusedxml dependency") + return {} + else: + root = ElementTree.fromstring(xmp_tags) + return {get_name(root.tag): get_value(root)} + + def getexif(self): + if self._exif is None: + self._exif = 
Exif() + + exif_info = self.info.get("exif") + if exif_info is None: + if "Raw profile type exif" in self.info: + exif_info = bytes.fromhex( + "".join(self.info["Raw profile type exif"].split("\n")[3:]) + ) + elif hasattr(self, "tag_v2"): + self._exif.endian = self.tag_v2._endian + self._exif.load_from_fp(self.fp, self.tag_v2._offset) + if exif_info is not None: + self._exif.load(exif_info) + + # XMP tags + if 0x0112 not in self._exif: + xmp_tags = self.info.get("XML:com.adobe.xmp") + if xmp_tags: + match = re.search(r'tiff:Orientation="([0-9])"', xmp_tags) + if match: + self._exif[0x0112] = int(match[1]) + + return self._exif + + def getim(self): + """ + Returns a capsule that points to the internal image memory. + + :returns: A capsule object. + """ + + self.load() + return self.im.ptr + + def getpalette(self): + """ + Returns the image palette as a list. + + :returns: A list of color values [r, g, b, ...], or None if the + image has no palette. + """ + + self.load() + try: + return list(self.im.getpalette()) + except ValueError: + return None # no palette + + def getpixel(self, xy): + """ + Returns the pixel value at a given position. + + :param xy: The coordinate, given as (x, y). See + :ref:`coordinate-system`. + :returns: The pixel value. If the image is a multi-layer image, + this method returns a tuple. + """ + + self.load() + if self.pyaccess: + return self.pyaccess.getpixel(xy) + return self.im.getpixel(xy) + + def getprojection(self): + """ + Get projection to x and y axes + + :returns: Two sequences, indicating where there are non-zero + pixels along the X-axis and the Y-axis, respectively. + """ + + self.load() + x, y = self.im.getprojection() + return list(x), list(y) + + def histogram(self, mask=None, extrema=None): + """ + Returns a histogram for the image. The histogram is returned as + a list of pixel counts, one for each pixel value in the source + image. 
If the image has more than one band, the histograms for + all bands are concatenated (for example, the histogram for an + "RGB" image contains 768 values). + + A bilevel image (mode "1") is treated as a greyscale ("L") image + by this method. + + If a mask is provided, the method returns a histogram for those + parts of the image where the mask image is non-zero. The mask + image must have the same size as the image, and be either a + bi-level image (mode "1") or a greyscale image ("L"). + + :param mask: An optional mask. + :param extrema: An optional tuple of manually-specified extrema. + :returns: A list containing pixel counts. + """ + self.load() + if mask: + mask.load() + return self.im.histogram((0, 0), mask.im) + if self.mode in ("I", "F"): + if extrema is None: + extrema = self.getextrema() + return self.im.histogram(extrema) + return self.im.histogram() + + def entropy(self, mask=None, extrema=None): + """ + Calculates and returns the entropy for the image. + + A bilevel image (mode "1") is treated as a greyscale ("L") + image by this method. + + If a mask is provided, the method employs the histogram for + those parts of the image where the mask image is non-zero. + The mask image must have the same size as the image, and be + either a bi-level image (mode "1") or a greyscale image ("L"). + + :param mask: An optional mask. + :param extrema: An optional tuple of manually-specified extrema. + :returns: A float value representing the image entropy + """ + self.load() + if mask: + mask.load() + return self.im.entropy((0, 0), mask.im) + if self.mode in ("I", "F"): + if extrema is None: + extrema = self.getextrema() + return self.im.entropy(extrema) + return self.im.entropy() + + def paste(self, im, box=None, mask=None): + """ + Pastes another image into this image. The box argument is either + a 2-tuple giving the upper left corner, a 4-tuple defining the + left, upper, right, and lower pixel coordinate, or None (same as + (0, 0)). 
See :ref:`coordinate-system`. If a 4-tuple is given, the size + of the pasted image must match the size of the region. + + If the modes don't match, the pasted image is converted to the mode of + this image (see the :py:meth:`~PIL.Image.Image.convert` method for + details). + + Instead of an image, the source can be a integer or tuple + containing pixel values. The method then fills the region + with the given color. When creating RGB images, you can + also use color strings as supported by the ImageColor module. + + If a mask is given, this method updates only the regions + indicated by the mask. You can use either "1", "L" or "RGBA" + images (in the latter case, the alpha band is used as mask). + Where the mask is 255, the given image is copied as is. Where + the mask is 0, the current value is preserved. Intermediate + values will mix the two images together, including their alpha + channels if they have them. + + See :py:meth:`~PIL.Image.Image.alpha_composite` if you want to + combine images with respect to their alpha channels. + + :param im: Source image or pixel value (integer or tuple). + :param box: An optional 4-tuple giving the region to paste into. + If a 2-tuple is used instead, it's treated as the upper left + corner. If omitted or None, the source is pasted into the + upper left corner. + + If an image is given as the second argument and there is no + third, the box defaults to (0, 0), and the second argument + is interpreted as a mask image. + :param mask: An optional mask image. + """ + + if isImageType(box) and mask is None: + # abbreviated paste(im, mask) syntax + mask = box + box = None + + if box is None: + box = (0, 0) + + if len(box) == 2: + # upper left corner given; get size from image or mask + if isImageType(im): + size = im.size + elif isImageType(mask): + size = mask.size + else: + # FIXME: use self.size here? 
+ raise ValueError("cannot determine region size; use 4-item box") + box += (box[0] + size[0], box[1] + size[1]) + + if isinstance(im, str): + from . import ImageColor + + im = ImageColor.getcolor(im, self.mode) + + elif isImageType(im): + im.load() + if self.mode != im.mode: + if self.mode != "RGB" or im.mode not in ("RGBA", "RGBa"): + # should use an adapter for this! + im = im.convert(self.mode) + im = im.im + + self._ensure_mutable() + + if mask: + mask.load() + self.im.paste(im, box, mask.im) + else: + self.im.paste(im, box) + + def alpha_composite(self, im, dest=(0, 0), source=(0, 0)): + """'In-place' analog of Image.alpha_composite. Composites an image + onto this image. + + :param im: image to composite over this one + :param dest: Optional 2 tuple (left, top) specifying the upper + left corner in this (destination) image. + :param source: Optional 2 (left, top) tuple for the upper left + corner in the overlay source image, or 4 tuple (left, top, right, + bottom) for the bounds of the source rectangle + + Performance Note: Not currently implemented in-place in the core layer. + """ + + if not isinstance(source, (list, tuple)): + raise ValueError("Source must be a tuple") + if not isinstance(dest, (list, tuple)): + raise ValueError("Destination must be a tuple") + if not len(source) in (2, 4): + raise ValueError("Source must be a 2 or 4-tuple") + if not len(dest) == 2: + raise ValueError("Destination must be a 2-tuple") + if min(source) < 0: + raise ValueError("Source must be non-negative") + + if len(source) == 2: + source = source + im.size + + # over image, crop if it's not the whole thing. + if source == (0, 0) + im.size: + overlay = im + else: + overlay = im.crop(source) + + # target for the paste + box = dest + (dest[0] + overlay.width, dest[1] + overlay.height) + + # destination image. don't copy if we're using the whole image. 
+ if box == (0, 0) + self.size: + background = self + else: + background = self.crop(box) + + result = alpha_composite(background, overlay) + self.paste(result, box) + + def point(self, lut, mode=None): + """ + Maps this image through a lookup table or function. + + :param lut: A lookup table, containing 256 (or 65536 if + self.mode=="I" and mode == "L") values per band in the + image. A function can be used instead, it should take a + single argument. The function is called once for each + possible pixel value, and the resulting table is applied to + all bands of the image. + + It may also be an :py:class:`~PIL.Image.ImagePointHandler` + object:: + + class Example(Image.ImagePointHandler): + def point(self, data): + # Return result + :param mode: Output mode (default is same as input). In the + current version, this can only be used if the source image + has mode "L" or "P", and the output has mode "1" or the + source image mode is "I" and the output mode is "L". + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + self.load() + + if isinstance(lut, ImagePointHandler): + return lut.point(self) + + if callable(lut): + # if it isn't a list, it should be a function + if self.mode in ("I", "I;16", "F"): + # check if the function can be used with point_transform + # UNDONE wiredfool -- I think this prevents us from ever doing + # a gamma function point transform on > 8bit images. + scale, offset = _getscaleoffset(lut) + return self._new(self.im.point_transform(scale, offset)) + # for other modes, convert the function to a table + lut = [lut(i) for i in range(256)] * self.im.bands + + if self.mode == "F": + # FIXME: _imaging returns a confusing error message for this case + raise ValueError("point operation not supported for this mode") + + return self._new(self.im.point(lut, mode)) + + def putalpha(self, alpha): + """ + Adds or replaces the alpha layer in this image. If the image + does not have an alpha layer, it's converted to "LA" or "RGBA". 
+ The new layer must be either "L" or "1". + + :param alpha: The new alpha layer. This can either be an "L" or "1" + image having the same size as this image, or an integer or + other color value. + """ + + self._ensure_mutable() + + if self.mode not in ("LA", "PA", "RGBA"): + # attempt to promote self to a matching alpha mode + try: + mode = getmodebase(self.mode) + "A" + try: + self.im.setmode(mode) + except (AttributeError, ValueError) as e: + # do things the hard way + im = self.im.convert(mode) + if im.mode not in ("LA", "PA", "RGBA"): + raise ValueError from e # sanity check + self.im = im + self.pyaccess = None + self.mode = self.im.mode + except KeyError as e: + raise ValueError("illegal image mode") from e + + if self.mode in ("LA", "PA"): + band = 1 + else: + band = 3 + + if isImageType(alpha): + # alpha layer + if alpha.mode not in ("1", "L"): + raise ValueError("illegal image mode") + alpha.load() + if alpha.mode == "1": + alpha = alpha.convert("L") + else: + # constant alpha + try: + self.im.fillband(band, alpha) + except (AttributeError, ValueError): + # do things the hard way + alpha = new("L", self.size, alpha) + else: + return + + self.im.putband(alpha.im, band) + + def putdata(self, data, scale=1.0, offset=0.0): + """ + Copies pixel data to this image. This method copies data from a + sequence object into the image, starting at the upper left + corner (0, 0), and continuing until either the image or the + sequence ends. The scale and offset values are used to adjust + the sequence values: **pixel = value*scale + offset**. + + :param data: A sequence object. + :param scale: An optional scale value. The default is 1.0. + :param offset: An optional offset value. The default is 0.0. + """ + + self._ensure_mutable() + + self.im.putdata(data, scale, offset) + + def putpalette(self, data, rawmode="RGB"): + """ + Attaches a palette to this image. The image must be a "P", "PA", "L" + or "LA" image. 
+ + The palette sequence must contain at most 256 colors, made up of one + integer value for each channel in the raw mode. + For example, if the raw mode is "RGB", then it can contain at most 768 + values, made up of red, green and blue values for the corresponding pixel + index in the 256 colors. + If the raw mode is "RGBA", then it can contain at most 1024 values, + containing red, green, blue and alpha values. + + Alternatively, an 8-bit string may be used instead of an integer sequence. + + :param data: A palette sequence (either a list or a string). + :param rawmode: The raw mode of the palette. Either "RGB", "RGBA", or a + mode that can be transformed to "RGB" (e.g. "R", "BGR;15", "RGBA;L"). + """ + from . import ImagePalette + + if self.mode not in ("L", "LA", "P", "PA"): + raise ValueError("illegal image mode") + if isinstance(data, ImagePalette.ImagePalette): + palette = ImagePalette.raw(data.rawmode, data.palette) + else: + if not isinstance(data, bytes): + data = bytes(data) + palette = ImagePalette.raw(rawmode, data) + self.mode = "PA" if "A" in self.mode else "P" + self.palette = palette + self.palette.mode = "RGB" + self.load() # install new palette + + def putpixel(self, xy, value): + """ + Modifies the pixel at the given position. The color is given as + a single numerical value for single-band images, and a tuple for + multi-band images. In addition to this, RGB and RGBA tuples are + accepted for P images. + + Note that this method is relatively slow. For more extensive changes, + use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw` + module instead. + + See: + + * :py:meth:`~PIL.Image.Image.paste` + * :py:meth:`~PIL.Image.Image.putdata` + * :py:mod:`~PIL.ImageDraw` + + :param xy: The pixel coordinate, given as (x, y). See + :ref:`coordinate-system`. + :param value: The pixel value. 
+ """ + + if self.readonly: + self._copy() + self.load() + + if self.pyaccess: + return self.pyaccess.putpixel(xy, value) + + if ( + self.mode == "P" + and isinstance(value, (list, tuple)) + and len(value) in [3, 4] + ): + # RGB or RGBA value for a P image + value = self.palette.getcolor(value, self) + return self.im.putpixel(xy, value) + + def remap_palette(self, dest_map, source_palette=None): + """ + Rewrites the image to reorder the palette. + + :param dest_map: A list of indexes into the original palette. + e.g. ``[1,0]`` would swap a two item palette, and ``list(range(256))`` + is the identity transform. + :param source_palette: Bytes or None. + :returns: An :py:class:`~PIL.Image.Image` object. + + """ + from . import ImagePalette + + if self.mode not in ("L", "P"): + raise ValueError("illegal image mode") + + if source_palette is None: + if self.mode == "P": + self.load() + source_palette = self.im.getpalette("RGB")[:768] + else: # L-mode + source_palette = bytearray(i // 3 for i in range(768)) + + palette_bytes = b"" + new_positions = [0] * 256 + + # pick only the used colors from the palette + for i, oldPosition in enumerate(dest_map): + palette_bytes += source_palette[oldPosition * 3 : oldPosition * 3 + 3] + new_positions[oldPosition] = i + + # replace the palette color id of all pixel with the new id + + # Palette images are [0..255], mapped through a 1 or 3 + # byte/color map. We need to remap the whole image + # from palette 1 to palette 2. New_positions is + # an array of indexes into palette 1. Palette 2 is + # palette 1 with any holes removed. + + # We're going to leverage the convert mechanism to use the + # C code to remap the image from palette 1 to palette 2, + # by forcing the source image into 'L' mode and adding a + # mapping 'L' mode palette, then converting back to 'L' + # sans palette thus converting the image bytes, then + # assigning the optimized RGB palette. 
+ + # perf reference, 9500x4000 gif, w/~135 colors + # 14 sec prepatch, 1 sec postpatch with optimization forced. + + mapping_palette = bytearray(new_positions) + + m_im = self.copy() + m_im.mode = "P" + + m_im.palette = ImagePalette.ImagePalette("RGB", palette=mapping_palette * 3) + # possibly set palette dirty, then + # m_im.putpalette(mapping_palette, 'L') # converts to 'P' + # or just force it. + # UNDONE -- this is part of the general issue with palettes + m_im.im.putpalette("RGB;L", m_im.palette.tobytes()) + + m_im = m_im.convert("L") + + # Internally, we require 768 bytes for a palette. + new_palette_bytes = palette_bytes + (768 - len(palette_bytes)) * b"\x00" + m_im.putpalette(new_palette_bytes) + m_im.palette = ImagePalette.ImagePalette("RGB", palette=palette_bytes) + + return m_im + + def _get_safe_box(self, size, resample, box): + """Expands the box so it includes adjacent pixels + that may be used by resampling with the given resampling filter. + """ + filter_support = _filters_support[resample] - 0.5 + scale_x = (box[2] - box[0]) / size[0] + scale_y = (box[3] - box[1]) / size[1] + support_x = filter_support * scale_x + support_y = filter_support * scale_y + + return ( + max(0, int(box[0] - support_x)), + max(0, int(box[1] - support_y)), + min(self.size[0], math.ceil(box[2] + support_x)), + min(self.size[1], math.ceil(box[3] + support_y)), + ) + + def resize(self, size, resample=None, box=None, reducing_gap=None): + """ + Returns a resized copy of this image. + + :param size: The requested size in pixels, as a 2-tuple: + (width, height). + :param resample: An optional resampling filter. This can be + one of :py:data:`PIL.Image.NEAREST`, :py:data:`PIL.Image.BOX`, + :py:data:`PIL.Image.BILINEAR`, :py:data:`PIL.Image.HAMMING`, + :py:data:`PIL.Image.BICUBIC` or :py:data:`PIL.Image.LANCZOS`. + If the image has mode "1" or "P", it is always set to + :py:data:`PIL.Image.NEAREST`. 
+ If the image mode specifies a number of bits, such as "I;16", then the + default filter is :py:data:`PIL.Image.NEAREST`. + Otherwise, the default filter is :py:data:`PIL.Image.BICUBIC`. + See: :ref:`concept-filters`. + :param box: An optional 4-tuple of floats providing + the source image region to be scaled. + The values must be within (0, 0, width, height) rectangle. + If omitted or None, the entire source is used. + :param reducing_gap: Apply optimization by resizing the image + in two steps. First, reducing the image by integer times + using :py:meth:`~PIL.Image.Image.reduce`. + Second, resizing using regular resampling. The last step + changes size no less than by ``reducing_gap`` times. + ``reducing_gap`` may be None (no first step is performed) + or should be greater than 1.0. The bigger ``reducing_gap``, + the closer the result to the fair resampling. + The smaller ``reducing_gap``, the faster resizing. + With ``reducing_gap`` greater or equal to 3.0, the result is + indistinguishable from fair resampling in most cases. + The default value is None (no optimization). + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + if resample is None: + type_special = ";" in self.mode + resample = NEAREST if type_special else BICUBIC + elif resample not in (NEAREST, BILINEAR, BICUBIC, LANCZOS, BOX, HAMMING): + message = f"Unknown resampling filter ({resample})." 
+ + filters = [ + "{} ({})".format(filter[1], filter[0]) + for filter in ( + (NEAREST, "Image.NEAREST"), + (LANCZOS, "Image.LANCZOS"), + (BILINEAR, "Image.BILINEAR"), + (BICUBIC, "Image.BICUBIC"), + (BOX, "Image.BOX"), + (HAMMING, "Image.HAMMING"), + ) + ] + raise ValueError( + message + " Use " + ", ".join(filters[:-1]) + " or " + filters[-1] + ) + + if reducing_gap is not None and reducing_gap < 1.0: + raise ValueError("reducing_gap must be 1.0 or greater") + + size = tuple(size) + + if box is None: + box = (0, 0) + self.size + else: + box = tuple(box) + + if self.size == size and box == (0, 0) + self.size: + return self.copy() + + if self.mode in ("1", "P"): + resample = NEAREST + + if self.mode in ["LA", "RGBA"] and resample != NEAREST: + im = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode]) + im = im.resize(size, resample, box) + return im.convert(self.mode) + + self.load() + + if reducing_gap is not None and resample != NEAREST: + factor_x = int((box[2] - box[0]) / size[0] / reducing_gap) or 1 + factor_y = int((box[3] - box[1]) / size[1] / reducing_gap) or 1 + if factor_x > 1 or factor_y > 1: + reduce_box = self._get_safe_box(size, resample, box) + factor = (factor_x, factor_y) + if callable(self.reduce): + self = self.reduce(factor, box=reduce_box) + else: + self = Image.reduce(self, factor, box=reduce_box) + box = ( + (box[0] - reduce_box[0]) / factor_x, + (box[1] - reduce_box[1]) / factor_y, + (box[2] - reduce_box[0]) / factor_x, + (box[3] - reduce_box[1]) / factor_y, + ) + + return self._new(self.im.resize(size, resample, box)) + + def reduce(self, factor, box=None): + """ + Returns a copy of the image reduced ``factor`` times. + If the size of the image is not dividable by ``factor``, + the resulting size will be rounded up. + + :param factor: A greater than 0 integer or tuple of two integers + for width and height separately. + :param box: An optional 4-tuple of ints providing + the source image region to be reduced. 
+ The values must be within ``(0, 0, width, height)`` rectangle. + If omitted or ``None``, the entire source is used. + """ + if not isinstance(factor, (list, tuple)): + factor = (factor, factor) + + if box is None: + box = (0, 0) + self.size + else: + box = tuple(box) + + if factor == (1, 1) and box == (0, 0) + self.size: + return self.copy() + + if self.mode in ["LA", "RGBA"]: + im = self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode]) + im = im.reduce(factor, box) + return im.convert(self.mode) + + self.load() + + return self._new(self.im.reduce(factor, box)) + + def rotate( + self, + angle, + resample=NEAREST, + expand=0, + center=None, + translate=None, + fillcolor=None, + ): + """ + Returns a rotated copy of this image. This method returns a + copy of this image, rotated the given number of degrees counter + clockwise around its centre. + + :param angle: In degrees counter clockwise. + :param resample: An optional resampling filter. This can be + one of :py:data:`PIL.Image.NEAREST` (use nearest neighbour), + :py:data:`PIL.Image.BILINEAR` (linear interpolation in a 2x2 + environment), or :py:data:`PIL.Image.BICUBIC` + (cubic spline interpolation in a 4x4 environment). + If omitted, or if the image has mode "1" or "P", it is + set to :py:data:`PIL.Image.NEAREST`. See :ref:`concept-filters`. + :param expand: Optional expansion flag. If true, expands the output + image to make it large enough to hold the entire rotated image. + If false or omitted, make the output image the same size as the + input image. Note that the expand flag assumes rotation around + the center and no translation. + :param center: Optional center of rotation (a 2-tuple). Origin is + the upper left corner. Default is the center of the image. + :param translate: An optional post-rotate translation (a 2-tuple). + :param fillcolor: An optional color for area outside the rotated image. + :returns: An :py:class:`~PIL.Image.Image` object. 
+ """ + + angle = angle % 360.0 + + # Fast paths regardless of filter, as long as we're not + # translating or changing the center. + if not (center or translate): + if angle == 0: + return self.copy() + if angle == 180: + return self.transpose(ROTATE_180) + if angle in (90, 270) and (expand or self.width == self.height): + return self.transpose(ROTATE_90 if angle == 90 else ROTATE_270) + + # Calculate the affine matrix. Note that this is the reverse + # transformation (from destination image to source) because we + # want to interpolate the (discrete) destination pixel from + # the local area around the (floating) source pixel. + + # The matrix we actually want (note that it operates from the right): + # (1, 0, tx) (1, 0, cx) ( cos a, sin a, 0) (1, 0, -cx) + # (0, 1, ty) * (0, 1, cy) * (-sin a, cos a, 0) * (0, 1, -cy) + # (0, 0, 1) (0, 0, 1) ( 0, 0, 1) (0, 0, 1) + + # The reverse matrix is thus: + # (1, 0, cx) ( cos -a, sin -a, 0) (1, 0, -cx) (1, 0, -tx) + # (0, 1, cy) * (-sin -a, cos -a, 0) * (0, 1, -cy) * (0, 1, -ty) + # (0, 0, 1) ( 0, 0, 1) (0, 0, 1) (0, 0, 1) + + # In any case, the final translation may be updated at the end to + # compensate for the expand flag. + + w, h = self.size + + if translate is None: + post_trans = (0, 0) + else: + post_trans = translate + if center is None: + # FIXME These should be rounded to ints? 
+ rotn_center = (w / 2.0, h / 2.0) + else: + rotn_center = center + + angle = -math.radians(angle) + matrix = [ + round(math.cos(angle), 15), + round(math.sin(angle), 15), + 0.0, + round(-math.sin(angle), 15), + round(math.cos(angle), 15), + 0.0, + ] + + def transform(x, y, matrix): + (a, b, c, d, e, f) = matrix + return a * x + b * y + c, d * x + e * y + f + + matrix[2], matrix[5] = transform( + -rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix + ) + matrix[2] += rotn_center[0] + matrix[5] += rotn_center[1] + + if expand: + # calculate output size + xx = [] + yy = [] + for x, y in ((0, 0), (w, 0), (w, h), (0, h)): + x, y = transform(x, y, matrix) + xx.append(x) + yy.append(y) + nw = math.ceil(max(xx)) - math.floor(min(xx)) + nh = math.ceil(max(yy)) - math.floor(min(yy)) + + # We multiply a translation matrix from the right. Because of its + # special form, this is the same as taking the image of the + # translation vector as new translation vector. + matrix[2], matrix[5] = transform(-(nw - w) / 2.0, -(nh - h) / 2.0, matrix) + w, h = nw, nh + + return self.transform((w, h), AFFINE, matrix, resample, fillcolor=fillcolor) + + def save(self, fp, format=None, **params): + """ + Saves this image under the given filename. If no format is + specified, the format to use is determined from the filename + extension, if possible. + + Keyword options can be used to provide additional instructions + to the writer. If a writer doesn't recognise an option, it is + silently ignored. The available options are described in the + :doc:`image format documentation + <../handbook/image-file-formats>` for each writer. + + You can use a file object instead of a filename. In this case, + you must always specify the format. The file object must + implement the ``seek``, ``tell``, and ``write`` + methods, and be opened in binary mode. + + :param fp: A filename (string), pathlib.Path object or file object. + :param format: Optional format override. 
If omitted, the + format to use is determined from the filename extension. + If a file object was used instead of a filename, this + parameter should always be used. + :param params: Extra parameters to the image writer. + :returns: None + :exception ValueError: If the output format could not be determined + from the file name. Use the format option to solve this. + :exception OSError: If the file could not be written. The file + may have been created, and may contain partial data. + """ + + filename = "" + open_fp = False + if isinstance(fp, Path): + filename = str(fp) + open_fp = True + elif isPath(fp): + filename = fp + open_fp = True + elif fp == sys.stdout: + try: + fp = sys.stdout.buffer + except AttributeError: + pass + if not filename and hasattr(fp, "name") and isPath(fp.name): + # only set the name for metadata purposes + filename = fp.name + + # may mutate self! + self._ensure_mutable() + + save_all = params.pop("save_all", False) + self.encoderinfo = params + self.encoderconfig = () + + preinit() + + ext = os.path.splitext(filename)[1].lower() + + if not format: + if ext not in EXTENSION: + init() + try: + format = EXTENSION[ext] + except KeyError as e: + raise ValueError(f"unknown file extension: {ext}") from e + + if format.upper() not in SAVE: + init() + if save_all: + save_handler = SAVE_ALL[format.upper()] + else: + save_handler = SAVE[format.upper()] + + if open_fp: + if params.get("append", False): + # Open also for reading ("+"), because TIFF save_all + # writer needs to go back and edit the written data. + fp = builtins.open(filename, "r+b") + else: + fp = builtins.open(filename, "w+b") + + try: + save_handler(self, fp, filename) + finally: + # do what we can to clean up + if open_fp: + fp.close() + + def seek(self, frame): + """ + Seeks to the given frame in this sequence file. If you seek + beyond the end of the sequence, the method raises an + ``EOFError`` exception. 
When a sequence file is opened, the + library automatically seeks to frame 0. + + See :py:meth:`~PIL.Image.Image.tell`. + + If defined, :attr:`~PIL.Image.Image.n_frames` refers to the + number of available frames. + + :param frame: Frame number, starting at 0. + :exception EOFError: If the call attempts to seek beyond the end + of the sequence. + """ + + # overridden by file handlers + if frame != 0: + raise EOFError + + def show(self, title=None, command=None): + """ + Displays this image. This method is mainly intended for debugging purposes. + + This method calls :py:func:`PIL.ImageShow.show` internally. You can use + :py:func:`PIL.ImageShow.register` to override its default behaviour. + + The image is first saved to a temporary file. By default, it will be in + PNG format. + + On Unix, the image is then opened using the **display**, **eog** or + **xv** utility, depending on which one can be found. + + On macOS, the image is opened with the native Preview application. + + On Windows, the image is opened with the standard PNG display utility. + + :param title: Optional title to use for the image window, where possible. + """ + + if command is not None: + warnings.warn( + "The command parameter is deprecated and will be removed in Pillow 9 " + "(2022-01-02). Use a subclass of ImageShow.Viewer instead.", + DeprecationWarning, + ) + + _show(self, title=title, command=command) + + def split(self): + """ + Split this image into individual bands. This method returns a + tuple of individual image bands from an image. For example, + splitting an "RGB" image creates three new images each + containing a copy of one of the original bands (red, green, + blue). + + If you need only one band, :py:meth:`~PIL.Image.Image.getchannel` + method can be more convenient and faster. + + :returns: A tuple containing bands. 
+ """ + + self.load() + if self.im.bands == 1: + ims = [self.copy()] + else: + ims = map(self._new, self.im.split()) + return tuple(ims) + + def getchannel(self, channel): + """ + Returns an image containing a single channel of the source image. + + :param channel: What channel to return. Could be index + (0 for "R" channel of "RGB") or channel name + ("A" for alpha channel of "RGBA"). + :returns: An image in "L" mode. + + .. versionadded:: 4.3.0 + """ + self.load() + + if isinstance(channel, str): + try: + channel = self.getbands().index(channel) + except ValueError as e: + raise ValueError(f'The image has no channel "{channel}"') from e + + return self._new(self.im.getband(channel)) + + def tell(self): + """ + Returns the current frame number. See :py:meth:`~PIL.Image.Image.seek`. + + If defined, :attr:`~PIL.Image.Image.n_frames` refers to the + number of available frames. + + :returns: Frame number, starting with 0. + """ + return 0 + + def thumbnail(self, size, resample=BICUBIC, reducing_gap=2.0): + """ + Make this image into a thumbnail. This method modifies the + image to contain a thumbnail version of itself, no larger than + the given size. This method calculates an appropriate thumbnail + size to preserve the aspect of the image, calls the + :py:meth:`~PIL.Image.Image.draft` method to configure the file reader + (where applicable), and finally resizes the image. + + Note that this function modifies the :py:class:`~PIL.Image.Image` + object in place. If you need to use the full resolution image as well, + apply this method to a :py:meth:`~PIL.Image.Image.copy` of the original + image. + + :param size: Requested size. + :param resample: Optional resampling filter. This can be one + of :py:data:`PIL.Image.NEAREST`, :py:data:`PIL.Image.BOX`, + :py:data:`PIL.Image.BILINEAR`, :py:data:`PIL.Image.HAMMING`, + :py:data:`PIL.Image.BICUBIC` or :py:data:`PIL.Image.LANCZOS`. + If omitted, it defaults to :py:data:`PIL.Image.BICUBIC`. 
+ (was :py:data:`PIL.Image.NEAREST` prior to version 2.5.0). + See: :ref:`concept-filters`. + :param reducing_gap: Apply optimization by resizing the image + in two steps. First, reducing the image by integer times + using :py:meth:`~PIL.Image.Image.reduce` or + :py:meth:`~PIL.Image.Image.draft` for JPEG images. + Second, resizing using regular resampling. The last step + changes size no less than by ``reducing_gap`` times. + ``reducing_gap`` may be None (no first step is performed) + or should be greater than 1.0. The bigger ``reducing_gap``, + the closer the result to the fair resampling. + The smaller ``reducing_gap``, the faster resizing. + With ``reducing_gap`` greater or equal to 3.0, the result is + indistinguishable from fair resampling in most cases. + The default value is 2.0 (very close to fair resampling + while still being faster in many cases). + :returns: None + """ + + x, y = map(math.floor, size) + if x >= self.width and y >= self.height: + return + + def round_aspect(number, key): + return max(min(math.floor(number), math.ceil(number), key=key), 1) + + # preserve aspect ratio + aspect = self.width / self.height + if x / y >= aspect: + x = round_aspect(y * aspect, key=lambda n: abs(aspect - n / y)) + else: + y = round_aspect( + x / aspect, key=lambda n: 0 if n == 0 else abs(aspect - x / n) + ) + size = (x, y) + + box = None + if reducing_gap is not None: + res = self.draft(None, (size[0] * reducing_gap, size[1] * reducing_gap)) + if res is not None: + box = res[1] + + if self.size != size: + im = self.resize(size, resample, box=box, reducing_gap=reducing_gap) + + self.im = im.im + self._size = size + self.mode = self.im.mode + + self.readonly = 0 + self.pyaccess = None + + # FIXME: the different transform methods need further explanation + # instead of bloating the method docs, add a separate chapter. + def transform( + self, size, method, data=None, resample=NEAREST, fill=1, fillcolor=None + ): + """ + Transforms this image. 
This method creates a new image with the + given size, and the same mode as the original, and copies data + to the new image using the given transform. + + :param size: The output size. + :param method: The transformation method. This is one of + :py:data:`PIL.Image.EXTENT` (cut out a rectangular subregion), + :py:data:`PIL.Image.AFFINE` (affine transform), + :py:data:`PIL.Image.PERSPECTIVE` (perspective transform), + :py:data:`PIL.Image.QUAD` (map a quadrilateral to a rectangle), or + :py:data:`PIL.Image.MESH` (map a number of source quadrilaterals + in one operation). + + It may also be an :py:class:`~PIL.Image.ImageTransformHandler` + object:: + + class Example(Image.ImageTransformHandler): + def transform(self, size, data, resample, fill=1): + # Return result + + It may also be an object with a ``method.getdata`` method + that returns a tuple supplying new ``method`` and ``data`` values:: + + class Example: + def getdata(self): + method = Image.EXTENT + data = (0, 0, 100, 100) + return method, data + :param data: Extra data to the transformation method. + :param resample: Optional resampling filter. It can be one of + :py:data:`PIL.Image.NEAREST` (use nearest neighbour), + :py:data:`PIL.Image.BILINEAR` (linear interpolation in a 2x2 + environment), or :py:data:`PIL.Image.BICUBIC` (cubic spline + interpolation in a 4x4 environment). If omitted, or if the image + has mode "1" or "P", it is set to :py:data:`PIL.Image.NEAREST`. + See: :ref:`concept-filters`. + :param fill: If ``method`` is an + :py:class:`~PIL.Image.ImageTransformHandler` object, this is one of + the arguments passed to it. Otherwise, it is unused. + :param fillcolor: Optional fill color for the area outside the + transform in the output image. + :returns: An :py:class:`~PIL.Image.Image` object. 
+ """ + + if self.mode in ("LA", "RGBA") and resample != NEAREST: + return ( + self.convert({"LA": "La", "RGBA": "RGBa"}[self.mode]) + .transform(size, method, data, resample, fill, fillcolor) + .convert(self.mode) + ) + + if isinstance(method, ImageTransformHandler): + return method.transform(size, self, resample=resample, fill=fill) + + if hasattr(method, "getdata"): + # compatibility w. old-style transform objects + method, data = method.getdata() + + if data is None: + raise ValueError("missing method data") + + im = new(self.mode, size, fillcolor) + if self.mode == "P" and self.palette: + im.palette = self.palette.copy() + im.info = self.info.copy() + if method == MESH: + # list of quads + for box, quad in data: + im.__transformer(box, self, QUAD, quad, resample, fillcolor is None) + else: + im.__transformer( + (0, 0) + size, self, method, data, resample, fillcolor is None + ) + + return im + + def __transformer(self, box, image, method, data, resample=NEAREST, fill=1): + w = box[2] - box[0] + h = box[3] - box[1] + + if method == AFFINE: + data = data[0:6] + + elif method == EXTENT: + # convert extent to an affine transform + x0, y0, x1, y1 = data + xs = (x1 - x0) / w + ys = (y1 - y0) / h + method = AFFINE + data = (xs, 0, x0, 0, ys, y0) + + elif method == PERSPECTIVE: + data = data[0:8] + + elif method == QUAD: + # quadrilateral warp. data specifies the four corners + # given as NW, SW, SE, and NE. 
+ nw = data[0:2] + sw = data[2:4] + se = data[4:6] + ne = data[6:8] + x0, y0 = nw + As = 1.0 / w + At = 1.0 / h + data = ( + x0, + (ne[0] - x0) * As, + (sw[0] - x0) * At, + (se[0] - sw[0] - ne[0] + x0) * As * At, + y0, + (ne[1] - y0) * As, + (sw[1] - y0) * At, + (se[1] - sw[1] - ne[1] + y0) * As * At, + ) + + else: + raise ValueError("unknown transformation method") + + if resample not in (NEAREST, BILINEAR, BICUBIC): + if resample in (BOX, HAMMING, LANCZOS): + message = { + BOX: "Image.BOX", + HAMMING: "Image.HAMMING", + LANCZOS: "Image.LANCZOS/Image.ANTIALIAS", + }[resample] + f" ({resample}) cannot be used." + else: + message = f"Unknown resampling filter ({resample})." + + filters = [ + "{} ({})".format(filter[1], filter[0]) + for filter in ( + (NEAREST, "Image.NEAREST"), + (BILINEAR, "Image.BILINEAR"), + (BICUBIC, "Image.BICUBIC"), + ) + ] + raise ValueError( + message + " Use " + ", ".join(filters[:-1]) + " or " + filters[-1] + ) + + image.load() + + self.load() + + if image.mode in ("1", "P"): + resample = NEAREST + + self.im.transform2(box, image.im, method, data, resample, fill) + + def transpose(self, method): + """ + Transpose image (flip or rotate in 90 degree steps) + + :param method: One of :py:data:`PIL.Image.FLIP_LEFT_RIGHT`, + :py:data:`PIL.Image.FLIP_TOP_BOTTOM`, :py:data:`PIL.Image.ROTATE_90`, + :py:data:`PIL.Image.ROTATE_180`, :py:data:`PIL.Image.ROTATE_270`, + :py:data:`PIL.Image.TRANSPOSE` or :py:data:`PIL.Image.TRANSVERSE`. + :returns: Returns a flipped or rotated copy of this image. + """ + + self.load() + return self._new(self.im.transpose(method)) + + def effect_spread(self, distance): + """ + Randomly spread pixels in an image. + + :param distance: Distance to spread pixels. + """ + self.load() + return self._new(self.im.effect_spread(distance)) + + def toqimage(self): + """Returns a QImage copy of this image""" + from . 
import ImageQt + + if not ImageQt.qt_is_installed: + raise ImportError("Qt bindings are not installed") + return ImageQt.toqimage(self) + + def toqpixmap(self): + """Returns a QPixmap copy of this image""" + from . import ImageQt + + if not ImageQt.qt_is_installed: + raise ImportError("Qt bindings are not installed") + return ImageQt.toqpixmap(self) + + +# -------------------------------------------------------------------- +# Abstract handlers. + + +class ImagePointHandler: + """ + Used as a mixin by point transforms + (for use with :py:meth:`~PIL.Image.Image.point`) + """ + + pass + + +class ImageTransformHandler: + """ + Used as a mixin by geometry transforms + (for use with :py:meth:`~PIL.Image.Image.transform`) + """ + + pass + + +# -------------------------------------------------------------------- +# Factories + +# +# Debugging + + +def _wedge(): + """Create greyscale wedge (for debugging only)""" + + return Image()._new(core.wedge("L")) + + +def _check_size(size): + """ + Common check to enforce type and sanity check on size tuples + + :param size: Should be a 2 tuple of (width, height) + :returns: True, or raises a ValueError + """ + + if not isinstance(size, (list, tuple)): + raise ValueError("Size must be a tuple") + if len(size) != 2: + raise ValueError("Size must be a tuple of length 2") + if size[0] < 0 or size[1] < 0: + raise ValueError("Width and height must be >= 0") + + return True + + +def new(mode, size, color=0): + """ + Creates a new image with the given mode and size. + + :param mode: The mode to use for the new image. See: + :ref:`concept-modes`. + :param size: A 2-tuple, containing (width, height) in pixels. + :param color: What color to use for the image. Default is black. + If given, this should be a single integer or floating point value + for single-band modes, and a tuple for multi-band modes (one value + per band). When creating RGB images, you can also use color + strings as supported by the ImageColor module. 
If the color is + None, the image is not initialised. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + _check_size(size) + + if color is None: + # don't initialize + return Image()._new(core.new(mode, size)) + + if isinstance(color, str): + # css3-style specifier + + from . import ImageColor + + color = ImageColor.getcolor(color, mode) + + im = Image() + if mode == "P" and isinstance(color, (list, tuple)) and len(color) in [3, 4]: + # RGB or RGBA value for a P image + from . import ImagePalette + + im.palette = ImagePalette.ImagePalette() + color = im.palette.getcolor(color) + return im._new(core.fill(mode, size, color)) + + +def frombytes(mode, size, data, decoder_name="raw", *args): + """ + Creates a copy of an image memory from pixel data in a buffer. + + In its simplest form, this function takes three arguments + (mode, size, and unpacked pixel data). + + You can also use any pixel decoder supported by PIL. For more + information on available decoders, see the section + :ref:`Writing Your Own File Decoder `. + + Note that this function decodes pixel data only, not entire images. + If you have an entire image in a string, wrap it in a + :py:class:`~io.BytesIO` object, and use :py:func:`~PIL.Image.open` to load + it. + + :param mode: The image mode. See: :ref:`concept-modes`. + :param size: The image size. + :param data: A byte buffer containing raw data for the given mode. + :param decoder_name: What decoder to use. + :param args: Additional parameters for the given decoder. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + _check_size(size) + + # may pass tuple instead of argument list + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + if decoder_name == "raw" and args == (): + args = mode + + im = new(mode, size) + im.frombytes(data, decoder_name, args) + return im + + +def frombuffer(mode, size, data, decoder_name="raw", *args): + """ + Creates an image memory referencing pixel data in a byte buffer. 
+ + This function is similar to :py:func:`~PIL.Image.frombytes`, but uses data + in the byte buffer, where possible. This means that changes to the + original buffer object are reflected in this image). Not all modes can + share memory; supported modes include "L", "RGBX", "RGBA", and "CMYK". + + Note that this function decodes pixel data only, not entire images. + If you have an entire image file in a string, wrap it in a + :py:class:`~io.BytesIO` object, and use :py:func:`~PIL.Image.open` to load it. + + In the current version, the default parameters used for the "raw" decoder + differs from that used for :py:func:`~PIL.Image.frombytes`. This is a + bug, and will probably be fixed in a future release. The current release + issues a warning if you do this; to disable the warning, you should provide + the full set of parameters. See below for details. + + :param mode: The image mode. See: :ref:`concept-modes`. + :param size: The image size. + :param data: A bytes or other buffer object containing raw + data for the given mode. + :param decoder_name: What decoder to use. + :param args: Additional parameters for the given decoder. For the + default encoder ("raw"), it's recommended that you provide the + full set of parameters:: + + frombuffer(mode, size, data, "raw", mode, 0, 1) + + :returns: An :py:class:`~PIL.Image.Image` object. + + .. versionadded:: 1.1.4 + """ + + _check_size(size) + + # may pass tuple instead of argument list + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + if decoder_name == "raw": + if args == (): + args = mode, 0, 1 + if args[0] in _MAPMODES: + im = new(mode, (1, 1)) + im = im._new(core.map_buffer(data, size, decoder_name, 0, args)) + im.readonly = 1 + return im + + return frombytes(mode, size, data, decoder_name, args) + + +def fromarray(obj, mode=None): + """ + Creates an image memory from an object exporting the array interface + (using the buffer protocol). 
+ + If ``obj`` is not contiguous, then the ``tobytes`` method is called + and :py:func:`~PIL.Image.frombuffer` is used. + + If you have an image in NumPy:: + + from PIL import Image + import numpy as np + im = Image.open('hopper.jpg') + a = np.asarray(im) + + Then this can be used to convert it to a Pillow image:: + + im = Image.fromarray(a) + + :param obj: Object with array interface + :param mode: Mode to use (will be determined from type if None) + See: :ref:`concept-modes`. + :returns: An image object. + + .. versionadded:: 1.1.6 + """ + arr = obj.__array_interface__ + shape = arr["shape"] + ndim = len(shape) + strides = arr.get("strides", None) + if mode is None: + try: + typekey = (1, 1) + shape[2:], arr["typestr"] + except KeyError as e: + raise TypeError("Cannot handle this data type") from e + try: + mode, rawmode = _fromarray_typemap[typekey] + except KeyError as e: + raise TypeError("Cannot handle this data type: %s, %s" % typekey) from e + else: + rawmode = mode + if mode in ["1", "L", "I", "P", "F"]: + ndmax = 2 + elif mode == "RGB": + ndmax = 3 + else: + ndmax = 4 + if ndim > ndmax: + raise ValueError(f"Too many dimensions: {ndim} > {ndmax}.") + + size = 1 if ndim == 1 else shape[1], shape[0] + if strides is not None: + if hasattr(obj, "tobytes"): + obj = obj.tobytes() + else: + obj = obj.tostring() + + return frombuffer(mode, size, obj, "raw", rawmode, 0, 1) + + +def fromqimage(im): + """Creates an image instance from a QImage image""" + from . import ImageQt + + if not ImageQt.qt_is_installed: + raise ImportError("Qt bindings are not installed") + return ImageQt.fromqimage(im) + + +def fromqpixmap(im): + """Creates an image instance from a QPixmap image""" + from . 
import ImageQt + + if not ImageQt.qt_is_installed: + raise ImportError("Qt bindings are not installed") + return ImageQt.fromqpixmap(im) + + +_fromarray_typemap = { + # (shape, typestr) => mode, rawmode + # first two members of shape are set to one + ((1, 1), "|b1"): ("1", "1;8"), + ((1, 1), "|u1"): ("L", "L"), + ((1, 1), "|i1"): ("I", "I;8"), + ((1, 1), "u2"): ("I", "I;16B"), + ((1, 1), "i2"): ("I", "I;16BS"), + ((1, 1), "u4"): ("I", "I;32B"), + ((1, 1), "i4"): ("I", "I;32BS"), + ((1, 1), "f4"): ("F", "F;32BF"), + ((1, 1), "f8"): ("F", "F;64BF"), + ((1, 1, 2), "|u1"): ("LA", "LA"), + ((1, 1, 3), "|u1"): ("RGB", "RGB"), + ((1, 1, 4), "|u1"): ("RGBA", "RGBA"), +} + +# shortcuts +_fromarray_typemap[((1, 1), _ENDIAN + "i4")] = ("I", "I") +_fromarray_typemap[((1, 1), _ENDIAN + "f4")] = ("F", "F") + + +def _decompression_bomb_check(size): + if MAX_IMAGE_PIXELS is None: + return + + pixels = size[0] * size[1] + + if pixels > 2 * MAX_IMAGE_PIXELS: + raise DecompressionBombError( + f"Image size ({pixels} pixels) exceeds limit of {2 * MAX_IMAGE_PIXELS} " + "pixels, could be decompression bomb DOS attack." + ) + + if pixels > MAX_IMAGE_PIXELS: + warnings.warn( + f"Image size ({pixels} pixels) exceeds limit of {MAX_IMAGE_PIXELS} pixels, " + "could be decompression bomb DOS attack.", + DecompressionBombWarning, + ) + + +def open(fp, mode="r", formats=None): + """ + Opens and identifies the given image file. + + This is a lazy operation; this function identifies the file, but + the file remains open and the actual image data is not read from + the file until you try to process the data (or call the + :py:meth:`~PIL.Image.Image.load` method). See + :py:func:`~PIL.Image.new`. See :ref:`file-handling`. + + :param fp: A filename (string), pathlib.Path object or a file object. + The file object must implement ``file.read``, + ``file.seek``, and ``file.tell`` methods, + and be opened in binary mode. + :param mode: The mode. If given, this argument must be "r". 
+ :param formats: A list or tuple of formats to attempt to load the file in. + This can be used to restrict the set of formats checked. + Pass ``None`` to try all supported formats. You can print the set of + available formats by running ``python3 -m PIL`` or using + the :py:func:`PIL.features.pilinfo` function. + :returns: An :py:class:`~PIL.Image.Image` object. + :exception FileNotFoundError: If the file cannot be found. + :exception PIL.UnidentifiedImageError: If the image cannot be opened and + identified. + :exception ValueError: If the ``mode`` is not "r", or if a ``StringIO`` + instance is used for ``fp``. + :exception TypeError: If ``formats`` is not ``None``, a list or a tuple. + """ + + if mode != "r": + raise ValueError(f"bad mode {repr(mode)}") + elif isinstance(fp, io.StringIO): + raise ValueError( + "StringIO cannot be used to open an image. " + "Binary data must be used instead." + ) + + if formats is None: + formats = ID + elif not isinstance(formats, (list, tuple)): + raise TypeError("formats must be a list or tuple") + + exclusive_fp = False + filename = "" + if isinstance(fp, Path): + filename = str(fp.resolve()) + elif isPath(fp): + filename = fp + + if filename: + fp = builtins.open(filename, "rb") + exclusive_fp = True + + try: + fp.seek(0) + except (AttributeError, io.UnsupportedOperation): + fp = io.BytesIO(fp.read()) + exclusive_fp = True + + prefix = fp.read(16) + + preinit() + + accept_warnings = [] + + def _open_core(fp, filename, prefix, formats): + for i in formats: + i = i.upper() + if i not in OPEN: + init() + try: + factory, accept = OPEN[i] + result = not accept or accept(prefix) + if type(result) in [str, bytes]: + accept_warnings.append(result) + elif result: + fp.seek(0) + im = factory(fp, filename) + _decompression_bomb_check(im.size) + return im + except (SyntaxError, IndexError, TypeError, struct.error): + # Leave disabled by default, spams the logs with image + # opening failures that are entirely expected. 
+ # logger.debug("", exc_info=True) + continue + except BaseException: + if exclusive_fp: + fp.close() + raise + return None + + im = _open_core(fp, filename, prefix, formats) + + if im is None: + if init(): + im = _open_core(fp, filename, prefix, formats) + + if im: + im._exclusive_fp = exclusive_fp + return im + + if exclusive_fp: + fp.close() + for message in accept_warnings: + warnings.warn(message) + raise UnidentifiedImageError( + "cannot identify image file %r" % (filename if filename else fp) + ) + + +# +# Image processing. + + +def alpha_composite(im1, im2): + """ + Alpha composite im2 over im1. + + :param im1: The first image. Must have mode RGBA. + :param im2: The second image. Must have mode RGBA, and the same size as + the first image. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + im1.load() + im2.load() + return im1._new(core.alpha_composite(im1.im, im2.im)) + + +def blend(im1, im2, alpha): + """ + Creates a new image by interpolating between two input images, using + a constant alpha.:: + + out = image1 * (1.0 - alpha) + image2 * alpha + + :param im1: The first image. + :param im2: The second image. Must have the same mode and size as + the first image. + :param alpha: The interpolation alpha factor. If alpha is 0.0, a + copy of the first image is returned. If alpha is 1.0, a copy of + the second image is returned. There are no restrictions on the + alpha value. If necessary, the result is clipped to fit into + the allowed output range. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + im1.load() + im2.load() + return im1._new(core.blend(im1.im, im2.im, alpha)) + + +def composite(image1, image2, mask): + """ + Create composite image by blending images using a transparency mask. + + :param image1: The first image. + :param image2: The second image. Must have the same mode and + size as the first image. + :param mask: A mask image. 
This image can have mode + "1", "L", or "RGBA", and must have the same size as the + other two images. + """ + + image = image2.copy() + image.paste(image1, None, mask) + return image + + +def eval(image, *args): + """ + Applies the function (which should take one argument) to each pixel + in the given image. If the image has more than one band, the same + function is applied to each band. Note that the function is + evaluated once for each possible pixel value, so you cannot use + random components or other generators. + + :param image: The input image. + :param function: A function object, taking one integer argument. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + return image.point(args[0]) + + +def merge(mode, bands): + """ + Merge a set of single band images into a new multiband image. + + :param mode: The mode to use for the output image. See: + :ref:`concept-modes`. + :param bands: A sequence containing one single-band image for + each band in the output image. All bands must have the + same size. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + if getmodebands(mode) != len(bands) or "*" in mode: + raise ValueError("wrong number of bands") + for band in bands[1:]: + if band.mode != getmodetype(mode): + raise ValueError("mode mismatch") + if band.size != bands[0].size: + raise ValueError("size mismatch") + for band in bands: + band.load() + return bands[0]._new(core.merge(mode, *[b.im for b in bands])) + + +# -------------------------------------------------------------------- +# Plugin registry + + +def register_open(id, factory, accept=None): + """ + Register an image file plugin. This function should not be used + in application code. + + :param id: An image format identifier. + :param factory: An image file factory method. + :param accept: An optional function that can be used to quickly + reject images having another format. 
+ """ + id = id.upper() + ID.append(id) + OPEN[id] = factory, accept + + +def register_mime(id, mimetype): + """ + Registers an image MIME type. This function should not be used + in application code. + + :param id: An image format identifier. + :param mimetype: The image MIME type for this format. + """ + MIME[id.upper()] = mimetype + + +def register_save(id, driver): + """ + Registers an image save function. This function should not be + used in application code. + + :param id: An image format identifier. + :param driver: A function to save images in this format. + """ + SAVE[id.upper()] = driver + + +def register_save_all(id, driver): + """ + Registers an image function to save all the frames + of a multiframe format. This function should not be + used in application code. + + :param id: An image format identifier. + :param driver: A function to save images in this format. + """ + SAVE_ALL[id.upper()] = driver + + +def register_extension(id, extension): + """ + Registers an image extension. This function should not be + used in application code. + + :param id: An image format identifier. + :param extension: An extension used for this format. + """ + EXTENSION[extension.lower()] = id.upper() + + +def register_extensions(id, extensions): + """ + Registers image extensions. This function should not be + used in application code. + + :param id: An image format identifier. + :param extensions: A list of extensions used for this format. + """ + for extension in extensions: + register_extension(id, extension) + + +def registered_extensions(): + """ + Returns a dictionary containing all file extensions belonging + to registered plugins + """ + if not EXTENSION: + init() + return EXTENSION + + +def register_decoder(name, decoder): + """ + Registers an image decoder. This function should not be + used in application code. + + :param name: The name of the decoder + :param decoder: A callable(mode, args) that returns an + ImageFile.PyDecoder object + + .. 
versionadded:: 4.1.0 + """ + DECODERS[name] = decoder + + +def register_encoder(name, encoder): + """ + Registers an image encoder. This function should not be + used in application code. + + :param name: The name of the encoder + :param encoder: A callable(mode, args) that returns an + ImageFile.PyEncoder object + + .. versionadded:: 4.1.0 + """ + ENCODERS[name] = encoder + + +# -------------------------------------------------------------------- +# Simple display support. + + +def _show(image, **options): + options["_internal_pillow"] = True + _showxv(image, **options) + + +def _showxv(image, title=None, **options): + from . import ImageShow + + if "_internal_pillow" in options: + del options["_internal_pillow"] + else: + warnings.warn( + "_showxv is deprecated and will be removed in Pillow 9 (2022-01-02). " + "Use Image.show instead.", + DeprecationWarning, + ) + ImageShow.show(image, title, **options) + + +# -------------------------------------------------------------------- +# Effects + + +def effect_mandelbrot(size, extent, quality): + """ + Generate a Mandelbrot set covering the given extent. + + :param size: The requested size in pixels, as a 2-tuple: + (width, height). + :param extent: The extent to cover, as a 4-tuple: + (x0, y0, x1, y2). + :param quality: Quality. + """ + return Image()._new(core.effect_mandelbrot(size, extent, quality)) + + +def effect_noise(size, sigma): + """ + Generate Gaussian noise centered around 128. + + :param size: The requested size in pixels, as a 2-tuple: + (width, height). + :param sigma: Standard deviation of noise. + """ + return Image()._new(core.effect_noise(size, sigma)) + + +def linear_gradient(mode): + """ + Generate 256x256 linear gradient from black to white, top to bottom. + + :param mode: Input mode. + """ + return Image()._new(core.linear_gradient(mode)) + + +def radial_gradient(mode): + """ + Generate 256x256 radial gradient from black to white, centre to edge. + + :param mode: Input mode. 
+ """ + return Image()._new(core.radial_gradient(mode)) + + +# -------------------------------------------------------------------- +# Resources + + +def _apply_env_variables(env=None): + if env is None: + env = os.environ + + for var_name, setter in [ + ("PILLOW_ALIGNMENT", core.set_alignment), + ("PILLOW_BLOCK_SIZE", core.set_block_size), + ("PILLOW_BLOCKS_MAX", core.set_blocks_max), + ]: + if var_name not in env: + continue + + var = env[var_name].lower() + + units = 1 + for postfix, mul in [("k", 1024), ("m", 1024 * 1024)]: + if var.endswith(postfix): + units = mul + var = var[: -len(postfix)] + + try: + var = int(var) * units + except ValueError: + warnings.warn(f"{var_name} is not int") + continue + + try: + setter(var) + except ValueError as e: + warnings.warn(f"{var_name}: {e}") + + +_apply_env_variables() +atexit.register(core.clear_cache) + + +class Exif(MutableMapping): + endian = None + + def __init__(self): + self._data = {} + self._ifds = {} + self._info = None + self._loaded_exif = None + + def _fixup(self, value): + try: + if len(value) == 1 and isinstance(value, tuple): + return value[0] + except Exception: + pass + return value + + def _fixup_dict(self, src_dict): + # Helper function + # returns a dict with any single item tuples/lists as individual values + return {k: self._fixup(v) for k, v in src_dict.items()} + + def _get_ifd_dict(self, offset): + try: + # an offset pointer to the location of the nested embedded IFD. + # It should be a long, but may be corrupted. + self.fp.seek(offset) + except (KeyError, TypeError): + pass + else: + from . import TiffImagePlugin + + info = TiffImagePlugin.ImageFileDirectory_v2(self.head) + info.load(self.fp) + return self._fixup_dict(info) + + def _get_head(self): + if self.endian == "<": + return b"II\x2A\x00\x08\x00\x00\x00" + else: + return b"MM\x00\x2A\x00\x00\x00\x08" + + def load(self, data): + # Extract EXIF information. 
This is highly experimental, + # and is likely to be replaced with something better in a future + # version. + + # The EXIF record consists of a TIFF file embedded in a JPEG + # application marker (!). + if data == self._loaded_exif: + return + self._loaded_exif = data + self._data.clear() + self._ifds.clear() + if not data: + self._info = None + return + + if data.startswith(b"Exif\x00\x00"): + data = data[6:] + self.fp = io.BytesIO(data) + self.head = self.fp.read(8) + # process dictionary + from . import TiffImagePlugin + + self._info = TiffImagePlugin.ImageFileDirectory_v2(self.head) + self.endian = self._info._endian + self.fp.seek(self._info.next) + self._info.load(self.fp) + + def load_from_fp(self, fp, offset=None): + self._loaded_exif = None + self._data.clear() + self._ifds.clear() + + # process dictionary + from . import TiffImagePlugin + + self.fp = fp + if offset is not None: + self.head = self._get_head() + else: + self.head = self.fp.read(8) + self._info = TiffImagePlugin.ImageFileDirectory_v2(self.head) + if self.endian is None: + self.endian = self._info._endian + if offset is None: + offset = self._info.next + self.fp.seek(offset) + self._info.load(self.fp) + + def _get_merged_dict(self): + merged_dict = dict(self) + + # get EXIF extension + if 0x8769 in self: + ifd = self._get_ifd_dict(self[0x8769]) + if ifd: + merged_dict.update(ifd) + + # GPS + if 0x8825 in self: + merged_dict[0x8825] = self._get_ifd_dict(self[0x8825]) + + return merged_dict + + def tobytes(self, offset=8): + from . 
import TiffImagePlugin + + head = self._get_head() + ifd = TiffImagePlugin.ImageFileDirectory_v2(ifh=head) + for tag, value in self.items(): + if tag in [0x8769, 0x8225, 0x8825] and not isinstance(value, dict): + value = self.get_ifd(tag) + if ( + tag == 0x8769 + and 0xA005 in value + and not isinstance(value[0xA005], dict) + ): + value = value.copy() + value[0xA005] = self.get_ifd(0xA005) + ifd[tag] = value + return b"Exif\x00\x00" + head + ifd.tobytes(offset) + + def get_ifd(self, tag): + if tag not in self._ifds: + if tag in [0x8769, 0x8825]: + # exif, gpsinfo + if tag in self: + self._ifds[tag] = self._get_ifd_dict(self[tag]) + elif tag in [0xA005, 0x927C]: + # interop, makernote + if 0x8769 not in self._ifds: + self.get_ifd(0x8769) + tag_data = self._ifds[0x8769][tag] + if tag == 0x927C: + # makernote + from .TiffImagePlugin import ImageFileDirectory_v2 + + if tag_data[:8] == b"FUJIFILM": + ifd_offset = i32le(tag_data, 8) + ifd_data = tag_data[ifd_offset:] + + makernote = {} + for i in range(0, struct.unpack(" 4: + (offset,) = struct.unpack("H", tag_data[:2])[0]): + ifd_tag, typ, count, data = struct.unpack( + ">HHL4s", tag_data[i * 12 + 2 : (i + 1) * 12 + 2] + ) + if ifd_tag == 0x1101: + # CameraInfo + (offset,) = struct.unpack(">L", data) + self.fp.seek(offset) + + camerainfo = {"ModelID": self.fp.read(4)} + + self.fp.read(4) + # Seconds since 2000 + camerainfo["TimeStamp"] = i32le(self.fp.read(12)) + + self.fp.read(4) + camerainfo["InternalSerialNumber"] = self.fp.read(4) + + self.fp.read(12) + parallax = self.fp.read(4) + handler = ImageFileDirectory_v2._load_dispatch[ + TiffTags.FLOAT + ][1] + camerainfo["Parallax"] = handler( + ImageFileDirectory_v2(), parallax, False + ) + + self.fp.read(4) + camerainfo["Category"] = self.fp.read(2) + + makernote = {0x1101: dict(self._fixup_dict(camerainfo))} + self._ifds[tag] = makernote + else: + # interop + self._ifds[tag] = self._get_ifd_dict(tag_data) + return self._ifds.get(tag, {}) + + def __str__(self): + if 
self._info is not None: + # Load all keys into self._data + for tag in self._info.keys(): + self[tag] + + return str(self._data) + + def __len__(self): + keys = set(self._data) + if self._info is not None: + keys.update(self._info) + return len(keys) + + def __getitem__(self, tag): + if self._info is not None and tag not in self._data and tag in self._info: + self._data[tag] = self._fixup(self._info[tag]) + del self._info[tag] + return self._data[tag] + + def __contains__(self, tag): + return tag in self._data or (self._info is not None and tag in self._info) + + def __setitem__(self, tag, value): + if self._info is not None and tag in self._info: + del self._info[tag] + self._data[tag] = value + + def __delitem__(self, tag): + if self._info is not None and tag in self._info: + del self._info[tag] + else: + del self._data[tag] + + def __iter__(self): + keys = set(self._data) + if self._info is not None: + keys.update(self._info) + return iter(keys) diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageChops.py b/.venv/lib/python3.9/site-packages/PIL/ImageChops.py new file mode 100644 index 00000000..61d3a295 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageChops.py @@ -0,0 +1,328 @@ +# +# The Python Imaging Library. +# $Id$ +# +# standard channel operations +# +# History: +# 1996-03-24 fl Created +# 1996-08-13 fl Added logical operations (for "1" images) +# 2000-10-12 fl Added offset method (from Image.py) +# +# Copyright (c) 1997-2000 by Secret Labs AB +# Copyright (c) 1996-2000 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image + + +def constant(image, value): + """Fill a channel with a given grey level. + + :rtype: :py:class:`~PIL.Image.Image` + """ + + return Image.new("L", image.size, value) + + +def duplicate(image): + """Copy a channel. Alias for :py:meth:`PIL.Image.Image.copy`. 
+ + :rtype: :py:class:`~PIL.Image.Image` + """ + + return image.copy() + + +def invert(image): + """ + Invert an image (channel). + + .. code-block:: python + + out = MAX - image + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image.load() + return image._new(image.im.chop_invert()) + + +def lighter(image1, image2): + """ + Compares the two images, pixel by pixel, and returns a new image containing + the lighter values. + + .. code-block:: python + + out = max(image1, image2) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_lighter(image2.im)) + + +def darker(image1, image2): + """ + Compares the two images, pixel by pixel, and returns a new image containing + the darker values. + + .. code-block:: python + + out = min(image1, image2) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_darker(image2.im)) + + +def difference(image1, image2): + """ + Returns the absolute value of the pixel-by-pixel difference between the two + images. + + .. code-block:: python + + out = abs(image1 - image2) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_difference(image2.im)) + + +def multiply(image1, image2): + """ + Superimposes two images on top of each other. + + If you multiply an image with a solid black image, the result is black. If + you multiply with a solid white image, the image is unaffected. + + .. code-block:: python + + out = image1 * image2 / MAX + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_multiply(image2.im)) + + +def screen(image1, image2): + """ + Superimposes two inverted images on top of each other. + + .. 
code-block:: python + + out = MAX - ((MAX - image1) * (MAX - image2) / MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_screen(image2.im)) + + +def soft_light(image1, image2): + """ + Superimposes two images on top of each other using the Soft Light algorithm + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_soft_light(image2.im)) + + +def hard_light(image1, image2): + """ + Superimposes two images on top of each other using the Hard Light algorithm + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_hard_light(image2.im)) + + +def overlay(image1, image2): + """ + Superimposes two images on top of each other using the Overlay algorithm + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_overlay(image2.im)) + + +def add(image1, image2, scale=1.0, offset=0): + """ + Adds two images, dividing the result by scale and adding the + offset. If omitted, scale defaults to 1.0, and offset to 0.0. + + .. code-block:: python + + out = ((image1 + image2) / scale + offset) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_add(image2.im, scale, offset)) + + +def subtract(image1, image2, scale=1.0, offset=0): + """ + Subtracts two images, dividing the result by scale and adding the offset. + If omitted, scale defaults to 1.0, and offset to 0.0. + + .. code-block:: python + + out = ((image1 - image2) / scale + offset) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_subtract(image2.im, scale, offset)) + + +def add_modulo(image1, image2): + """Add two images, without clipping the result. + + .. 
code-block:: python + + out = ((image1 + image2) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_add_modulo(image2.im)) + + +def subtract_modulo(image1, image2): + """Subtract two images, without clipping the result. + + .. code-block:: python + + out = ((image1 - image2) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_subtract_modulo(image2.im)) + + +def logical_and(image1, image2): + """Logical AND between two images. + + Both of the images must have mode "1". If you would like to perform a + logical AND on an image with a mode other than "1", try + :py:meth:`~PIL.ImageChops.multiply` instead, using a black-and-white mask + as the second image. + + .. code-block:: python + + out = ((image1 and image2) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_and(image2.im)) + + +def logical_or(image1, image2): + """Logical OR between two images. + + Both of the images must have mode "1". + + .. code-block:: python + + out = ((image1 or image2) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_or(image2.im)) + + +def logical_xor(image1, image2): + """Logical XOR between two images. + + Both of the images must have mode "1". + + .. code-block:: python + + out = ((bool(image1) != bool(image2)) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_xor(image2.im)) + + +def blend(image1, image2, alpha): + """Blend images using constant transparency weight. Alias for + :py:func:`PIL.Image.blend`. + + :rtype: :py:class:`~PIL.Image.Image` + """ + + return Image.blend(image1, image2, alpha) + + +def composite(image1, image2, mask): + """Create composite using transparency mask. 
Alias for + :py:func:`PIL.Image.composite`. + + :rtype: :py:class:`~PIL.Image.Image` + """ + + return Image.composite(image1, image2, mask) + + +def offset(image, xoffset, yoffset=None): + """Returns a copy of the image where data has been offset by the given + distances. Data wraps around the edges. If ``yoffset`` is omitted, it + is assumed to be equal to ``xoffset``. + + :param xoffset: The horizontal distance. + :param yoffset: The vertical distance. If omitted, both + distances are set to the same value. + :rtype: :py:class:`~PIL.Image.Image` + """ + + if yoffset is None: + yoffset = xoffset + image.load() + return image._new(image.im.offset(xoffset, yoffset)) diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageCms.py b/.venv/lib/python3.9/site-packages/PIL/ImageCms.py new file mode 100644 index 00000000..36990959 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageCms.py @@ -0,0 +1,999 @@ +# The Python Imaging Library. +# $Id$ + +# Optional color management support, based on Kevin Cazabon's PyCMS +# library. + +# History: + +# 2009-03-08 fl Added to PIL. + +# Copyright (C) 2002-2003 Kevin Cazabon +# Copyright (c) 2009 by Fredrik Lundh +# Copyright (c) 2013 by Eric Soroos + +# See the README file for information on usage and redistribution. See +# below for the original description. + +import sys + +from PIL import Image + +try: + from PIL import _imagingcms +except ImportError as ex: + # Allow error import for doc purposes, but error out when accessing + # anything in core. + from ._util import deferred_error + + _imagingcms = deferred_error(ex) + +DESCRIPTION = """ +pyCMS + + a Python / PIL interface to the littleCMS ICC Color Management System + Copyright (C) 2002-2003 Kevin Cazabon + kevin@cazabon.com + http://www.cazabon.com + + pyCMS home page: http://www.cazabon.com/pyCMS + littleCMS home page: https://www.littlecms.com + (littleCMS is Copyright (C) 1998-2001 Marti Maria) + + Originally released under LGPL. 
Graciously donated to PIL in + March 2009, for distribution under the standard PIL license + + The pyCMS.py module provides a "clean" interface between Python/PIL and + pyCMSdll, taking care of some of the more complex handling of the direct + pyCMSdll functions, as well as error-checking and making sure that all + relevant data is kept together. + + While it is possible to call pyCMSdll functions directly, it's not highly + recommended. + + Version History: + + 1.0.0 pil Oct 2013 Port to LCMS 2. + + 0.1.0 pil mod March 10, 2009 + + Renamed display profile to proof profile. The proof + profile is the profile of the device that is being + simulated, not the profile of the device which is + actually used to display/print the final simulation + (that'd be the output profile) - also see LCMSAPI.txt + input colorspace -> using 'renderingIntent' -> proof + colorspace -> using 'proofRenderingIntent' -> output + colorspace + + Added LCMS FLAGS support. + Added FLAGS["SOFTPROOFING"] as default flag for + buildProofTransform (otherwise the proof profile/intent + would be ignored). + + 0.1.0 pil March 2009 - added to PIL, as PIL.ImageCms + + 0.0.2 alpha Jan 6, 2002 + + Added try/except statements around type() checks of + potential CObjects... Python won't let you use type() + on them, and raises a TypeError (stupid, if you ask + me!) + + Added buildProofTransformFromOpenProfiles() function. + Additional fixes in DLL, see DLL code for details. + + 0.0.1 alpha first public release, Dec. 26, 2002 + + Known to-do list with current version (of Python interface, not pyCMSdll): + + none + +""" + +VERSION = "1.0.0 pil" + +# --------------------------------------------------------------------. 
+ +core = _imagingcms + +# +# intent/direction values + +INTENT_PERCEPTUAL = 0 +INTENT_RELATIVE_COLORIMETRIC = 1 +INTENT_SATURATION = 2 +INTENT_ABSOLUTE_COLORIMETRIC = 3 + +DIRECTION_INPUT = 0 +DIRECTION_OUTPUT = 1 +DIRECTION_PROOF = 2 + +# +# flags + +FLAGS = { + "MATRIXINPUT": 1, + "MATRIXOUTPUT": 2, + "MATRIXONLY": (1 | 2), + "NOWHITEONWHITEFIXUP": 4, # Don't hot fix scum dot + # Don't create prelinearization tables on precalculated transforms + # (internal use): + "NOPRELINEARIZATION": 16, + "GUESSDEVICECLASS": 32, # Guess device class (for transform2devicelink) + "NOTCACHE": 64, # Inhibit 1-pixel cache + "NOTPRECALC": 256, + "NULLTRANSFORM": 512, # Don't transform anyway + "HIGHRESPRECALC": 1024, # Use more memory to give better accuracy + "LOWRESPRECALC": 2048, # Use less memory to minimize resources + "WHITEBLACKCOMPENSATION": 8192, + "BLACKPOINTCOMPENSATION": 8192, + "GAMUTCHECK": 4096, # Out of Gamut alarm + "SOFTPROOFING": 16384, # Do softproofing + "PRESERVEBLACK": 32768, # Black preservation + "NODEFAULTRESOURCEDEF": 16777216, # CRD special + "GRIDPOINTS": lambda n: ((n) & 0xFF) << 16, # Gridpoints +} + +_MAX_FLAG = 0 +for flag in FLAGS.values(): + if isinstance(flag, int): + _MAX_FLAG = _MAX_FLAG | flag + + +# --------------------------------------------------------------------. +# Experimental PIL-level API +# --------------------------------------------------------------------. + +## +# Profile. 
+ + +class ImageCmsProfile: + def __init__(self, profile): + """ + :param profile: Either a string representing a filename, + a file like object containing a profile or a + low-level profile object + + """ + + if isinstance(profile, str): + if sys.platform == "win32": + profile_bytes_path = profile.encode() + try: + profile_bytes_path.decode("ascii") + except UnicodeDecodeError: + with open(profile, "rb") as f: + self._set(core.profile_frombytes(f.read())) + return + self._set(core.profile_open(profile), profile) + elif hasattr(profile, "read"): + self._set(core.profile_frombytes(profile.read())) + elif isinstance(profile, _imagingcms.CmsProfile): + self._set(profile) + else: + raise TypeError("Invalid type for Profile") + + def _set(self, profile, filename=None): + self.profile = profile + self.filename = filename + if profile: + self.product_name = None # profile.product_name + self.product_info = None # profile.product_info + else: + self.product_name = None + self.product_info = None + + def tobytes(self): + """ + Returns the profile in a format suitable for embedding in + saved images. + + :returns: a bytes object containing the ICC profile. + """ + + return core.profile_tobytes(self.profile) + + +class ImageCmsTransform(Image.ImagePointHandler): + + """ + Transform. This can be used with the procedural API, or with the standard + :py:func:`~PIL.Image.Image.point` method. + + Will return the output profile in the ``output.info['icc_profile']``. 
+ """ + + def __init__( + self, + input, + output, + input_mode, + output_mode, + intent=INTENT_PERCEPTUAL, + proof=None, + proof_intent=INTENT_ABSOLUTE_COLORIMETRIC, + flags=0, + ): + if proof is None: + self.transform = core.buildTransform( + input.profile, output.profile, input_mode, output_mode, intent, flags + ) + else: + self.transform = core.buildProofTransform( + input.profile, + output.profile, + proof.profile, + input_mode, + output_mode, + intent, + proof_intent, + flags, + ) + # Note: inputMode and outputMode are for pyCMS compatibility only + self.input_mode = self.inputMode = input_mode + self.output_mode = self.outputMode = output_mode + + self.output_profile = output + + def point(self, im): + return self.apply(im) + + def apply(self, im, imOut=None): + im.load() + if imOut is None: + imOut = Image.new(self.output_mode, im.size, None) + self.transform.apply(im.im.id, imOut.im.id) + imOut.info["icc_profile"] = self.output_profile.tobytes() + return imOut + + def apply_in_place(self, im): + im.load() + if im.mode != self.output_mode: + raise ValueError("mode mismatch") # wrong output mode + self.transform.apply(im.im.id, im.im.id) + im.info["icc_profile"] = self.output_profile.tobytes() + return im + + +def get_display_profile(handle=None): + """ + (experimental) Fetches the profile for the current display device. + + :returns: ``None`` if the profile is not known. + """ + + if sys.platform != "win32": + return None + + from PIL import ImageWin + + if isinstance(handle, ImageWin.HDC): + profile = core.get_display_profile_win32(handle, 1) + else: + profile = core.get_display_profile_win32(handle or 0) + if profile is None: + return None + return ImageCmsProfile(profile) + + +# --------------------------------------------------------------------. +# pyCMS compatible layer +# --------------------------------------------------------------------. + + +class PyCMSError(Exception): + + """(pyCMS) Exception class. 
+ This is used for all errors in the pyCMS API.""" + + pass + + +def profileToProfile( + im, + inputProfile, + outputProfile, + renderingIntent=INTENT_PERCEPTUAL, + outputMode=None, + inPlace=False, + flags=0, +): + """ + (pyCMS) Applies an ICC transformation to a given image, mapping from + ``inputProfile`` to ``outputProfile``. + + If the input or output profiles specified are not valid filenames, a + :exc:`PyCMSError` will be raised. If ``inPlace`` is ``True`` and + ``outputMode != im.mode``, a :exc:`PyCMSError` will be raised. + If an error occurs during application of the profiles, + a :exc:`PyCMSError` will be raised. + If ``outputMode`` is not a mode supported by the ``outputProfile`` (or by pyCMS), + a :exc:`PyCMSError` will be raised. + + This function applies an ICC transformation to im from ``inputProfile``'s + color space to ``outputProfile``'s color space using the specified rendering + intent to decide how to handle out-of-gamut colors. + + ``outputMode`` can be used to specify that a color mode conversion is to + be done using these profiles, but the specified profiles must be able + to handle that mode. I.e., if converting im from RGB to CMYK using + profiles, the input profile must handle RGB data, and the output + profile must handle CMYK data. + + :param im: An open :py:class:`~PIL.Image.Image` object (i.e. Image.new(...) + or Image.open(...), etc.) 
+ :param inputProfile: String, as a valid filename path to the ICC input + profile you wish to use for this image, or a profile object + :param outputProfile: String, as a valid filename path to the ICC output + profile you wish to use for this image, or a profile object + :param renderingIntent: Integer (0-3) specifying the rendering intent you + wish to use for the transform + + ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT) + ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1 + ImageCms.INTENT_SATURATION = 2 + ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3 + + see the pyCMS documentation for details on rendering intents and what + they do. + :param outputMode: A valid PIL mode for the output image (i.e. "RGB", + "CMYK", etc.). Note: if rendering the image "inPlace", outputMode + MUST be the same mode as the input, or omitted completely. If + omitted, the outputMode will be the same as the mode of the input + image (im.mode) + :param inPlace: Boolean. If ``True``, the original image is modified in-place, + and ``None`` is returned. If ``False`` (default), a new + :py:class:`~PIL.Image.Image` object is returned with the transform applied. + :param flags: Integer (0-...) 
specifying additional flags + :returns: Either None or a new :py:class:`~PIL.Image.Image` object, depending on + the value of ``inPlace`` + :exception PyCMSError: + """ + + if outputMode is None: + outputMode = im.mode + + if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3): + raise PyCMSError("renderingIntent must be an integer between 0 and 3") + + if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG): + raise PyCMSError("flags must be an integer between 0 and %s" + _MAX_FLAG) + + try: + if not isinstance(inputProfile, ImageCmsProfile): + inputProfile = ImageCmsProfile(inputProfile) + if not isinstance(outputProfile, ImageCmsProfile): + outputProfile = ImageCmsProfile(outputProfile) + transform = ImageCmsTransform( + inputProfile, + outputProfile, + im.mode, + outputMode, + renderingIntent, + flags=flags, + ) + if inPlace: + transform.apply_in_place(im) + imOut = None + else: + imOut = transform.apply(im) + except (OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + return imOut + + +def getOpenProfile(profileFilename): + """ + (pyCMS) Opens an ICC profile file. + + The PyCMSProfile object can be passed back into pyCMS for use in creating + transforms and such (as in ImageCms.buildTransformFromOpenProfiles()). + + If ``profileFilename`` is not a valid filename for an ICC profile, + a :exc:`PyCMSError` will be raised. + + :param profileFilename: String, as a valid filename path to the ICC profile + you wish to open, or a file-like object. + :returns: A CmsProfile class object. + :exception PyCMSError: + """ + + try: + return ImageCmsProfile(profileFilename) + except (OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def buildTransform( + inputProfile, + outputProfile, + inMode, + outMode, + renderingIntent=INTENT_PERCEPTUAL, + flags=0, +): + """ + (pyCMS) Builds an ICC transform mapping from the ``inputProfile`` to the + ``outputProfile``. 
Use applyTransform to apply the transform to a given + image. + + If the input or output profiles specified are not valid filenames, a + :exc:`PyCMSError` will be raised. If an error occurs during creation + of the transform, a :exc:`PyCMSError` will be raised. + + If ``inMode`` or ``outMode`` are not a mode supported by the ``outputProfile`` + (or by pyCMS), a :exc:`PyCMSError` will be raised. + + This function builds and returns an ICC transform from the ``inputProfile`` + to the ``outputProfile`` using the ``renderingIntent`` to determine what to do + with out-of-gamut colors. It will ONLY work for converting images that + are in ``inMode`` to images that are in ``outMode`` color format (PIL mode, + i.e. "RGB", "RGBA", "CMYK", etc.). + + Building the transform is a fair part of the overhead in + ImageCms.profileToProfile(), so if you're planning on converting multiple + images using the same input/output settings, this can save you time. + Once you have a transform object, it can be used with + ImageCms.applyProfile() to convert images without the need to re-compute + the lookup table for the transform. + + The reason pyCMS returns a class object rather than a handle directly + to the transform is that it needs to keep track of the PIL input/output + modes that the transform is meant for. These attributes are stored in + the ``inMode`` and ``outMode`` attributes of the object (which can be + manually overridden if you really want to, but I don't know of any + time that would be of use, or would even work). + + :param inputProfile: String, as a valid filename path to the ICC input + profile you wish to use for this transform, or a profile object + :param outputProfile: String, as a valid filename path to the ICC output + profile you wish to use for this transform, or a profile object + :param inMode: String, as a valid PIL mode that the appropriate profile + also supports (i.e. "RGB", "RGBA", "CMYK", etc.) 
+ :param outMode: String, as a valid PIL mode that the appropriate profile + also supports (i.e. "RGB", "RGBA", "CMYK", etc.) + :param renderingIntent: Integer (0-3) specifying the rendering intent you + wish to use for the transform + + ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT) + ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1 + ImageCms.INTENT_SATURATION = 2 + ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3 + + see the pyCMS documentation for details on rendering intents and what + they do. + :param flags: Integer (0-...) specifying additional flags + :returns: A CmsTransform class object. + :exception PyCMSError: + """ + + if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3): + raise PyCMSError("renderingIntent must be an integer between 0 and 3") + + if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG): + raise PyCMSError("flags must be an integer between 0 and %s" + _MAX_FLAG) + + try: + if not isinstance(inputProfile, ImageCmsProfile): + inputProfile = ImageCmsProfile(inputProfile) + if not isinstance(outputProfile, ImageCmsProfile): + outputProfile = ImageCmsProfile(outputProfile) + return ImageCmsTransform( + inputProfile, outputProfile, inMode, outMode, renderingIntent, flags=flags + ) + except (OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def buildProofTransform( + inputProfile, + outputProfile, + proofProfile, + inMode, + outMode, + renderingIntent=INTENT_PERCEPTUAL, + proofRenderingIntent=INTENT_ABSOLUTE_COLORIMETRIC, + flags=FLAGS["SOFTPROOFING"], +): + """ + (pyCMS) Builds an ICC transform mapping from the ``inputProfile`` to the + ``outputProfile``, but tries to simulate the result that would be + obtained on the ``proofProfile`` device. + + If the input, output, or proof profiles specified are not valid + filenames, a :exc:`PyCMSError` will be raised. + + If an error occurs during creation of the transform, + a :exc:`PyCMSError` will be raised. 
+ + If ``inMode`` or ``outMode`` are not a mode supported by the ``outputProfile`` + (or by pyCMS), a :exc:`PyCMSError` will be raised. + + This function builds and returns an ICC transform from the ``inputProfile`` + to the ``outputProfile``, but tries to simulate the result that would be + obtained on the ``proofProfile`` device using ``renderingIntent`` and + ``proofRenderingIntent`` to determine what to do with out-of-gamut + colors. This is known as "soft-proofing". It will ONLY work for + converting images that are in ``inMode`` to images that are in outMode + color format (PIL mode, i.e. "RGB", "RGBA", "CMYK", etc.). + + Usage of the resulting transform object is exactly the same as with + ImageCms.buildTransform(). + + Proof profiling is generally used when using an output device to get a + good idea of what the final printed/displayed image would look like on + the ``proofProfile`` device when it's quicker and easier to use the + output device for judging color. Generally, this means that the + output device is a monitor, or a dye-sub printer (etc.), and the simulated + device is something more expensive, complicated, or time consuming + (making it difficult to make a real print for color judgement purposes). + + Soft-proofing basically functions by adjusting the colors on the + output device to match the colors of the device being simulated. However, + when the simulated device has a much wider gamut than the output + device, you may obtain marginal results. 
+ + :param inputProfile: String, as a valid filename path to the ICC input + profile you wish to use for this transform, or a profile object + :param outputProfile: String, as a valid filename path to the ICC output + (monitor, usually) profile you wish to use for this transform, or a + profile object + :param proofProfile: String, as a valid filename path to the ICC proof + profile you wish to use for this transform, or a profile object + :param inMode: String, as a valid PIL mode that the appropriate profile + also supports (i.e. "RGB", "RGBA", "CMYK", etc.) + :param outMode: String, as a valid PIL mode that the appropriate profile + also supports (i.e. "RGB", "RGBA", "CMYK", etc.) + :param renderingIntent: Integer (0-3) specifying the rendering intent you + wish to use for the input->proof (simulated) transform + + ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT) + ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1 + ImageCms.INTENT_SATURATION = 2 + ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3 + + see the pyCMS documentation for details on rendering intents and what + they do. + :param proofRenderingIntent: Integer (0-3) specifying the rendering intent + you wish to use for proof->output transform + + ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT) + ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1 + ImageCms.INTENT_SATURATION = 2 + ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3 + + see the pyCMS documentation for details on rendering intents and what + they do. + :param flags: Integer (0-...) specifying additional flags + :returns: A CmsTransform class object. 
+ :exception PyCMSError: + """ + + if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3): + raise PyCMSError("renderingIntent must be an integer between 0 and 3") + + if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG): + raise PyCMSError("flags must be an integer between 0 and %s" + _MAX_FLAG) + + try: + if not isinstance(inputProfile, ImageCmsProfile): + inputProfile = ImageCmsProfile(inputProfile) + if not isinstance(outputProfile, ImageCmsProfile): + outputProfile = ImageCmsProfile(outputProfile) + if not isinstance(proofProfile, ImageCmsProfile): + proofProfile = ImageCmsProfile(proofProfile) + return ImageCmsTransform( + inputProfile, + outputProfile, + inMode, + outMode, + renderingIntent, + proofProfile, + proofRenderingIntent, + flags, + ) + except (OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +buildTransformFromOpenProfiles = buildTransform +buildProofTransformFromOpenProfiles = buildProofTransform + + +def applyTransform(im, transform, inPlace=False): + """ + (pyCMS) Applies a transform to a given image. + + If ``im.mode != transform.inMode``, a :exc:`PyCMSError` is raised. + + If ``inPlace`` is ``True`` and ``transform.inMode != transform.outMode``, a + :exc:`PyCMSError` is raised. + + If ``im.mode``, ``transform.inMode`` or ``transform.outMode`` is not + supported by pyCMSdll or the profiles you used for the transform, a + :exc:`PyCMSError` is raised. + + If an error occurs while the transform is being applied, + a :exc:`PyCMSError` is raised. + + This function applies a pre-calculated transform (from + ImageCms.buildTransform() or ImageCms.buildTransformFromOpenProfiles()) + to an image. The transform can be used for multiple images, saving + considerable calculation time if doing the same conversion multiple times. + + If you want to modify im in-place instead of receiving a new image as + the return value, set ``inPlace`` to ``True``. 
This can only be done if + ``transform.inMode`` and ``transform.outMode`` are the same, because we can't + change the mode in-place (the buffer sizes for some modes are + different). The default behavior is to return a new :py:class:`~PIL.Image.Image` + object of the same dimensions in mode ``transform.outMode``. + + :param im: An :py:class:`~PIL.Image.Image` object, and im.mode must be the same + as the ``inMode`` supported by the transform. + :param transform: A valid CmsTransform class object + :param inPlace: Bool. If ``True``, ``im`` is modified in place and ``None`` is + returned, if ``False``, a new :py:class:`~PIL.Image.Image` object with the + transform applied is returned (and ``im`` is not changed). The default is + ``False``. + :returns: Either ``None``, or a new :py:class:`~PIL.Image.Image` object, + depending on the value of ``inPlace``. The profile will be returned in + the image's ``info['icc_profile']``. + :exception PyCMSError: + """ + + try: + if inPlace: + transform.apply_in_place(im) + imOut = None + else: + imOut = transform.apply(im) + except (TypeError, ValueError) as v: + raise PyCMSError(v) from v + + return imOut + + +def createProfile(colorSpace, colorTemp=-1): + """ + (pyCMS) Creates a profile. + + If colorSpace not in ``["LAB", "XYZ", "sRGB"]``, + a :exc:`PyCMSError` is raised. + + If using LAB and ``colorTemp`` is not a positive integer, + a :exc:`PyCMSError` is raised. + + If an error occurs while creating the profile, + a :exc:`PyCMSError` is raised. + + Use this function to create common profiles on-the-fly instead of + having to supply a profile on disk and knowing the path to it. It + returns a normal CmsProfile object that can be passed to + ImageCms.buildTransformFromOpenProfiles() to create a transform to apply + to images. + + :param colorSpace: String, the color space of the profile you wish to + create. + Currently only "LAB", "XYZ", and "sRGB" are supported. 
+ :param colorTemp: Positive integer for the white point for the profile, in + degrees Kelvin (i.e. 5000, 6500, 9600, etc.). The default is for D50 + illuminant if omitted (5000k). colorTemp is ONLY applied to LAB + profiles, and is ignored for XYZ and sRGB. + :returns: A CmsProfile class object + :exception PyCMSError: + """ + + if colorSpace not in ["LAB", "XYZ", "sRGB"]: + raise PyCMSError( + f"Color space not supported for on-the-fly profile creation ({colorSpace})" + ) + + if colorSpace == "LAB": + try: + colorTemp = float(colorTemp) + except (TypeError, ValueError) as e: + raise PyCMSError( + f'Color temperature must be numeric, "{colorTemp}" not valid' + ) from e + + try: + return core.createProfile(colorSpace, colorTemp) + except (TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getProfileName(profile): + """ + + (pyCMS) Gets the internal product name for the given profile. + + If ``profile`` isn't a valid CmsProfile object or filename to a profile, + a :exc:`PyCMSError` is raised If an error occurs while trying + to obtain the name tag, a :exc:`PyCMSError` is raised. + + Use this function to obtain the INTERNAL name of the profile (stored + in an ICC tag in the profile itself), usually the one used when the + profile was originally created. Sometimes this tag also contains + additional information supplied by the creator. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal name of the profile as stored + in an ICC tag. + :exception PyCMSError: + """ + + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + # do it in python, not c. 
+ # // name was "%s - %s" (model, manufacturer) || Description , + # // but if the Model and Manufacturer were the same or the model + # // was long, Just the model, in 1.x + model = profile.profile.model + manufacturer = profile.profile.manufacturer + + if not (model or manufacturer): + return (profile.profile.profile_description or "") + "\n" + if not manufacturer or len(model) > 30: + return model + "\n" + return f"{model} - {manufacturer}\n" + + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getProfileInfo(profile): + """ + (pyCMS) Gets the internal product information for the given profile. + + If ``profile`` isn't a valid CmsProfile object or filename to a profile, + a :exc:`PyCMSError` is raised. + + If an error occurs while trying to obtain the info tag, + a :exc:`PyCMSError` is raised. + + Use this function to obtain the information stored in the profile's + info tag. This often contains details about the profile, and how it + was created, as supplied by the creator. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal profile information stored in + an ICC tag. + :exception PyCMSError: + """ + + try: + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + # add an extra newline to preserve pyCMS compatibility + # Python, not C. the white point bits weren't working well, + # so skipping. + # info was description \r\n\r\n copyright \r\n\r\n K007 tag \r\n\r\n whitepoint + description = profile.profile.profile_description + cpright = profile.profile.copyright + arr = [] + for elt in (description, cpright): + if elt: + arr.append(elt) + return "\r\n\r\n".join(arr) + "\r\n\r\n" + + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getProfileCopyright(profile): + """ + (pyCMS) Gets the copyright for the given profile. 
+ + If ``profile`` isn't a valid CmsProfile object or filename to a profile, a + :exc:`PyCMSError` is raised. + + If an error occurs while trying to obtain the copyright tag, + a :exc:`PyCMSError` is raised. + + Use this function to obtain the information stored in the profile's + copyright tag. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal profile information stored in + an ICC tag. + :exception PyCMSError: + """ + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return (profile.profile.copyright or "") + "\n" + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getProfileManufacturer(profile): + """ + (pyCMS) Gets the manufacturer for the given profile. + + If ``profile`` isn't a valid CmsProfile object or filename to a profile, a + :exc:`PyCMSError` is raised. + + If an error occurs while trying to obtain the manufacturer tag, a + :exc:`PyCMSError` is raised. + + Use this function to obtain the information stored in the profile's + manufacturer tag. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal profile information stored in + an ICC tag. + :exception PyCMSError: + """ + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return (profile.profile.manufacturer or "") + "\n" + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getProfileModel(profile): + """ + (pyCMS) Gets the model for the given profile. + + If ``profile`` isn't a valid CmsProfile object or filename to a profile, a + :exc:`PyCMSError` is raised. 
+ + If an error occurs while trying to obtain the model tag, + a :exc:`PyCMSError` is raised. + + Use this function to obtain the information stored in the profile's + model tag. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal profile information stored in + an ICC tag. + :exception PyCMSError: + """ + + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return (profile.profile.model or "") + "\n" + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getProfileDescription(profile): + """ + (pyCMS) Gets the description for the given profile. + + If ``profile`` isn't a valid CmsProfile object or filename to a profile, a + :exc:`PyCMSError` is raised. + + If an error occurs while trying to obtain the description tag, + a :exc:`PyCMSError` is raised. + + Use this function to obtain the information stored in the profile's + description tag. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal profile information stored in an + ICC tag. + :exception PyCMSError: + """ + + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return (profile.profile.profile_description or "") + "\n" + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getDefaultIntent(profile): + """ + (pyCMS) Gets the default intent name for the given profile. + + If ``profile`` isn't a valid CmsProfile object or filename to a profile, a + :exc:`PyCMSError` is raised. + + If an error occurs while trying to obtain the default intent, a + :exc:`PyCMSError` is raised. 
+ + Use this function to determine the default (and usually best optimized) + rendering intent for this profile. Most profiles support multiple + rendering intents, but are intended mostly for one type of conversion. + If you wish to use a different intent than returned, use + ImageCms.isIntentSupported() to verify it will work first. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: Integer 0-3 specifying the default rendering intent for this + profile. + + ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT) + ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1 + ImageCms.INTENT_SATURATION = 2 + ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3 + + see the pyCMS documentation for details on rendering intents and what + they do. + :exception PyCMSError: + """ + + try: + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return profile.profile.rendering_intent + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def isIntentSupported(profile, intent, direction): + """ + (pyCMS) Checks if a given intent is supported. + + Use this function to verify that you can use your desired + ``intent`` with ``profile``, and that ``profile`` can be used for the + input/output/proof profile as you desire. + + Some profiles are created specifically for one "direction", can cannot + be used for others. Some profiles can only be used for certain + rendering intents, so it's best to either verify this before trying + to create a transform with them (using this function), or catch the + potential :exc:`PyCMSError` that will occur if they don't + support the modes you select. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. 
+ :param intent: Integer (0-3) specifying the rendering intent you wish to + use with this profile + + ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT) + ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1 + ImageCms.INTENT_SATURATION = 2 + ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3 + + see the pyCMS documentation for details on rendering intents and what + they do. + :param direction: Integer specifying if the profile is to be used for + input, output, or proof + + INPUT = 0 (or use ImageCms.DIRECTION_INPUT) + OUTPUT = 1 (or use ImageCms.DIRECTION_OUTPUT) + PROOF = 2 (or use ImageCms.DIRECTION_PROOF) + + :returns: 1 if the intent/direction are supported, -1 if they are not. + :exception PyCMSError: + """ + + try: + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + # FIXME: I get different results for the same data w. different + # compilers. Bug in LittleCMS or in the binding? + if profile.profile.is_intent_supported(intent, direction): + return 1 + else: + return -1 + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def versions(): + """ + (pyCMS) Fetches versions. 
+ """ + + return (VERSION, core.littlecms_version, sys.version.split()[0], Image.__version__) diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageColor.py b/.venv/lib/python3.9/site-packages/PIL/ImageColor.py new file mode 100644 index 00000000..25f92f2c --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageColor.py @@ -0,0 +1,302 @@ +# +# The Python Imaging Library +# $Id$ +# +# map CSS3-style colour description strings to RGB +# +# History: +# 2002-10-24 fl Added support for CSS-style color strings +# 2002-12-15 fl Added RGBA support +# 2004-03-27 fl Fixed remaining int() problems for Python 1.5.2 +# 2004-07-19 fl Fixed gray/grey spelling issues +# 2009-03-05 fl Fixed rounding error in grayscale calculation +# +# Copyright (c) 2002-2004 by Secret Labs AB +# Copyright (c) 2002-2004 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import re + +from . import Image + + +def getrgb(color): + """ + Convert a color string to an RGB or RGBA tuple. If the string cannot be + parsed, this function raises a :py:exc:`ValueError` exception. + + .. 
versionadded:: 1.1.4 + + :param color: A color string + :return: ``(red, green, blue[, alpha])`` + """ + if len(color) > 100: + raise ValueError("color specifier is too long") + color = color.lower() + + rgb = colormap.get(color, None) + if rgb: + if isinstance(rgb, tuple): + return rgb + colormap[color] = rgb = getrgb(rgb) + return rgb + + # check for known string formats + if re.match("#[a-f0-9]{3}$", color): + return (int(color[1] * 2, 16), int(color[2] * 2, 16), int(color[3] * 2, 16)) + + if re.match("#[a-f0-9]{4}$", color): + return ( + int(color[1] * 2, 16), + int(color[2] * 2, 16), + int(color[3] * 2, 16), + int(color[4] * 2, 16), + ) + + if re.match("#[a-f0-9]{6}$", color): + return (int(color[1:3], 16), int(color[3:5], 16), int(color[5:7], 16)) + + if re.match("#[a-f0-9]{8}$", color): + return ( + int(color[1:3], 16), + int(color[3:5], 16), + int(color[5:7], 16), + int(color[7:9], 16), + ) + + m = re.match(r"rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color) + if m: + return (int(m.group(1)), int(m.group(2)), int(m.group(3))) + + m = re.match(r"rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color) + if m: + return ( + int((int(m.group(1)) * 255) / 100.0 + 0.5), + int((int(m.group(2)) * 255) / 100.0 + 0.5), + int((int(m.group(3)) * 255) / 100.0 + 0.5), + ) + + m = re.match( + r"hsl\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color + ) + if m: + from colorsys import hls_to_rgb + + rgb = hls_to_rgb( + float(m.group(1)) / 360.0, + float(m.group(3)) / 100.0, + float(m.group(2)) / 100.0, + ) + return ( + int(rgb[0] * 255 + 0.5), + int(rgb[1] * 255 + 0.5), + int(rgb[2] * 255 + 0.5), + ) + + m = re.match( + r"hs[bv]\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color + ) + if m: + from colorsys import hsv_to_rgb + + rgb = hsv_to_rgb( + float(m.group(1)) / 360.0, + float(m.group(2)) / 100.0, + float(m.group(3)) / 100.0, + ) + return ( + int(rgb[0] * 255 + 0.5), + int(rgb[1] * 255 + 0.5), + int(rgb[2] * 255 + 0.5), + ) + + m = 
re.match(r"rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color) + if m: + return (int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))) + raise ValueError(f"unknown color specifier: {repr(color)}") + + +def getcolor(color, mode): + """ + Same as :py:func:`~PIL.ImageColor.getrgb`, but converts the RGB value to a + greyscale value if the mode is not color or a palette image. If the string + cannot be parsed, this function raises a :py:exc:`ValueError` exception. + + .. versionadded:: 1.1.4 + + :param color: A color string + :return: ``(graylevel [, alpha]) or (red, green, blue[, alpha])`` + """ + # same as getrgb, but converts the result to the given mode + color, alpha = getrgb(color), 255 + if len(color) == 4: + color, alpha = color[0:3], color[3] + + if Image.getmodebase(mode) == "L": + r, g, b = color + # ITU-R Recommendation 601-2 for nonlinear RGB + # scaled to 24 bits to match the convert's implementation. + color = (r * 19595 + g * 38470 + b * 7471 + 0x8000) >> 16 + if mode[-1] == "A": + return (color, alpha) + else: + if mode[-1] == "A": + return color + (alpha,) + return color + + +colormap = { + # X11 colour table from https://drafts.csswg.org/css-color-4/, with + # gray/grey spelling issues fixed. This is a superset of HTML 4.0 + # colour names used in CSS 1. 
+ "aliceblue": "#f0f8ff", + "antiquewhite": "#faebd7", + "aqua": "#00ffff", + "aquamarine": "#7fffd4", + "azure": "#f0ffff", + "beige": "#f5f5dc", + "bisque": "#ffe4c4", + "black": "#000000", + "blanchedalmond": "#ffebcd", + "blue": "#0000ff", + "blueviolet": "#8a2be2", + "brown": "#a52a2a", + "burlywood": "#deb887", + "cadetblue": "#5f9ea0", + "chartreuse": "#7fff00", + "chocolate": "#d2691e", + "coral": "#ff7f50", + "cornflowerblue": "#6495ed", + "cornsilk": "#fff8dc", + "crimson": "#dc143c", + "cyan": "#00ffff", + "darkblue": "#00008b", + "darkcyan": "#008b8b", + "darkgoldenrod": "#b8860b", + "darkgray": "#a9a9a9", + "darkgrey": "#a9a9a9", + "darkgreen": "#006400", + "darkkhaki": "#bdb76b", + "darkmagenta": "#8b008b", + "darkolivegreen": "#556b2f", + "darkorange": "#ff8c00", + "darkorchid": "#9932cc", + "darkred": "#8b0000", + "darksalmon": "#e9967a", + "darkseagreen": "#8fbc8f", + "darkslateblue": "#483d8b", + "darkslategray": "#2f4f4f", + "darkslategrey": "#2f4f4f", + "darkturquoise": "#00ced1", + "darkviolet": "#9400d3", + "deeppink": "#ff1493", + "deepskyblue": "#00bfff", + "dimgray": "#696969", + "dimgrey": "#696969", + "dodgerblue": "#1e90ff", + "firebrick": "#b22222", + "floralwhite": "#fffaf0", + "forestgreen": "#228b22", + "fuchsia": "#ff00ff", + "gainsboro": "#dcdcdc", + "ghostwhite": "#f8f8ff", + "gold": "#ffd700", + "goldenrod": "#daa520", + "gray": "#808080", + "grey": "#808080", + "green": "#008000", + "greenyellow": "#adff2f", + "honeydew": "#f0fff0", + "hotpink": "#ff69b4", + "indianred": "#cd5c5c", + "indigo": "#4b0082", + "ivory": "#fffff0", + "khaki": "#f0e68c", + "lavender": "#e6e6fa", + "lavenderblush": "#fff0f5", + "lawngreen": "#7cfc00", + "lemonchiffon": "#fffacd", + "lightblue": "#add8e6", + "lightcoral": "#f08080", + "lightcyan": "#e0ffff", + "lightgoldenrodyellow": "#fafad2", + "lightgreen": "#90ee90", + "lightgray": "#d3d3d3", + "lightgrey": "#d3d3d3", + "lightpink": "#ffb6c1", + "lightsalmon": "#ffa07a", + "lightseagreen": "#20b2aa", 
+ "lightskyblue": "#87cefa", + "lightslategray": "#778899", + "lightslategrey": "#778899", + "lightsteelblue": "#b0c4de", + "lightyellow": "#ffffe0", + "lime": "#00ff00", + "limegreen": "#32cd32", + "linen": "#faf0e6", + "magenta": "#ff00ff", + "maroon": "#800000", + "mediumaquamarine": "#66cdaa", + "mediumblue": "#0000cd", + "mediumorchid": "#ba55d3", + "mediumpurple": "#9370db", + "mediumseagreen": "#3cb371", + "mediumslateblue": "#7b68ee", + "mediumspringgreen": "#00fa9a", + "mediumturquoise": "#48d1cc", + "mediumvioletred": "#c71585", + "midnightblue": "#191970", + "mintcream": "#f5fffa", + "mistyrose": "#ffe4e1", + "moccasin": "#ffe4b5", + "navajowhite": "#ffdead", + "navy": "#000080", + "oldlace": "#fdf5e6", + "olive": "#808000", + "olivedrab": "#6b8e23", + "orange": "#ffa500", + "orangered": "#ff4500", + "orchid": "#da70d6", + "palegoldenrod": "#eee8aa", + "palegreen": "#98fb98", + "paleturquoise": "#afeeee", + "palevioletred": "#db7093", + "papayawhip": "#ffefd5", + "peachpuff": "#ffdab9", + "peru": "#cd853f", + "pink": "#ffc0cb", + "plum": "#dda0dd", + "powderblue": "#b0e0e6", + "purple": "#800080", + "rebeccapurple": "#663399", + "red": "#ff0000", + "rosybrown": "#bc8f8f", + "royalblue": "#4169e1", + "saddlebrown": "#8b4513", + "salmon": "#fa8072", + "sandybrown": "#f4a460", + "seagreen": "#2e8b57", + "seashell": "#fff5ee", + "sienna": "#a0522d", + "silver": "#c0c0c0", + "skyblue": "#87ceeb", + "slateblue": "#6a5acd", + "slategray": "#708090", + "slategrey": "#708090", + "snow": "#fffafa", + "springgreen": "#00ff7f", + "steelblue": "#4682b4", + "tan": "#d2b48c", + "teal": "#008080", + "thistle": "#d8bfd8", + "tomato": "#ff6347", + "turquoise": "#40e0d0", + "violet": "#ee82ee", + "wheat": "#f5deb3", + "white": "#ffffff", + "whitesmoke": "#f5f5f5", + "yellow": "#ffff00", + "yellowgreen": "#9acd32", +} diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageDraw.py b/.venv/lib/python3.9/site-packages/PIL/ImageDraw.py new file mode 100644 index 
00000000..aea0cc68 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageDraw.py @@ -0,0 +1,984 @@ +# +# The Python Imaging Library +# $Id$ +# +# drawing interface operations +# +# History: +# 1996-04-13 fl Created (experimental) +# 1996-08-07 fl Filled polygons, ellipses. +# 1996-08-13 fl Added text support +# 1998-06-28 fl Handle I and F images +# 1998-12-29 fl Added arc; use arc primitive to draw ellipses +# 1999-01-10 fl Added shape stuff (experimental) +# 1999-02-06 fl Added bitmap support +# 1999-02-11 fl Changed all primitives to take options +# 1999-02-20 fl Fixed backwards compatibility +# 2000-10-12 fl Copy on write, when necessary +# 2001-02-18 fl Use default ink for bitmap/text also in fill mode +# 2002-10-24 fl Added support for CSS-style color strings +# 2002-12-10 fl Added experimental support for RGBA-on-RGB drawing +# 2002-12-11 fl Refactored low-level drawing API (work in progress) +# 2004-08-26 fl Made Draw() a factory function, added getdraw() support +# 2004-09-04 fl Added width support to line primitive +# 2004-09-10 fl Added font mode handling +# 2006-06-19 fl Added font bearing support (getmask2) +# +# Copyright (c) 1997-2006 by Secret Labs AB +# Copyright (c) 1996-2006 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import math +import numbers + +from . import Image, ImageColor, ImageFont + +""" +A simple 2D drawing interface for PIL images. +

+Application code should use the Draw factory, instead of +directly. +""" + + +class ImageDraw: + def __init__(self, im, mode=None): + """ + Create a drawing instance. + + :param im: The image to draw in. + :param mode: Optional mode to use for color values. For RGB + images, this argument can be RGB or RGBA (to blend the + drawing into the image). For all other modes, this argument + must be the same as the image mode. If omitted, the mode + defaults to the mode of the image. + """ + im.load() + if im.readonly: + im._copy() # make it writeable + blend = 0 + if mode is None: + mode = im.mode + if mode != im.mode: + if mode == "RGBA" and im.mode == "RGB": + blend = 1 + else: + raise ValueError("mode mismatch") + if mode == "P": + self.palette = im.palette + else: + self.palette = None + self._image = im + self.im = im.im + self.draw = Image.core.draw(self.im, blend) + self.mode = mode + if mode in ("I", "F"): + self.ink = self.draw.draw_ink(1) + else: + self.ink = self.draw.draw_ink(-1) + if mode in ("1", "P", "I", "F"): + # FIXME: fix Fill2 to properly support matte for I+F images + self.fontmode = "1" + else: + self.fontmode = "L" # aliasing is okay for other modes + self.fill = 0 + self.font = None + + def getfont(self): + """ + Get the current default font. + + :returns: An image font.""" + if not self.font: + # FIXME: should add a font repository + from . 
import ImageFont + + self.font = ImageFont.load_default() + return self.font + + def _getink(self, ink, fill=None): + if ink is None and fill is None: + if self.fill: + fill = self.ink + else: + ink = self.ink + else: + if ink is not None: + if isinstance(ink, str): + ink = ImageColor.getcolor(ink, self.mode) + if self.palette and not isinstance(ink, numbers.Number): + ink = self.palette.getcolor(ink, self._image) + ink = self.draw.draw_ink(ink) + if fill is not None: + if isinstance(fill, str): + fill = ImageColor.getcolor(fill, self.mode) + if self.palette and not isinstance(fill, numbers.Number): + fill = self.palette.getcolor(fill, self._image) + fill = self.draw.draw_ink(fill) + return ink, fill + + def arc(self, xy, start, end, fill=None, width=1): + """Draw an arc.""" + ink, fill = self._getink(fill) + if ink is not None: + self.draw.draw_arc(xy, start, end, ink, width) + + def bitmap(self, xy, bitmap, fill=None): + """Draw a bitmap.""" + bitmap.load() + ink, fill = self._getink(fill) + if ink is None: + ink = fill + if ink is not None: + self.draw.draw_bitmap(xy, bitmap.im, ink) + + def chord(self, xy, start, end, fill=None, outline=None, width=1): + """Draw a chord.""" + ink, fill = self._getink(outline, fill) + if fill is not None: + self.draw.draw_chord(xy, start, end, fill, 1) + if ink is not None and ink != fill and width != 0: + self.draw.draw_chord(xy, start, end, ink, 0, width) + + def ellipse(self, xy, fill=None, outline=None, width=1): + """Draw an ellipse.""" + ink, fill = self._getink(outline, fill) + if fill is not None: + self.draw.draw_ellipse(xy, fill, 1) + if ink is not None and ink != fill and width != 0: + self.draw.draw_ellipse(xy, ink, 0, width) + + def line(self, xy, fill=None, width=0, joint=None): + """Draw a line, or a connected sequence of line segments.""" + ink = self._getink(fill)[0] + if ink is not None: + self.draw.draw_lines(xy, ink, width) + if joint == "curve" and width > 4: + if not isinstance(xy[0], (list, tuple)): + xy = 
[tuple(xy[i : i + 2]) for i in range(0, len(xy), 2)] + for i in range(1, len(xy) - 1): + point = xy[i] + angles = [ + math.degrees(math.atan2(end[0] - start[0], start[1] - end[1])) + % 360 + for start, end in ((xy[i - 1], point), (point, xy[i + 1])) + ] + if angles[0] == angles[1]: + # This is a straight line, so no joint is required + continue + + def coord_at_angle(coord, angle): + x, y = coord + angle -= 90 + distance = width / 2 - 1 + return tuple( + [ + p + (math.floor(p_d) if p_d > 0 else math.ceil(p_d)) + for p, p_d in ( + (x, distance * math.cos(math.radians(angle))), + (y, distance * math.sin(math.radians(angle))), + ) + ] + ) + + flipped = ( + angles[1] > angles[0] and angles[1] - 180 > angles[0] + ) or (angles[1] < angles[0] and angles[1] + 180 > angles[0]) + coords = [ + (point[0] - width / 2 + 1, point[1] - width / 2 + 1), + (point[0] + width / 2 - 1, point[1] + width / 2 - 1), + ] + if flipped: + start, end = (angles[1] + 90, angles[0] + 90) + else: + start, end = (angles[0] - 90, angles[1] - 90) + self.pieslice(coords, start - 90, end - 90, fill) + + if width > 8: + # Cover potential gaps between the line and the joint + if flipped: + gapCoords = [ + coord_at_angle(point, angles[0] + 90), + point, + coord_at_angle(point, angles[1] + 90), + ] + else: + gapCoords = [ + coord_at_angle(point, angles[0] - 90), + point, + coord_at_angle(point, angles[1] - 90), + ] + self.line(gapCoords, fill, width=3) + + def shape(self, shape, fill=None, outline=None): + """(Experimental) Draw a shape.""" + shape.close() + ink, fill = self._getink(outline, fill) + if fill is not None: + self.draw.draw_outline(shape, fill, 1) + if ink is not None and ink != fill: + self.draw.draw_outline(shape, ink, 0) + + def pieslice(self, xy, start, end, fill=None, outline=None, width=1): + """Draw a pieslice.""" + ink, fill = self._getink(outline, fill) + if fill is not None: + self.draw.draw_pieslice(xy, start, end, fill, 1) + if ink is not None and ink != fill and width != 0: + 
self.draw.draw_pieslice(xy, start, end, ink, 0, width) + + def point(self, xy, fill=None): + """Draw one or more individual pixels.""" + ink, fill = self._getink(fill) + if ink is not None: + self.draw.draw_points(xy, ink) + + def polygon(self, xy, fill=None, outline=None): + """Draw a polygon.""" + ink, fill = self._getink(outline, fill) + if fill is not None: + self.draw.draw_polygon(xy, fill, 1) + if ink is not None and ink != fill: + self.draw.draw_polygon(xy, ink, 0) + + def regular_polygon( + self, bounding_circle, n_sides, rotation=0, fill=None, outline=None + ): + """Draw a regular polygon.""" + xy = _compute_regular_polygon_vertices(bounding_circle, n_sides, rotation) + self.polygon(xy, fill, outline) + + def rectangle(self, xy, fill=None, outline=None, width=1): + """Draw a rectangle.""" + ink, fill = self._getink(outline, fill) + if fill is not None: + self.draw.draw_rectangle(xy, fill, 1) + if ink is not None and ink != fill and width != 0: + self.draw.draw_rectangle(xy, ink, 0, width) + + def rounded_rectangle(self, xy, radius=0, fill=None, outline=None, width=1): + """Draw a rounded rectangle.""" + if isinstance(xy[0], (list, tuple)): + (x0, y0), (x1, y1) = xy + else: + x0, y0, x1, y1 = xy + + d = radius * 2 + + full_x = d >= x1 - x0 + if full_x: + # The two left and two right corners are joined + d = x1 - x0 + full_y = d >= y1 - y0 + if full_y: + # The two top and two bottom corners are joined + d = y1 - y0 + if full_x and full_y: + # If all corners are joined, that is a circle + return self.ellipse(xy, fill, outline, width) + + if d == 0: + # If the corners have no curve, that is a rectangle + return self.rectangle(xy, fill, outline, width) + + r = d // 2 + ink, fill = self._getink(outline, fill) + + def draw_corners(pieslice): + if full_x: + # Draw top and bottom halves + parts = ( + ((x0, y0, x0 + d, y0 + d), 180, 360), + ((x0, y1 - d, x0 + d, y1), 0, 180), + ) + elif full_y: + # Draw left and right halves + parts = ( + ((x0, y0, x0 + d, y0 + d), 
90, 270), + ((x1 - d, y0, x1, y0 + d), 270, 90), + ) + else: + # Draw four separate corners + parts = ( + ((x1 - d, y0, x1, y0 + d), 270, 360), + ((x1 - d, y1 - d, x1, y1), 0, 90), + ((x0, y1 - d, x0 + d, y1), 90, 180), + ((x0, y0, x0 + d, y0 + d), 180, 270), + ) + for part in parts: + if pieslice: + self.draw.draw_pieslice(*(part + (fill, 1))) + else: + self.draw.draw_arc(*(part + (ink, width))) + + if fill is not None: + draw_corners(True) + + if full_x: + self.draw.draw_rectangle((x0, y0 + r + 1, x1, y1 - r - 1), fill, 1) + else: + self.draw.draw_rectangle((x0 + r + 1, y0, x1 - r - 1, y1), fill, 1) + if not full_x and not full_y: + self.draw.draw_rectangle((x0, y0 + r + 1, x0 + r, y1 - r - 1), fill, 1) + self.draw.draw_rectangle((x1 - r, y0 + r + 1, x1, y1 - r - 1), fill, 1) + if ink is not None and ink != fill and width != 0: + draw_corners(False) + + if not full_x: + self.draw.draw_rectangle( + (x0 + r + 1, y0, x1 - r - 1, y0 + width - 1), ink, 1 + ) + self.draw.draw_rectangle( + (x0 + r + 1, y1 - width + 1, x1 - r - 1, y1), ink, 1 + ) + if not full_y: + self.draw.draw_rectangle( + (x0, y0 + r + 1, x0 + width - 1, y1 - r - 1), ink, 1 + ) + self.draw.draw_rectangle( + (x1 - width + 1, y0 + r + 1, x1, y1 - r - 1), ink, 1 + ) + + def _multiline_check(self, text): + """Draw text.""" + split_character = "\n" if isinstance(text, str) else b"\n" + + return split_character in text + + def _multiline_split(self, text): + split_character = "\n" if isinstance(text, str) else b"\n" + + return text.split(split_character) + + def text( + self, + xy, + text, + fill=None, + font=None, + anchor=None, + spacing=4, + align="left", + direction=None, + features=None, + language=None, + stroke_width=0, + stroke_fill=None, + embedded_color=False, + *args, + **kwargs, + ): + if self._multiline_check(text): + return self.multiline_text( + xy, + text, + fill, + font, + anchor, + spacing, + align, + direction, + features, + language, + stroke_width, + stroke_fill, + embedded_color, + ) 
+ + if embedded_color and self.mode not in ("RGB", "RGBA"): + raise ValueError("Embedded color supported only in RGB and RGBA modes") + + if font is None: + font = self.getfont() + + def getink(fill): + ink, fill = self._getink(fill) + if ink is None: + return fill + return ink + + def draw_text(ink, stroke_width=0, stroke_offset=None): + mode = self.fontmode + if stroke_width == 0 and embedded_color: + mode = "RGBA" + coord = xy + try: + mask, offset = font.getmask2( + text, + mode, + direction=direction, + features=features, + language=language, + stroke_width=stroke_width, + anchor=anchor, + ink=ink, + *args, + **kwargs, + ) + coord = coord[0] + offset[0], coord[1] + offset[1] + except AttributeError: + try: + mask = font.getmask( + text, + mode, + direction, + features, + language, + stroke_width, + anchor, + ink, + *args, + **kwargs, + ) + except TypeError: + mask = font.getmask(text) + if stroke_offset: + coord = coord[0] + stroke_offset[0], coord[1] + stroke_offset[1] + if mode == "RGBA": + # font.getmask2(mode="RGBA") returns color in RGB bands and mask in A + # extract mask and set text alpha + color, mask = mask, mask.getband(3) + color.fillband(3, (ink >> 24) & 0xFF) + coord2 = coord[0] + mask.size[0], coord[1] + mask.size[1] + self.im.paste(color, coord + coord2, mask) + else: + self.draw.draw_bitmap(coord, mask, ink) + + ink = getink(fill) + if ink is not None: + stroke_ink = None + if stroke_width: + stroke_ink = getink(stroke_fill) if stroke_fill is not None else ink + + if stroke_ink is not None: + # Draw stroked text + draw_text(stroke_ink, stroke_width) + + # Draw normal text + draw_text(ink, 0) + else: + # Only draw normal text + draw_text(ink) + + def multiline_text( + self, + xy, + text, + fill=None, + font=None, + anchor=None, + spacing=4, + align="left", + direction=None, + features=None, + language=None, + stroke_width=0, + stroke_fill=None, + embedded_color=False, + ): + if direction == "ttb": + raise ValueError("ttb direction is 
unsupported for multiline text") + + if anchor is None: + anchor = "la" + elif len(anchor) != 2: + raise ValueError("anchor must be a 2 character string") + elif anchor[1] in "tb": + raise ValueError("anchor not supported for multiline text") + + widths = [] + max_width = 0 + lines = self._multiline_split(text) + line_spacing = ( + self.textsize("A", font=font, stroke_width=stroke_width)[1] + spacing + ) + for line in lines: + line_width = self.textlength( + line, font, direction=direction, features=features, language=language + ) + widths.append(line_width) + max_width = max(max_width, line_width) + + top = xy[1] + if anchor[1] == "m": + top -= (len(lines) - 1) * line_spacing / 2.0 + elif anchor[1] == "d": + top -= (len(lines) - 1) * line_spacing + + for idx, line in enumerate(lines): + left = xy[0] + width_difference = max_width - widths[idx] + + # first align left by anchor + if anchor[0] == "m": + left -= width_difference / 2.0 + elif anchor[0] == "r": + left -= width_difference + + # then align by align parameter + if align == "left": + pass + elif align == "center": + left += width_difference / 2.0 + elif align == "right": + left += width_difference + else: + raise ValueError('align must be "left", "center" or "right"') + + self.text( + (left, top), + line, + fill, + font, + anchor, + direction=direction, + features=features, + language=language, + stroke_width=stroke_width, + stroke_fill=stroke_fill, + embedded_color=embedded_color, + ) + top += line_spacing + + def textsize( + self, + text, + font=None, + spacing=4, + direction=None, + features=None, + language=None, + stroke_width=0, + ): + """Get the size of a given string, in pixels.""" + if self._multiline_check(text): + return self.multiline_textsize( + text, font, spacing, direction, features, language, stroke_width + ) + + if font is None: + font = self.getfont() + return font.getsize(text, direction, features, language, stroke_width) + + def multiline_textsize( + self, + text, + font=None, + 
spacing=4, + direction=None, + features=None, + language=None, + stroke_width=0, + ): + max_width = 0 + lines = self._multiline_split(text) + line_spacing = ( + self.textsize("A", font=font, stroke_width=stroke_width)[1] + spacing + ) + for line in lines: + line_width, line_height = self.textsize( + line, font, spacing, direction, features, language, stroke_width + ) + max_width = max(max_width, line_width) + return max_width, len(lines) * line_spacing - spacing + + def textlength( + self, + text, + font=None, + direction=None, + features=None, + language=None, + embedded_color=False, + ): + """Get the length of a given string, in pixels with 1/64 precision.""" + if self._multiline_check(text): + raise ValueError("can't measure length of multiline text") + if embedded_color and self.mode not in ("RGB", "RGBA"): + raise ValueError("Embedded color supported only in RGB and RGBA modes") + + if font is None: + font = self.getfont() + mode = "RGBA" if embedded_color else self.fontmode + try: + return font.getlength(text, mode, direction, features, language) + except AttributeError: + size = self.textsize( + text, font, direction=direction, features=features, language=language + ) + if direction == "ttb": + return size[1] + return size[0] + + def textbbox( + self, + xy, + text, + font=None, + anchor=None, + spacing=4, + align="left", + direction=None, + features=None, + language=None, + stroke_width=0, + embedded_color=False, + ): + """Get the bounding box of a given string, in pixels.""" + if embedded_color and self.mode not in ("RGB", "RGBA"): + raise ValueError("Embedded color supported only in RGB and RGBA modes") + + if self._multiline_check(text): + return self.multiline_textbbox( + xy, + text, + font, + anchor, + spacing, + align, + direction, + features, + language, + stroke_width, + embedded_color, + ) + + if font is None: + font = self.getfont() + if not isinstance(font, ImageFont.FreeTypeFont): + raise ValueError("Only supported for TrueType fonts") + mode = 
"RGBA" if embedded_color else self.fontmode + bbox = font.getbbox( + text, mode, direction, features, language, stroke_width, anchor + ) + return bbox[0] + xy[0], bbox[1] + xy[1], bbox[2] + xy[0], bbox[3] + xy[1] + + def multiline_textbbox( + self, + xy, + text, + font=None, + anchor=None, + spacing=4, + align="left", + direction=None, + features=None, + language=None, + stroke_width=0, + embedded_color=False, + ): + if direction == "ttb": + raise ValueError("ttb direction is unsupported for multiline text") + + if anchor is None: + anchor = "la" + elif len(anchor) != 2: + raise ValueError("anchor must be a 2 character string") + elif anchor[1] in "tb": + raise ValueError("anchor not supported for multiline text") + + widths = [] + max_width = 0 + lines = self._multiline_split(text) + line_spacing = ( + self.textsize("A", font=font, stroke_width=stroke_width)[1] + spacing + ) + for line in lines: + line_width = self.textlength( + line, + font, + direction=direction, + features=features, + language=language, + embedded_color=embedded_color, + ) + widths.append(line_width) + max_width = max(max_width, line_width) + + top = xy[1] + if anchor[1] == "m": + top -= (len(lines) - 1) * line_spacing / 2.0 + elif anchor[1] == "d": + top -= (len(lines) - 1) * line_spacing + + bbox = None + + for idx, line in enumerate(lines): + left = xy[0] + width_difference = max_width - widths[idx] + + # first align left by anchor + if anchor[0] == "m": + left -= width_difference / 2.0 + elif anchor[0] == "r": + left -= width_difference + + # then align by align parameter + if align == "left": + pass + elif align == "center": + left += width_difference / 2.0 + elif align == "right": + left += width_difference + else: + raise ValueError('align must be "left", "center" or "right"') + + bbox_line = self.textbbox( + (left, top), + line, + font, + anchor, + direction=direction, + features=features, + language=language, + stroke_width=stroke_width, + embedded_color=embedded_color, + ) + if bbox 
is None: + bbox = bbox_line + else: + bbox = ( + min(bbox[0], bbox_line[0]), + min(bbox[1], bbox_line[1]), + max(bbox[2], bbox_line[2]), + max(bbox[3], bbox_line[3]), + ) + + top += line_spacing + + if bbox is None: + return xy[0], xy[1], xy[0], xy[1] + return bbox + + +def Draw(im, mode=None): + """ + A simple 2D drawing interface for PIL images. + + :param im: The image to draw in. + :param mode: Optional mode to use for color values. For RGB + images, this argument can be RGB or RGBA (to blend the + drawing into the image). For all other modes, this argument + must be the same as the image mode. If omitted, the mode + defaults to the mode of the image. + """ + try: + return im.getdraw(mode) + except AttributeError: + return ImageDraw(im, mode) + + +# experimental access to the outline API +try: + Outline = Image.core.outline +except AttributeError: + Outline = None + + +def getdraw(im=None, hints=None): + """ + (Experimental) A more advanced 2D drawing interface for PIL images, + based on the WCK interface. + + :param im: The image to draw in. + :param hints: An optional list of hints. + :returns: A (drawing context, drawing resource factory) tuple. + """ + # FIXME: this needs more work! + # FIXME: come up with a better 'hints' scheme. + handler = None + if not hints or "nicest" in hints: + try: + from . import _imagingagg as handler + except ImportError: + pass + if handler is None: + from . import ImageDraw2 as handler + if im: + im = handler.Draw(im) + return im, handler + + +def floodfill(image, xy, value, border=None, thresh=0): + """ + (experimental) Fills a bounded region with a given color. + + :param image: Target image. + :param xy: Seed position (a 2-item coordinate tuple). See + :ref:`coordinate-system`. + :param value: Fill color. + :param border: Optional border value. If given, the region consists of + pixels with a color different from the border color. If not given, + the region consists of pixels having the same color as the seed + pixel. 
+ :param thresh: Optional threshold value which specifies a maximum + tolerable difference of a pixel value from the 'background' in + order for it to be replaced. Useful for filling regions of + non-homogeneous, but similar, colors. + """ + # based on an implementation by Eric S. Raymond + # amended by yo1995 @20180806 + pixel = image.load() + x, y = xy + try: + background = pixel[x, y] + if _color_diff(value, background) <= thresh: + return # seed point already has fill color + pixel[x, y] = value + except (ValueError, IndexError): + return # seed point outside image + edge = {(x, y)} + # use a set to keep record of current and previous edge pixels + # to reduce memory consumption + full_edge = set() + while edge: + new_edge = set() + for (x, y) in edge: # 4 adjacent method + for (s, t) in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)): + # If already processed, or if a coordinate is negative, skip + if (s, t) in full_edge or s < 0 or t < 0: + continue + try: + p = pixel[s, t] + except (ValueError, IndexError): + pass + else: + full_edge.add((s, t)) + if border is None: + fill = _color_diff(p, background) <= thresh + else: + fill = p != value and p != border + if fill: + pixel[s, t] = value + new_edge.add((s, t)) + full_edge = edge # discard pixels processed + edge = new_edge + + +def _compute_regular_polygon_vertices(bounding_circle, n_sides, rotation): + """ + Generate a list of vertices for a 2D regular polygon. + + :param bounding_circle: The bounding circle is a tuple defined + by a point and radius. The polygon is inscribed in this circle. + (e.g. ``bounding_circle=(x, y, r)`` or ``((x, y), r)``) + :param n_sides: Number of sides + (e.g. ``n_sides=3`` for a triangle, ``6`` for a hexagon) + :param rotation: Apply an arbitrary rotation to the polygon + (e.g. ``rotation=90``, applies a 90 degree rotation) + :return: List of regular polygon vertices + (e.g. ``[(25, 50), (50, 50), (50, 25), (25, 25)]``) + + How are the vertices computed? + 1. 
Compute the following variables + - theta: Angle between the apothem & the nearest polygon vertex + - side_length: Length of each polygon edge + - centroid: Center of bounding circle (1st, 2nd elements of bounding_circle) + - polygon_radius: Polygon radius (last element of bounding_circle) + - angles: Location of each polygon vertex in polar grid + (e.g. A square with 0 degree rotation => [225.0, 315.0, 45.0, 135.0]) + + 2. For each angle in angles, get the polygon vertex at that angle + The vertex is computed using the equation below. + X= xcos(φ) + ysin(φ) + Y= −xsin(φ) + ycos(φ) + + Note: + φ = angle in degrees + x = 0 + y = polygon_radius + + The formula above assumes rotation around the origin. + In our case, we are rotating around the centroid. + To account for this, we use the formula below + X = xcos(φ) + ysin(φ) + centroid_x + Y = −xsin(φ) + ycos(φ) + centroid_y + """ + # 1. Error Handling + # 1.1 Check `n_sides` has an appropriate value + if not isinstance(n_sides, int): + raise TypeError("n_sides should be an int") + if n_sides < 3: + raise ValueError("n_sides should be an int > 2") + + # 1.2 Check `bounding_circle` has an appropriate value + if not isinstance(bounding_circle, (list, tuple)): + raise TypeError("bounding_circle should be a tuple") + + if len(bounding_circle) == 3: + *centroid, polygon_radius = bounding_circle + elif len(bounding_circle) == 2: + centroid, polygon_radius = bounding_circle + else: + raise ValueError( + "bounding_circle should contain 2D coordinates " + "and a radius (e.g. (x, y, r) or ((x, y), r) )" + ) + + if not all(isinstance(i, (int, float)) for i in (*centroid, polygon_radius)): + raise ValueError("bounding_circle should only contain numeric data") + + if not len(centroid) == 2: + raise ValueError( + "bounding_circle centre should contain 2D coordinates (e.g. 
(x, y))" + ) + + if polygon_radius <= 0: + raise ValueError("bounding_circle radius should be > 0") + + # 1.3 Check `rotation` has an appropriate value + if not isinstance(rotation, (int, float)): + raise ValueError("rotation should be an int or float") + + # 2. Define Helper Functions + def _apply_rotation(point, degrees, centroid): + return ( + round( + point[0] * math.cos(math.radians(360 - degrees)) + - point[1] * math.sin(math.radians(360 - degrees)) + + centroid[0], + 2, + ), + round( + point[1] * math.cos(math.radians(360 - degrees)) + + point[0] * math.sin(math.radians(360 - degrees)) + + centroid[1], + 2, + ), + ) + + def _compute_polygon_vertex(centroid, polygon_radius, angle): + start_point = [polygon_radius, 0] + return _apply_rotation(start_point, angle, centroid) + + def _get_angles(n_sides, rotation): + angles = [] + degrees = 360 / n_sides + # Start with the bottom left polygon vertex + current_angle = (270 - 0.5 * degrees) + rotation + for _ in range(0, n_sides): + angles.append(current_angle) + current_angle += degrees + if current_angle > 360: + current_angle -= 360 + return angles + + # 3. Variable Declarations + angles = _get_angles(n_sides, rotation) + + # 4. Compute Vertices + return [ + _compute_polygon_vertex(centroid, polygon_radius, angle) for angle in angles + ] + + +def _color_diff(color1, color2): + """ + Uses 1-norm distance to calculate difference between two values. 
+ """ + if isinstance(color2, tuple): + return sum([abs(color1[i] - color2[i]) for i in range(0, len(color2))]) + else: + return abs(color1 - color2) diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageDraw2.py b/.venv/lib/python3.9/site-packages/PIL/ImageDraw2.py new file mode 100644 index 00000000..1f63110f --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageDraw2.py @@ -0,0 +1,179 @@ +# +# The Python Imaging Library +# $Id$ +# +# WCK-style drawing interface operations +# +# History: +# 2003-12-07 fl created +# 2005-05-15 fl updated; added to PIL as ImageDraw2 +# 2005-05-15 fl added text support +# 2005-05-20 fl added arc/chord/pieslice support +# +# Copyright (c) 2003-2005 by Secret Labs AB +# Copyright (c) 2003-2005 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + + +""" +(Experimental) WCK-style drawing interface operations + +.. seealso:: :py:mod:`PIL.ImageDraw` +""" + + +from . import Image, ImageColor, ImageDraw, ImageFont, ImagePath + + +class Pen: + """Stores an outline color and width.""" + + def __init__(self, color, width=1, opacity=255): + self.color = ImageColor.getrgb(color) + self.width = width + + +class Brush: + """Stores a fill color""" + + def __init__(self, color, opacity=255): + self.color = ImageColor.getrgb(color) + + +class Font: + """Stores a TrueType font and color""" + + def __init__(self, color, file, size=12): + # FIXME: add support for bitmap fonts + self.color = ImageColor.getrgb(color) + self.font = ImageFont.truetype(file, size) + + +class Draw: + """ + (Experimental) WCK-style drawing interface + """ + + def __init__(self, image, size=None, color=None): + if not hasattr(image, "im"): + image = Image.new(image, size, color) + self.draw = ImageDraw.Draw(image) + self.image = image + self.transform = None + + def flush(self): + return self.image + + def render(self, op, xy, pen, brush=None): + # handle color arguments + outline = fill = None + width = 1 + if isinstance(pen, 
Pen): + outline = pen.color + width = pen.width + elif isinstance(brush, Pen): + outline = brush.color + width = brush.width + if isinstance(brush, Brush): + fill = brush.color + elif isinstance(pen, Brush): + fill = pen.color + # handle transformation + if self.transform: + xy = ImagePath.Path(xy) + xy.transform(self.transform) + # render the item + if op == "line": + self.draw.line(xy, fill=outline, width=width) + else: + getattr(self.draw, op)(xy, fill=fill, outline=outline) + + def settransform(self, offset): + """Sets a transformation offset.""" + (xoffset, yoffset) = offset + self.transform = (1, 0, xoffset, 0, 1, yoffset) + + def arc(self, xy, start, end, *options): + """ + Draws an arc (a portion of a circle outline) between the start and end + angles, inside the given bounding box. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.arc` + """ + self.render("arc", xy, start, end, *options) + + def chord(self, xy, start, end, *options): + """ + Same as :py:meth:`~PIL.ImageDraw2.Draw.arc`, but connects the end points + with a straight line. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.chord` + """ + self.render("chord", xy, start, end, *options) + + def ellipse(self, xy, *options): + """ + Draws an ellipse inside the given bounding box. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.ellipse` + """ + self.render("ellipse", xy, *options) + + def line(self, xy, *options): + """ + Draws a line between the coordinates in the ``xy`` list. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.line` + """ + self.render("line", xy, *options) + + def pieslice(self, xy, start, end, *options): + """ + Same as arc, but also draws straight lines between the end points and the + center of the bounding box. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.pieslice` + """ + self.render("pieslice", xy, start, end, *options) + + def polygon(self, xy, *options): + """ + Draws a polygon. 
+ + The polygon outline consists of straight lines between the given + coordinates, plus a straight line between the last and the first + coordinate. + + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.polygon` + """ + self.render("polygon", xy, *options) + + def rectangle(self, xy, *options): + """ + Draws a rectangle. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.rectangle` + """ + self.render("rectangle", xy, *options) + + def text(self, xy, text, font): + """ + Draws the string at the given position. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.text` + """ + if self.transform: + xy = ImagePath.Path(xy) + xy.transform(self.transform) + self.draw.text(xy, text, font=font.font, fill=font.color) + + def textsize(self, text, font): + """ + Return the size of the given string, in pixels. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.textsize` + """ + return self.draw.textsize(text, font=font.font) diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageEnhance.py b/.venv/lib/python3.9/site-packages/PIL/ImageEnhance.py new file mode 100644 index 00000000..3b79d5c4 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageEnhance.py @@ -0,0 +1,103 @@ +# +# The Python Imaging Library. +# $Id$ +# +# image enhancement classes +# +# For a background, see "Image Processing By Interpolation and +# Extrapolation", Paul Haeberli and Douglas Voorhies. Available +# at http://www.graficaobscura.com/interp/index.html +# +# History: +# 1996-03-23 fl Created +# 2009-06-16 fl Fixed mean calculation +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFilter, ImageStat + + +class _Enhance: + def enhance(self, factor): + """ + Returns an enhanced image. + + :param factor: A floating point value controlling the enhancement. 
+ Factor 1.0 always returns a copy of the original image, + lower factors mean less color (brightness, contrast, + etc), and higher values more. There are no restrictions + on this value. + :rtype: :py:class:`~PIL.Image.Image` + """ + return Image.blend(self.degenerate, self.image, factor) + + +class Color(_Enhance): + """Adjust image color balance. + + This class can be used to adjust the colour balance of an image, in + a manner similar to the controls on a colour TV set. An enhancement + factor of 0.0 gives a black and white image. A factor of 1.0 gives + the original image. + """ + + def __init__(self, image): + self.image = image + self.intermediate_mode = "L" + if "A" in image.getbands(): + self.intermediate_mode = "LA" + + self.degenerate = image.convert(self.intermediate_mode).convert(image.mode) + + +class Contrast(_Enhance): + """Adjust image contrast. + + This class can be used to control the contrast of an image, similar + to the contrast control on a TV set. An enhancement factor of 0.0 + gives a solid grey image. A factor of 1.0 gives the original image. + """ + + def __init__(self, image): + self.image = image + mean = int(ImageStat.Stat(image.convert("L")).mean[0] + 0.5) + self.degenerate = Image.new("L", image.size, mean).convert(image.mode) + + if "A" in image.getbands(): + self.degenerate.putalpha(image.getchannel("A")) + + +class Brightness(_Enhance): + """Adjust image brightness. + + This class can be used to control the brightness of an image. An + enhancement factor of 0.0 gives a black image. A factor of 1.0 gives the + original image. + """ + + def __init__(self, image): + self.image = image + self.degenerate = Image.new(image.mode, image.size, 0) + + if "A" in image.getbands(): + self.degenerate.putalpha(image.getchannel("A")) + + +class Sharpness(_Enhance): + """Adjust image sharpness. + + This class can be used to adjust the sharpness of an image. 
An + enhancement factor of 0.0 gives a blurred image, a factor of 1.0 gives the + original image, and a factor of 2.0 gives a sharpened image. + """ + + def __init__(self, image): + self.image = image + self.degenerate = image.filter(ImageFilter.SMOOTH) + + if "A" in image.getbands(): + self.degenerate.putalpha(image.getchannel("A")) diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageFile.py b/.venv/lib/python3.9/site-packages/PIL/ImageFile.py new file mode 100644 index 00000000..43d2bf0c --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageFile.py @@ -0,0 +1,699 @@ +# +# The Python Imaging Library. +# $Id$ +# +# base class for image file handlers +# +# history: +# 1995-09-09 fl Created +# 1996-03-11 fl Fixed load mechanism. +# 1996-04-15 fl Added pcx/xbm decoders. +# 1996-04-30 fl Added encoders. +# 1996-12-14 fl Added load helpers +# 1997-01-11 fl Use encode_to_file where possible +# 1997-08-27 fl Flush output in _save +# 1998-03-05 fl Use memory mapping for some modes +# 1999-02-04 fl Use memory mapping also for "I;16" and "I;16B" +# 1999-05-31 fl Added image parser +# 2000-10-12 fl Set readonly flag on memory-mapped images +# 2002-03-20 fl Use better messages for common decoder errors +# 2003-04-21 fl Fall back on mmap/map_buffer if map is not available +# 2003-10-30 fl Added StubImageFile class +# 2004-02-25 fl Made incremental parser more robust +# +# Copyright (c) 1997-2004 by Secret Labs AB +# Copyright (c) 1995-2004 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import io +import struct +import sys +import warnings + +from . import Image +from ._util import isPath + +MAXBLOCK = 65536 + +SAFEBLOCK = 1024 * 1024 + +LOAD_TRUNCATED_IMAGES = False +"""Whether or not to load truncated image files. 
User code may change this.""" + +ERRORS = { + -1: "image buffer overrun error", + -2: "decoding error", + -3: "unknown error", + -8: "bad configuration", + -9: "out of memory error", +} +"""Dict of known error codes returned from :meth:`.PyDecoder.decode`.""" + + +# +# -------------------------------------------------------------------- +# Helpers + + +def raise_oserror(error): + try: + message = Image.core.getcodecstatus(error) + except AttributeError: + message = ERRORS.get(error) + if not message: + message = f"decoder error {error}" + raise OSError(message + " when reading image file") + + +def raise_ioerror(error): + warnings.warn( + "raise_ioerror is deprecated and will be removed in Pillow 9 (2022-01-02). " + "Use raise_oserror instead.", + DeprecationWarning, + ) + return raise_oserror(error) + + +def _tilesort(t): + # sort on offset + return t[2] + + +# +# -------------------------------------------------------------------- +# ImageFile base class + + +class ImageFile(Image.Image): + """Base class for image file format handlers.""" + + def __init__(self, fp=None, filename=None): + super().__init__() + + self._min_frame = 0 + + self.custom_mimetype = None + + self.tile = None + """ A list of tile descriptors, or ``None`` """ + + self.readonly = 1 # until we know better + + self.decoderconfig = () + self.decodermaxblock = MAXBLOCK + + if isPath(fp): + # filename + self.fp = open(fp, "rb") + self.filename = fp + self._exclusive_fp = True + else: + # stream + self.fp = fp + self.filename = filename + # can be overridden + self._exclusive_fp = None + + try: + try: + self._open() + except ( + IndexError, # end of data + TypeError, # end of data (ord) + KeyError, # unsupported mode + EOFError, # got header but not the first frame + struct.error, + ) as v: + raise SyntaxError(v) from v + + if not self.mode or self.size[0] <= 0: + raise SyntaxError("not identified by this driver") + except BaseException: + # close the file only if we have opened it this constructor 
+ if self._exclusive_fp: + self.fp.close() + raise + + def get_format_mimetype(self): + if self.custom_mimetype: + return self.custom_mimetype + if self.format is not None: + return Image.MIME.get(self.format.upper()) + + def verify(self): + """Check file integrity""" + + # raise exception if something's wrong. must be called + # directly after open, and closes file when finished. + if self._exclusive_fp: + self.fp.close() + self.fp = None + + def load(self): + """Load image data based on tile list""" + + if self.tile is None: + raise OSError("cannot load this image") + + pixel = Image.Image.load(self) + if not self.tile: + return pixel + + self.map = None + use_mmap = self.filename and len(self.tile) == 1 + # As of pypy 2.1.0, memory mapping was failing here. + use_mmap = use_mmap and not hasattr(sys, "pypy_version_info") + + readonly = 0 + + # look for read/seek overrides + try: + read = self.load_read + # don't use mmap if there are custom read/seek functions + use_mmap = False + except AttributeError: + read = self.fp.read + + try: + seek = self.load_seek + use_mmap = False + except AttributeError: + seek = self.fp.seek + + if use_mmap: + # try memory mapping + decoder_name, extents, offset, args = self.tile[0] + if ( + decoder_name == "raw" + and len(args) >= 3 + and args[0] == self.mode + and args[0] in Image._MAPMODES + ): + try: + # use mmap, if possible + import mmap + + with open(self.filename) as fp: + self.map = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ) + self.im = Image.core.map_buffer( + self.map, self.size, decoder_name, offset, args + ) + readonly = 1 + # After trashing self.im, + # we might need to reload the palette data. 
+ if self.palette: + self.palette.dirty = 1 + except (AttributeError, OSError, ImportError): + self.map = None + + self.load_prepare() + err_code = -3 # initialize to unknown error + if not self.map: + # sort tiles in file order + self.tile.sort(key=_tilesort) + + try: + # FIXME: This is a hack to handle TIFF's JpegTables tag. + prefix = self.tile_prefix + except AttributeError: + prefix = b"" + + for decoder_name, extents, offset, args in self.tile: + decoder = Image._getdecoder( + self.mode, decoder_name, args, self.decoderconfig + ) + try: + seek(offset) + decoder.setimage(self.im, extents) + if decoder.pulls_fd: + decoder.setfd(self.fp) + status, err_code = decoder.decode(b"") + else: + b = prefix + while True: + try: + s = read(self.decodermaxblock) + except (IndexError, struct.error) as e: + # truncated png/gif + if LOAD_TRUNCATED_IMAGES: + break + else: + raise OSError("image file is truncated") from e + + if not s: # truncated jpeg + if LOAD_TRUNCATED_IMAGES: + break + else: + raise OSError( + "image file is truncated " + f"({len(b)} bytes not processed)" + ) + + b = b + s + n, err_code = decoder.decode(b) + if n < 0: + break + b = b[n:] + finally: + # Need to cleanup here to prevent leaks + decoder.cleanup() + + self.tile = [] + self.readonly = readonly + + self.load_end() + + if self._exclusive_fp and self._close_exclusive_fp_after_loading: + self.fp.close() + self.fp = None + + if not self.map and not LOAD_TRUNCATED_IMAGES and err_code < 0: + # still raised if decoder fails to return anything + raise_oserror(err_code) + + return Image.Image.load(self) + + def load_prepare(self): + # create image memory if necessary + if not self.im or self.im.mode != self.mode or self.im.size != self.size: + self.im = Image.core.new(self.mode, self.size) + # create palette (optional) + if self.mode == "P": + Image.Image.load(self) + + def load_end(self): + # may be overridden + pass + + # may be defined for contained formats + # def load_seek(self, pos): + # pass + + # 
may be defined for blocked formats (e.g. PNG) + # def load_read(self, bytes): + # pass + + def _seek_check(self, frame): + if ( + frame < self._min_frame + # Only check upper limit on frames if additional seek operations + # are not required to do so + or ( + not (hasattr(self, "_n_frames") and self._n_frames is None) + and frame >= self.n_frames + self._min_frame + ) + ): + raise EOFError("attempt to seek outside sequence") + + return self.tell() != frame + + +class StubImageFile(ImageFile): + """ + Base class for stub image loaders. + + A stub loader is an image loader that can identify files of a + certain format, but relies on external code to load the file. + """ + + def _open(self): + raise NotImplementedError("StubImageFile subclass must implement _open") + + def load(self): + loader = self._load() + if loader is None: + raise OSError(f"cannot find loader for this {self.format} file") + image = loader.load(self) + assert image is not None + # become the other object (!) + self.__class__ = image.__class__ + self.__dict__ = image.__dict__ + + def _load(self): + """(Hook) Find actual image loader.""" + raise NotImplementedError("StubImageFile subclass must implement _load") + + +class Parser: + """ + Incremental image parser. This class implements the standard + feed/close consumer interface. + """ + + incremental = None + image = None + data = None + decoder = None + offset = 0 + finished = 0 + + def reset(self): + """ + (Consumer) Reset the parser. Note that you can only call this + method immediately after you've created a parser; parser + instances cannot be reused. + """ + assert self.data is None, "cannot reuse parsers" + + def feed(self, data): + """ + (Consumer) Feed data to the parser. + + :param data: A string buffer. + :exception OSError: If the parser failed to parse the image file. 
+ """ + # collect data + + if self.finished: + return + + if self.data is None: + self.data = data + else: + self.data = self.data + data + + # parse what we have + if self.decoder: + + if self.offset > 0: + # skip header + skip = min(len(self.data), self.offset) + self.data = self.data[skip:] + self.offset = self.offset - skip + if self.offset > 0 or not self.data: + return + + n, e = self.decoder.decode(self.data) + + if n < 0: + # end of stream + self.data = None + self.finished = 1 + if e < 0: + # decoding error + self.image = None + raise_oserror(e) + else: + # end of image + return + self.data = self.data[n:] + + elif self.image: + + # if we end up here with no decoder, this file cannot + # be incrementally parsed. wait until we've gotten all + # available data + pass + + else: + + # attempt to open this file + try: + with io.BytesIO(self.data) as fp: + im = Image.open(fp) + except OSError: + # traceback.print_exc() + pass # not enough data + else: + flag = hasattr(im, "load_seek") or hasattr(im, "load_read") + if flag or len(im.tile) != 1: + # custom load code, or multiple tiles + self.decode = None + else: + # initialize decoder + im.load_prepare() + d, e, o, a = im.tile[0] + im.tile = [] + self.decoder = Image._getdecoder(im.mode, d, a, im.decoderconfig) + self.decoder.setimage(im.im, e) + + # calculate decoder offset + self.offset = o + if self.offset <= len(self.data): + self.data = self.data[self.offset :] + self.offset = 0 + + self.image = im + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def close(self): + """ + (Consumer) Close the stream. + + :returns: An image object. + :exception OSError: If the parser failed to parse the image file either + because it cannot be identified or cannot be + decoded. 
+ """ + # finish decoding + if self.decoder: + # get rid of what's left in the buffers + self.feed(b"") + self.data = self.decoder = None + if not self.finished: + raise OSError("image was incomplete") + if not self.image: + raise OSError("cannot parse this image") + if self.data: + # incremental parsing not possible; reopen the file + # not that we have all data + with io.BytesIO(self.data) as fp: + try: + self.image = Image.open(fp) + finally: + self.image.load() + return self.image + + +# -------------------------------------------------------------------- + + +def _save(im, fp, tile, bufsize=0): + """Helper to save image based on tile list + + :param im: Image object. + :param fp: File object. + :param tile: Tile list. + :param bufsize: Optional buffer size + """ + + im.load() + if not hasattr(im, "encoderconfig"): + im.encoderconfig = () + tile.sort(key=_tilesort) + # FIXME: make MAXBLOCK a configuration parameter + # It would be great if we could have the encoder specify what it needs + # But, it would need at least the image size in most cases. RawEncode is + # a tricky case. 
+ bufsize = max(MAXBLOCK, bufsize, im.size[0] * 4) # see RawEncode.c + try: + stdout = fp == sys.stdout or fp == sys.stdout.buffer + except (OSError, AttributeError): + stdout = False + if stdout: + fp.flush() + return + try: + fh = fp.fileno() + fp.flush() + except (AttributeError, io.UnsupportedOperation) as exc: + # compress to Python file-compatible object + for e, b, o, a in tile: + e = Image._getencoder(im.mode, e, a, im.encoderconfig) + if o > 0: + fp.seek(o) + e.setimage(im.im, b) + if e.pushes_fd: + e.setfd(fp) + l, s = e.encode_to_pyfd() + else: + while True: + l, s, d = e.encode(bufsize) + fp.write(d) + if s: + break + if s < 0: + raise OSError(f"encoder error {s} when writing image file") from exc + e.cleanup() + else: + # slight speedup: compress to real file object + for e, b, o, a in tile: + e = Image._getencoder(im.mode, e, a, im.encoderconfig) + if o > 0: + fp.seek(o) + e.setimage(im.im, b) + if e.pushes_fd: + e.setfd(fp) + l, s = e.encode_to_pyfd() + else: + s = e.encode_to_file(fh, bufsize) + if s < 0: + raise OSError(f"encoder error {s} when writing image file") + e.cleanup() + if hasattr(fp, "flush"): + fp.flush() + + +def _safe_read(fp, size): + """ + Reads large blocks in a safe way. Unlike fp.read(n), this function + doesn't trust the user. If the requested size is larger than + SAFEBLOCK, the file is read block by block. + + :param fp: File handle. Must implement a read method. + :param size: Number of bytes to read. + :returns: A string containing size bytes of data. 
+ + Raises an OSError if the file is truncated and the read cannot be completed + + """ + if size <= 0: + return b"" + if size <= SAFEBLOCK: + data = fp.read(size) + if len(data) < size: + raise OSError("Truncated File Read") + return data + data = [] + while size > 0: + block = fp.read(min(size, SAFEBLOCK)) + if not block: + break + data.append(block) + size -= len(block) + if sum(len(d) for d in data) < size: + raise OSError("Truncated File Read") + return b"".join(data) + + +class PyCodecState: + def __init__(self): + self.xsize = 0 + self.ysize = 0 + self.xoff = 0 + self.yoff = 0 + + def extents(self): + return (self.xoff, self.yoff, self.xoff + self.xsize, self.yoff + self.ysize) + + +class PyDecoder: + """ + Python implementation of a format decoder. Override this class and + add the decoding logic in the :meth:`decode` method. + + See :ref:`Writing Your Own File Decoder in Python` + """ + + _pulls_fd = False + + def __init__(self, mode, *args): + self.im = None + self.state = PyCodecState() + self.fd = None + self.mode = mode + self.init(args) + + def init(self, args): + """ + Override to perform decoder specific initialization + + :param args: Array of args items from the tile entry + :returns: None + """ + self.args = args + + @property + def pulls_fd(self): + return self._pulls_fd + + def decode(self, buffer): + """ + Override to perform the decoding process. + + :param buffer: A bytes object with the data to be decoded. + :returns: A tuple of ``(bytes consumed, errcode)``. + If finished with decoding return <0 for the bytes consumed. + Err codes are from :data:`.ImageFile.ERRORS`. 
+ """ + raise NotImplementedError() + + def cleanup(self): + """ + Override to perform decoder specific cleanup + + :returns: None + """ + pass + + def setfd(self, fd): + """ + Called from ImageFile to set the python file-like object + + :param fd: A python file-like object + :returns: None + """ + self.fd = fd + + def setimage(self, im, extents=None): + """ + Called from ImageFile to set the core output image for the decoder + + :param im: A core image object + :param extents: a 4 tuple of (x0, y0, x1, y1) defining the rectangle + for this tile + :returns: None + """ + + # following c code + self.im = im + + if extents: + (x0, y0, x1, y1) = extents + else: + (x0, y0, x1, y1) = (0, 0, 0, 0) + + if x0 == 0 and x1 == 0: + self.state.xsize, self.state.ysize = self.im.size + else: + self.state.xoff = x0 + self.state.yoff = y0 + self.state.xsize = x1 - x0 + self.state.ysize = y1 - y0 + + if self.state.xsize <= 0 or self.state.ysize <= 0: + raise ValueError("Size cannot be negative") + + if ( + self.state.xsize + self.state.xoff > self.im.size[0] + or self.state.ysize + self.state.yoff > self.im.size[1] + ): + raise ValueError("Tile cannot extend outside image") + + def set_as_raw(self, data, rawmode=None): + """ + Convenience method to set the internal image from a stream of raw data + + :param data: Bytes to be set + :param rawmode: The rawmode to be used for the decoder. 
+ If not specified, it will default to the mode of the image + :returns: None + """ + + if not rawmode: + rawmode = self.mode + d = Image._getdecoder(self.mode, "raw", (rawmode)) + d.setimage(self.im, self.state.extents()) + s = d.decode(data) + + if s[0] >= 0: + raise ValueError("not enough image data") + if s[1] != 0: + raise ValueError("cannot decode image data") diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageFilter.py b/.venv/lib/python3.9/site-packages/PIL/ImageFilter.py new file mode 100644 index 00000000..d2ece375 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageFilter.py @@ -0,0 +1,538 @@ +# +# The Python Imaging Library. +# $Id$ +# +# standard filters +# +# History: +# 1995-11-27 fl Created +# 2002-06-08 fl Added rank and mode filters +# 2003-09-15 fl Fixed rank calculation in rank filter; added expand call +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1995-2002 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# +import functools + + +class Filter: + pass + + +class MultibandFilter(Filter): + pass + + +class BuiltinFilter(MultibandFilter): + def filter(self, image): + if image.mode == "P": + raise ValueError("cannot filter palette images") + return image.filter(*self.filterargs) + + +class Kernel(BuiltinFilter): + """ + Create a convolution kernel. The current version only + supports 3x3 and 5x5 integer and floating point kernels. + + In the current version, kernels can only be applied to + "L" and "RGB" images. + + :param size: Kernel size, given as (width, height). In the current + version, this must be (3,3) or (5,5). + :param kernel: A sequence containing kernel weights. + :param scale: Scale factor. If given, the result for each pixel is + divided by this value. The default is the sum of the + kernel weights. + :param offset: Offset. If given, this value is added to the result, + after it has been divided by the scale factor. 
+ """ + + name = "Kernel" + + def __init__(self, size, kernel, scale=None, offset=0): + if scale is None: + # default scale is sum of kernel + scale = functools.reduce(lambda a, b: a + b, kernel) + if size[0] * size[1] != len(kernel): + raise ValueError("not enough coefficients in kernel") + self.filterargs = size, scale, offset, kernel + + +class RankFilter(Filter): + """ + Create a rank filter. The rank filter sorts all pixels in + a window of the given size, and returns the ``rank``'th value. + + :param size: The kernel size, in pixels. + :param rank: What pixel value to pick. Use 0 for a min filter, + ``size * size / 2`` for a median filter, ``size * size - 1`` + for a max filter, etc. + """ + + name = "Rank" + + def __init__(self, size, rank): + self.size = size + self.rank = rank + + def filter(self, image): + if image.mode == "P": + raise ValueError("cannot filter palette images") + image = image.expand(self.size // 2, self.size // 2) + return image.rankfilter(self.size, self.rank) + + +class MedianFilter(RankFilter): + """ + Create a median filter. Picks the median pixel value in a window with the + given size. + + :param size: The kernel size, in pixels. + """ + + name = "Median" + + def __init__(self, size=3): + self.size = size + self.rank = size * size // 2 + + +class MinFilter(RankFilter): + """ + Create a min filter. Picks the lowest pixel value in a window with the + given size. + + :param size: The kernel size, in pixels. + """ + + name = "Min" + + def __init__(self, size=3): + self.size = size + self.rank = 0 + + +class MaxFilter(RankFilter): + """ + Create a max filter. Picks the largest pixel value in a window with the + given size. + + :param size: The kernel size, in pixels. + """ + + name = "Max" + + def __init__(self, size=3): + self.size = size + self.rank = size * size - 1 + + +class ModeFilter(Filter): + """ + Create a mode filter. Picks the most frequent pixel value in a box with the + given size. 
Pixel values that occur only once or twice are ignored; if no + pixel value occurs more than twice, the original pixel value is preserved. + + :param size: The kernel size, in pixels. + """ + + name = "Mode" + + def __init__(self, size=3): + self.size = size + + def filter(self, image): + return image.modefilter(self.size) + + +class GaussianBlur(MultibandFilter): + """Blurs the image with a sequence of extended box filters, which + approximates a Gaussian kernel. For details on accuracy see + + + :param radius: Standard deviation of the Gaussian kernel. + """ + + name = "GaussianBlur" + + def __init__(self, radius=2): + self.radius = radius + + def filter(self, image): + return image.gaussian_blur(self.radius) + + +class BoxBlur(MultibandFilter): + """Blurs the image by setting each pixel to the average value of the pixels + in a square box extending radius pixels in each direction. + Supports float radius of arbitrary size. Uses an optimized implementation + which runs in linear time relative to the size of the image + for any radius value. + + :param radius: Size of the box in one direction. Radius 0 does not blur, + returns an identical image. Radius 1 takes 1 pixel + in each direction, i.e. 9 pixels in total. + """ + + name = "BoxBlur" + + def __init__(self, radius): + self.radius = radius + + def filter(self, image): + return image.box_blur(self.radius) + + +class UnsharpMask(MultibandFilter): + """Unsharp mask filter. + + See Wikipedia's entry on `digital unsharp masking`_ for an explanation of + the parameters. + + :param radius: Blur Radius + :param percent: Unsharp strength, in percent + :param threshold: Threshold controls the minimum brightness change that + will be sharpened + + .. 
_digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking + + """ # noqa: E501 + + name = "UnsharpMask" + + def __init__(self, radius=2, percent=150, threshold=3): + self.radius = radius + self.percent = percent + self.threshold = threshold + + def filter(self, image): + return image.unsharp_mask(self.radius, self.percent, self.threshold) + + +class BLUR(BuiltinFilter): + name = "Blur" + # fmt: off + filterargs = (5, 5), 16, 0, ( + 1, 1, 1, 1, 1, + 1, 0, 0, 0, 1, + 1, 0, 0, 0, 1, + 1, 0, 0, 0, 1, + 1, 1, 1, 1, 1, + ) + # fmt: on + + +class CONTOUR(BuiltinFilter): + name = "Contour" + # fmt: off + filterargs = (3, 3), 1, 255, ( + -1, -1, -1, + -1, 8, -1, + -1, -1, -1, + ) + # fmt: on + + +class DETAIL(BuiltinFilter): + name = "Detail" + # fmt: off + filterargs = (3, 3), 6, 0, ( + 0, -1, 0, + -1, 10, -1, + 0, -1, 0, + ) + # fmt: on + + +class EDGE_ENHANCE(BuiltinFilter): + name = "Edge-enhance" + # fmt: off + filterargs = (3, 3), 2, 0, ( + -1, -1, -1, + -1, 10, -1, + -1, -1, -1, + ) + # fmt: on + + +class EDGE_ENHANCE_MORE(BuiltinFilter): + name = "Edge-enhance More" + # fmt: off + filterargs = (3, 3), 1, 0, ( + -1, -1, -1, + -1, 9, -1, + -1, -1, -1, + ) + # fmt: on + + +class EMBOSS(BuiltinFilter): + name = "Emboss" + # fmt: off + filterargs = (3, 3), 1, 128, ( + -1, 0, 0, + 0, 1, 0, + 0, 0, 0, + ) + # fmt: on + + +class FIND_EDGES(BuiltinFilter): + name = "Find Edges" + # fmt: off + filterargs = (3, 3), 1, 0, ( + -1, -1, -1, + -1, 8, -1, + -1, -1, -1, + ) + # fmt: on + + +class SHARPEN(BuiltinFilter): + name = "Sharpen" + # fmt: off + filterargs = (3, 3), 16, 0, ( + -2, -2, -2, + -2, 32, -2, + -2, -2, -2, + ) + # fmt: on + + +class SMOOTH(BuiltinFilter): + name = "Smooth" + # fmt: off + filterargs = (3, 3), 13, 0, ( + 1, 1, 1, + 1, 5, 1, + 1, 1, 1, + ) + # fmt: on + + +class SMOOTH_MORE(BuiltinFilter): + name = "Smooth More" + # fmt: off + filterargs = (5, 5), 100, 0, ( + 1, 1, 1, 1, 1, + 1, 5, 5, 5, 1, + 1, 5, 44, 5, 1, + 1, 
5, 5, 5, 1, + 1, 1, 1, 1, 1, + ) + # fmt: on + + +class Color3DLUT(MultibandFilter): + """Three-dimensional color lookup table. + + Transforms 3-channel pixels using the values of the channels as coordinates + in the 3D lookup table and interpolating the nearest elements. + + This method allows you to apply almost any color transformation + in constant time by using pre-calculated decimated tables. + + .. versionadded:: 5.2.0 + + :param size: Size of the table. One int or tuple of (int, int, int). + Minimal size in any dimension is 2, maximum is 65. + :param table: Flat lookup table. A list of ``channels * size**3`` + float elements or a list of ``size**3`` channels-sized + tuples with floats. Channels are changed first, + then first dimension, then second, then third. + Value 0.0 corresponds lowest value of output, 1.0 highest. + :param channels: Number of channels in the table. Could be 3 or 4. + Default is 3. + :param target_mode: A mode for the result image. Should have not less + than ``channels`` channels. Default is ``None``, + which means that mode wouldn't be changed. + """ + + name = "Color 3D LUT" + + def __init__(self, size, table, channels=3, target_mode=None, **kwargs): + if channels not in (3, 4): + raise ValueError("Only 3 or 4 output channels are supported") + self.size = size = self._check_size(size) + self.channels = channels + self.mode = target_mode + + # Hidden flag `_copy_table=False` could be used to avoid extra copying + # of the table if the table is specially made for the constructor. 
+ copy_table = kwargs.get("_copy_table", True) + items = size[0] * size[1] * size[2] + wrong_size = False + + numpy = None + if hasattr(table, "shape"): + try: + import numpy + except ImportError: # pragma: no cover + pass + + if numpy and isinstance(table, numpy.ndarray): + if copy_table: + table = table.copy() + + if table.shape in [ + (items * channels,), + (items, channels), + (size[2], size[1], size[0], channels), + ]: + table = table.reshape(items * channels) + else: + wrong_size = True + + else: + if copy_table: + table = list(table) + + # Convert to a flat list + if table and isinstance(table[0], (list, tuple)): + table, raw_table = [], table + for pixel in raw_table: + if len(pixel) != channels: + raise ValueError( + "The elements of the table should " + "have a length of {}.".format(channels) + ) + table.extend(pixel) + + if wrong_size or len(table) != items * channels: + raise ValueError( + "The table should have either channels * size**3 float items " + "or size**3 items of channels-sized tuples with floats. " + f"Table should be: {channels}x{size[0]}x{size[1]}x{size[2]}. " + f"Actual length: {len(table)}" + ) + self.table = table + + @staticmethod + def _check_size(size): + try: + _, _, _ = size + except ValueError as e: + raise ValueError( + "Size should be either an integer or a tuple of three integers." + ) from e + except TypeError: + size = (size, size, size) + size = [int(x) for x in size] + for size1D in size: + if not 2 <= size1D <= 65: + raise ValueError("Size should be in [2, 65] range.") + return size + + @classmethod + def generate(cls, size, callback, channels=3, target_mode=None): + """Generates new LUT using provided callback. + + :param size: Size of the table. Passed to the constructor. + :param callback: Function with three parameters which correspond + three color channels. Will be called ``size**3`` + times with values from 0.0 to 1.0 and should return + a tuple with ``channels`` elements. 
+ :param channels: The number of channels which should return callback. + :param target_mode: Passed to the constructor of the resulting + lookup table. + """ + size1D, size2D, size3D = cls._check_size(size) + if channels not in (3, 4): + raise ValueError("Only 3 or 4 output channels are supported") + + table = [0] * (size1D * size2D * size3D * channels) + idx_out = 0 + for b in range(size3D): + for g in range(size2D): + for r in range(size1D): + table[idx_out : idx_out + channels] = callback( + r / (size1D - 1), g / (size2D - 1), b / (size3D - 1) + ) + idx_out += channels + + return cls( + (size1D, size2D, size3D), + table, + channels=channels, + target_mode=target_mode, + _copy_table=False, + ) + + def transform(self, callback, with_normals=False, channels=None, target_mode=None): + """Transforms the table values using provided callback and returns + a new LUT with altered values. + + :param callback: A function which takes old lookup table values + and returns a new set of values. The number + of arguments which function should take is + ``self.channels`` or ``3 + self.channels`` + if ``with_normals`` flag is set. + Should return a tuple of ``self.channels`` or + ``channels`` elements if it is set. + :param with_normals: If true, ``callback`` will be called with + coordinates in the color cube as the first + three arguments. Otherwise, ``callback`` + will be called only with actual color values. + :param channels: The number of channels in the resulting lookup table. + :param target_mode: Passed to the constructor of the resulting + lookup table. 
+ """ + if channels not in (None, 3, 4): + raise ValueError("Only 3 or 4 output channels are supported") + ch_in = self.channels + ch_out = channels or ch_in + size1D, size2D, size3D = self.size + + table = [0] * (size1D * size2D * size3D * ch_out) + idx_in = 0 + idx_out = 0 + for b in range(size3D): + for g in range(size2D): + for r in range(size1D): + values = self.table[idx_in : idx_in + ch_in] + if with_normals: + values = callback( + r / (size1D - 1), + g / (size2D - 1), + b / (size3D - 1), + *values, + ) + else: + values = callback(*values) + table[idx_out : idx_out + ch_out] = values + idx_in += ch_in + idx_out += ch_out + + return type(self)( + self.size, + table, + channels=ch_out, + target_mode=target_mode or self.mode, + _copy_table=False, + ) + + def __repr__(self): + r = [ + f"{self.__class__.__name__} from {self.table.__class__.__name__}", + "size={:d}x{:d}x{:d}".format(*self.size), + f"channels={self.channels:d}", + ] + if self.mode: + r.append(f"target_mode={self.mode}") + return "<{}>".format(" ".join(r)) + + def filter(self, image): + from . import Image + + return image.color_lut_3d( + self.mode or image.mode, + Image.LINEAR, + self.channels, + self.size[0], + self.size[1], + self.size[2], + self.table, + ) diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageFont.py b/.venv/lib/python3.9/site-packages/PIL/ImageFont.py new file mode 100644 index 00000000..e99ca21b --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageFont.py @@ -0,0 +1,1060 @@ +# +# The Python Imaging Library. 
+# $Id$ +# +# PIL raster font management +# +# History: +# 1996-08-07 fl created (experimental) +# 1997-08-25 fl minor adjustments to handle fonts from pilfont 0.3 +# 1999-02-06 fl rewrote most font management stuff in C +# 1999-03-17 fl take pth files into account in load_path (from Richard Jones) +# 2001-02-17 fl added freetype support +# 2001-05-09 fl added TransposedFont wrapper class +# 2002-03-04 fl make sure we have a "L" or "1" font +# 2002-12-04 fl skip non-directory entries in the system path +# 2003-04-29 fl add embedded default font +# 2003-09-27 fl added support for truetype charmap encodings +# +# Todo: +# Adapt to PILFONT2 format (16-bit fonts, compressed, single file) +# +# Copyright (c) 1997-2003 by Secret Labs AB +# Copyright (c) 1996-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import base64 +import os +import sys +import warnings +from io import BytesIO + +from . import Image, features +from ._util import isDirectory, isPath + +LAYOUT_BASIC = 0 +LAYOUT_RAQM = 1 + + +class _imagingft_not_installed: + # module placeholder + def __getattr__(self, id): + raise ImportError("The _imagingft C module is not installed") + + +try: + from . import _imagingft as core +except ImportError: + core = _imagingft_not_installed() + + +# FIXME: add support for pilfont2 format (see FontFile.py) + +# -------------------------------------------------------------------- +# Font metrics format: +# "PILfont" LF +# fontdescriptor LF +# (optional) key=value... LF +# "DATA" LF +# binary data: 256*10*2 bytes (dx, dy, dstbox, srcbox) +# +# To place a character, cut out srcbox and paste at dstbox, +# relative to the character position. Then move the character +# position according to dx, dy. 
+# -------------------------------------------------------------------- + + +class ImageFont: + "PIL font wrapper" + + def _load_pilfont(self, filename): + + with open(filename, "rb") as fp: + image = None + for ext in (".png", ".gif", ".pbm"): + if image: + image.close() + try: + fullname = os.path.splitext(filename)[0] + ext + image = Image.open(fullname) + except Exception: + pass + else: + if image and image.mode in ("1", "L"): + break + else: + if image: + image.close() + raise OSError("cannot find glyph data file") + + self.file = fullname + + self._load_pilfont_data(fp, image) + image.close() + + def _load_pilfont_data(self, file, image): + + # read PILfont header + if file.readline() != b"PILfont\n": + raise SyntaxError("Not a PILfont file") + file.readline().split(b";") + self.info = [] # FIXME: should be a dictionary + while True: + s = file.readline() + if not s or s == b"DATA\n": + break + self.info.append(s) + + # read PILfont metrics + data = file.read(256 * 20) + + # check image + if image.mode not in ("1", "L"): + raise TypeError("invalid font image mode") + + image.load() + + self.font = Image.core.font(image.im, data) + + def getsize(self, text, *args, **kwargs): + """ + Returns width and height (in pixels) of given text. + + :param text: Text to measure. + + :return: (width, height) + """ + return self.font.getsize(text) + + def getmask(self, text, mode="", *args, **kwargs): + """ + Create a bitmap for the text. + + If the font uses antialiasing, the bitmap should have mode ``L`` and use a + maximum value of 255. Otherwise, it should have mode ``1``. + + :param text: Text to render. + :param mode: Used by some graphics drivers to indicate what mode the + driver prefers; if empty, the renderer may return either + mode. Note that the mode is always a string, to simplify + C-level implementations. + + .. versionadded:: 1.1.5 + + :return: An internal PIL storage memory instance as defined by the + :py:mod:`PIL.Image.core` interface module. 
+ """ + return self.font.getmask(text, mode) + + +## +# Wrapper for FreeType fonts. Application code should use the +# truetype factory function to create font objects. + + +class FreeTypeFont: + "FreeType font wrapper (requires _imagingft service)" + + def __init__(self, font=None, size=10, index=0, encoding="", layout_engine=None): + # FIXME: use service provider instead + + self.path = font + self.size = size + self.index = index + self.encoding = encoding + + try: + from packaging.version import parse as parse_version + except ImportError: + pass + else: + freetype_version = features.version_module("freetype2") + if freetype_version is not None and parse_version( + freetype_version + ) < parse_version("2.8"): + warnings.warn( + "Support for FreeType 2.7 is deprecated and will be removed" + " in Pillow 9 (2022-01-02). Please upgrade to FreeType 2.8 " + "or newer, preferably FreeType 2.10.4 which fixes " + "CVE-2020-15999.", + DeprecationWarning, + ) + + if layout_engine not in (LAYOUT_BASIC, LAYOUT_RAQM): + layout_engine = LAYOUT_BASIC + if core.HAVE_RAQM: + layout_engine = LAYOUT_RAQM + elif layout_engine == LAYOUT_RAQM and not core.HAVE_RAQM: + layout_engine = LAYOUT_BASIC + + self.layout_engine = layout_engine + + def load_from_bytes(f): + self.font_bytes = f.read() + self.font = core.getfont( + "", size, index, encoding, self.font_bytes, layout_engine + ) + + if isPath(font): + if sys.platform == "win32": + font_bytes_path = font if isinstance(font, bytes) else font.encode() + try: + font_bytes_path.decode("ascii") + except UnicodeDecodeError: + # FreeType cannot load fonts with non-ASCII characters on Windows + # So load it into memory first + with open(font, "rb") as f: + load_from_bytes(f) + return + self.font = core.getfont( + font, size, index, encoding, layout_engine=layout_engine + ) + else: + load_from_bytes(font) + + def _multiline_split(self, text): + split_character = "\n" if isinstance(text, str) else b"\n" + return text.split(split_character) + 
+ def getname(self): + """ + :return: A tuple of the font family (e.g. Helvetica) and the font style + (e.g. Bold) + """ + return self.font.family, self.font.style + + def getmetrics(self): + """ + :return: A tuple of the font ascent (the distance from the baseline to + the highest outline point) and descent (the distance from the + baseline to the lowest outline point, a negative value) + """ + return self.font.ascent, self.font.descent + + def getlength(self, text, mode="", direction=None, features=None, language=None): + """ + Returns length (in pixels with 1/64 precision) of given text when rendered + in font with provided direction, features, and language. + + This is the amount by which following text should be offset. + Text bounding box may extend past the length in some fonts, + e.g. when using italics or accents. + + The result is returned as a float; it is a whole number if using basic layout. + + Note that the sum of two lengths may not equal the length of a concatenated + string due to kerning. If you need to adjust for kerning, include the following + character and subtract its length. + + For example, instead of + + .. code-block:: python + + hello = font.getlength("Hello") + world = font.getlength("World") + hello_world = hello + world # not adjusted for kerning + assert hello_world == font.getlength("HelloWorld") # may fail + + use + + .. code-block:: python + + hello = font.getlength("HelloW") - font.getlength("W") # adjusted for kerning + world = font.getlength("World") + hello_world = hello + world # adjusted for kerning + assert hello_world == font.getlength("HelloWorld") # True + + or disable kerning with (requires libraqm) + + .. code-block:: python + + hello = draw.textlength("Hello", font, features=["-kern"]) + world = draw.textlength("World", font, features=["-kern"]) + hello_world = hello + world # kerning is disabled, no need to adjust + assert hello_world == draw.textlength("HelloWorld", font, features=["-kern"]) + + .. 
versionadded:: 8.0.0 + + :param text: Text to measure. + :param mode: Used by some graphics drivers to indicate what mode the + driver prefers; if empty, the renderer may return either + mode. Note that the mode is always a string, to simplify + C-level implementations. + + :param direction: Direction of the text. It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + :param features: A list of OpenType font features to be used during text + layout. This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. To get all supported + features, see + https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + `_ + Requires libraqm. + + :return: Width for horizontal, height for vertical text. + """ + return self.font.getlength(text, mode, direction, features, language) / 64 + + def getbbox( + self, + text, + mode="", + direction=None, + features=None, + language=None, + stroke_width=0, + anchor=None, + ): + """ + Returns bounding box (in pixels) of given text relative to given anchor + when rendered in font with provided direction, features, and language. + + Use :py:meth:`getlength()` to get the offset of following text with + 1/64 pixel precision. The bounding box includes extra margins for + some fonts, e.g. italics or accents. + + .. versionadded:: 8.0.0 + + :param text: Text to render. 
+ :param mode: Used by some graphics drivers to indicate what mode the + driver prefers; if empty, the renderer may return either + mode. Note that the mode is always a string, to simplify + C-level implementations. + + :param direction: Direction of the text. It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + :param features: A list of OpenType font features to be used during text + layout. This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. To get all supported + features, see + https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + `_ + Requires libraqm. + + :param stroke_width: The width of the text stroke. + + :param anchor: The text anchor alignment. Determines the relative location of + the anchor to the text. The default alignment is top left. + See :ref:`text-anchors` for valid values. + + :return: ``(left, top, right, bottom)`` bounding box + """ + size, offset = self.font.getsize( + text, mode, direction, features, language, anchor + ) + left, top = offset[0] - stroke_width, offset[1] - stroke_width + width, height = size[0] + 2 * stroke_width, size[1] + 2 * stroke_width + return left, top, left + width, top + height + + def getsize( + self, text, direction=None, features=None, language=None, stroke_width=0 + ): + """ + Returns width and height (in pixels) of given text if rendered in font with + provided direction, features, and language. 
+ + Use :py:meth:`getlength()` to measure the offset of following text with + 1/64 pixel precision. + Use :py:meth:`getbbox()` to get the exact bounding box based on an anchor. + + .. note:: For historical reasons this function measures text height from + the ascender line instead of the top, see :ref:`text-anchors`. + If you wish to measure text height from the top, it is recommended + to use the bottom value of :meth:`getbbox` with ``anchor='lt'`` instead. + + :param text: Text to measure. + + :param direction: Direction of the text. It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param features: A list of OpenType font features to be used during text + layout. This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. To get all supported + features, see + https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + `_ + Requires libraqm. + + .. versionadded:: 6.0.0 + + :param stroke_width: The width of the text stroke. + + .. 
versionadded:: 6.2.0 + + :return: (width, height) + """ + # vertical offset is added for historical reasons + # see https://github.com/python-pillow/Pillow/pull/4910#discussion_r486682929 + size, offset = self.font.getsize(text, "L", direction, features, language) + return ( + size[0] + stroke_width * 2, + size[1] + stroke_width * 2 + offset[1], + ) + + def getsize_multiline( + self, + text, + direction=None, + spacing=4, + features=None, + language=None, + stroke_width=0, + ): + """ + Returns width and height (in pixels) of given text if rendered in font + with provided direction, features, and language, while respecting + newline characters. + + :param text: Text to measure. + + :param direction: Direction of the text. It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + :param spacing: The vertical gap between lines, defaulting to 4 pixels. + + :param features: A list of OpenType font features to be used during text + layout. This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. To get all supported + features, see + https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + `_ + Requires libraqm. + + .. versionadded:: 6.0.0 + + :param stroke_width: The width of the text stroke. + + .. 
versionadded:: 6.2.0 + + :return: (width, height) + """ + max_width = 0 + lines = self._multiline_split(text) + line_spacing = self.getsize("A", stroke_width=stroke_width)[1] + spacing + for line in lines: + line_width, line_height = self.getsize( + line, direction, features, language, stroke_width + ) + max_width = max(max_width, line_width) + + return max_width, len(lines) * line_spacing - spacing + + def getoffset(self, text): + """ + Returns the offset of given text. This is the gap between the + starting coordinate and the first marking. Note that this gap is + included in the result of :py:func:`~PIL.ImageFont.FreeTypeFont.getsize`. + + :param text: Text to measure. + + :return: A tuple of the x and y offset + """ + return self.font.getsize(text)[1] + + def getmask( + self, + text, + mode="", + direction=None, + features=None, + language=None, + stroke_width=0, + anchor=None, + ink=0, + ): + """ + Create a bitmap for the text. + + If the font uses antialiasing, the bitmap should have mode ``L`` and use a + maximum value of 255. If the font has embedded color data, the bitmap + should have mode ``RGBA``. Otherwise, it should have mode ``1``. + + :param text: Text to render. + :param mode: Used by some graphics drivers to indicate what mode the + driver prefers; if empty, the renderer may return either + mode. Note that the mode is always a string, to simplify + C-level implementations. + + .. versionadded:: 1.1.5 + + :param direction: Direction of the text. It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param features: A list of OpenType font features to be used during text + layout. This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. 
To get all supported + features, see + https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + `_ + Requires libraqm. + + .. versionadded:: 6.0.0 + + :param stroke_width: The width of the text stroke. + + .. versionadded:: 6.2.0 + + :param anchor: The text anchor alignment. Determines the relative location of + the anchor to the text. The default alignment is top left. + See :ref:`text-anchors` for valid values. + + .. versionadded:: 8.0.0 + + :param ink: Foreground ink for rendering in RGBA mode. + + .. versionadded:: 8.0.0 + + :return: An internal PIL storage memory instance as defined by the + :py:mod:`PIL.Image.core` interface module. + """ + return self.getmask2( + text, + mode, + direction=direction, + features=features, + language=language, + stroke_width=stroke_width, + anchor=anchor, + ink=ink, + )[0] + + def getmask2( + self, + text, + mode="", + fill=Image.core.fill, + direction=None, + features=None, + language=None, + stroke_width=0, + anchor=None, + ink=0, + *args, + **kwargs, + ): + """ + Create a bitmap for the text. + + If the font uses antialiasing, the bitmap should have mode ``L`` and use a + maximum value of 255. If the font has embedded color data, the bitmap + should have mode ``RGBA``. Otherwise, it should have mode ``1``. + + :param text: Text to render. + :param mode: Used by some graphics drivers to indicate what mode the + driver prefers; if empty, the renderer may return either + mode. Note that the mode is always a string, to simplify + C-level implementations. + + .. versionadded:: 1.1.5 + + :param direction: Direction of the text. 
It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param features: A list of OpenType font features to be used during text + layout. This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. To get all supported + features, see + https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + `_ + Requires libraqm. + + .. versionadded:: 6.0.0 + + :param stroke_width: The width of the text stroke. + + .. versionadded:: 6.2.0 + + :param anchor: The text anchor alignment. Determines the relative location of + the anchor to the text. The default alignment is top left. + See :ref:`text-anchors` for valid values. + + .. versionadded:: 8.0.0 + + :param ink: Foreground ink for rendering in RGBA mode. + + .. 
versionadded:: 8.0.0 + + :return: A tuple of an internal PIL storage memory instance as defined by the + :py:mod:`PIL.Image.core` interface module, and the text offset, the + gap between the starting coordinate and the first marking + """ + size, offset = self.font.getsize( + text, mode, direction, features, language, anchor + ) + size = size[0] + stroke_width * 2, size[1] + stroke_width * 2 + offset = offset[0] - stroke_width, offset[1] - stroke_width + Image._decompression_bomb_check(size) + im = fill("RGBA" if mode == "RGBA" else "L", size, 0) + self.font.render( + text, im.id, mode, direction, features, language, stroke_width, ink + ) + return im, offset + + def font_variant( + self, font=None, size=None, index=None, encoding=None, layout_engine=None + ): + """ + Create a copy of this FreeTypeFont object, + using any specified arguments to override the settings. + + Parameters are identical to the parameters used to initialize this + object. + + :return: A FreeTypeFont object. + """ + return FreeTypeFont( + font=self.path if font is None else font, + size=self.size if size is None else size, + index=self.index if index is None else index, + encoding=self.encoding if encoding is None else encoding, + layout_engine=layout_engine or self.layout_engine, + ) + + def get_variation_names(self): + """ + :returns: A list of the named styles in a variation font. + :exception OSError: If the font is not a variation font. + """ + try: + names = self.font.getvarnames() + except AttributeError as e: + raise NotImplementedError("FreeType 2.9.1 or greater is required") from e + return [name.replace(b"\x00", b"") for name in names] + + def set_variation_by_name(self, name): + """ + :param name: The name of the style. + :exception OSError: If the font is not a variation font. 
+ """ + names = self.get_variation_names() + if not isinstance(name, bytes): + name = name.encode() + index = names.index(name) + + if index == getattr(self, "_last_variation_index", None): + # When the same name is set twice in a row, + # there is an 'unknown freetype error' + # https://savannah.nongnu.org/bugs/?56186 + return + self._last_variation_index = index + + self.font.setvarname(index) + + def get_variation_axes(self): + """ + :returns: A list of the axes in a variation font. + :exception OSError: If the font is not a variation font. + """ + try: + axes = self.font.getvaraxes() + except AttributeError as e: + raise NotImplementedError("FreeType 2.9.1 or greater is required") from e + for axis in axes: + axis["name"] = axis["name"].replace(b"\x00", b"") + return axes + + def set_variation_by_axes(self, axes): + """ + :param axes: A list of values for each axis. + :exception OSError: If the font is not a variation font. + """ + try: + self.font.setvaraxes(axes) + except AttributeError as e: + raise NotImplementedError("FreeType 2.9.1 or greater is required") from e + + +class TransposedFont: + "Wrapper for writing rotated or mirrored text" + + def __init__(self, font, orientation=None): + """ + Wrapper that creates a transposed font from any existing font + object. + + :param font: A font object. + :param orientation: An optional orientation. If given, this should + be one of Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM, + Image.ROTATE_90, Image.ROTATE_180, or Image.ROTATE_270. 
+ """ + self.font = font + self.orientation = orientation # any 'transpose' argument, or None + + def getsize(self, text, *args, **kwargs): + w, h = self.font.getsize(text) + if self.orientation in (Image.ROTATE_90, Image.ROTATE_270): + return h, w + return w, h + + def getmask(self, text, mode="", *args, **kwargs): + im = self.font.getmask(text, mode, *args, **kwargs) + if self.orientation is not None: + return im.transpose(self.orientation) + return im + + +def load(filename): + """ + Load a font file. This function loads a font object from the given + bitmap font file, and returns the corresponding font object. + + :param filename: Name of font file. + :return: A font object. + :exception OSError: If the file could not be read. + """ + f = ImageFont() + f._load_pilfont(filename) + return f + + +def truetype(font=None, size=10, index=0, encoding="", layout_engine=None): + """ + Load a TrueType or OpenType font from a file or file-like object, + and create a font object. + This function loads a font object from the given file or file-like + object, and creates a font object for a font of the given size. + + Pillow uses FreeType to open font files. If you are opening many fonts + simultaneously on Windows, be aware that Windows limits the number of files + that can be open in C at once to 512. If you approach that limit, an + ``OSError`` may be thrown, reporting that FreeType "cannot open resource". + + This function requires the _imagingft service. + + :param font: A filename or file-like object containing a TrueType font. + If the file is not found in this filename, the loader may also + search in other directories, such as the :file:`fonts/` + directory on Windows or :file:`/Library/Fonts/`, + :file:`/System/Library/Fonts/` and :file:`~/Library/Fonts/` on + macOS. + + :param size: The requested size, in points. + :param index: Which font face to load (default is first available face). + :param encoding: Which font encoding to use (default is Unicode). 
Possible + encodings include (see the FreeType documentation for more + information): + + * "unic" (Unicode) + * "symb" (Microsoft Symbol) + * "ADOB" (Adobe Standard) + * "ADBE" (Adobe Expert) + * "ADBC" (Adobe Custom) + * "armn" (Apple Roman) + * "sjis" (Shift JIS) + * "gb " (PRC) + * "big5" + * "wans" (Extended Wansung) + * "joha" (Johab) + * "lat1" (Latin-1) + + This specifies the character set to use. It does not alter the + encoding of any text provided in subsequent operations. + :param layout_engine: Which layout engine to use, if available: + :data:`.ImageFont.LAYOUT_BASIC` or :data:`.ImageFont.LAYOUT_RAQM`. + + You can check support for Raqm layout using + :py:func:`PIL.features.check_feature` with ``feature="raqm"``. + + .. versionadded:: 4.2.0 + :return: A font object. + :exception OSError: If the file could not be read. + """ + + def freetype(font): + return FreeTypeFont(font, size, index, encoding, layout_engine) + + try: + return freetype(font) + except OSError: + if not isPath(font): + raise + ttf_filename = os.path.basename(font) + + dirs = [] + if sys.platform == "win32": + # check the windows font repository + # NOTE: must use uppercase WINDIR, to work around bugs in + # 1.5.2's os.environ.get() + windir = os.environ.get("WINDIR") + if windir: + dirs.append(os.path.join(windir, "fonts")) + elif sys.platform in ("linux", "linux2"): + lindirs = os.environ.get("XDG_DATA_DIRS", "") + if not lindirs: + # According to the freedesktop spec, XDG_DATA_DIRS should + # default to /usr/share + lindirs = "/usr/share" + dirs += [os.path.join(lindir, "fonts") for lindir in lindirs.split(":")] + elif sys.platform == "darwin": + dirs += [ + "/Library/Fonts", + "/System/Library/Fonts", + os.path.expanduser("~/Library/Fonts"), + ] + + ext = os.path.splitext(ttf_filename)[1] + first_font_with_a_different_extension = None + for directory in dirs: + for walkroot, walkdir, walkfilenames in os.walk(directory): + for walkfilename in walkfilenames: + if ext and 
walkfilename == ttf_filename: + return freetype(os.path.join(walkroot, walkfilename)) + elif not ext and os.path.splitext(walkfilename)[0] == ttf_filename: + fontpath = os.path.join(walkroot, walkfilename) + if os.path.splitext(fontpath)[1] == ".ttf": + return freetype(fontpath) + if not ext and first_font_with_a_different_extension is None: + first_font_with_a_different_extension = fontpath + if first_font_with_a_different_extension: + return freetype(first_font_with_a_different_extension) + raise + + +def load_path(filename): + """ + Load font file. Same as :py:func:`~PIL.ImageFont.load`, but searches for a + bitmap font along the Python path. + + :param filename: Name of font file. + :return: A font object. + :exception OSError: If the file could not be read. + """ + for directory in sys.path: + if isDirectory(directory): + if not isinstance(filename, str): + filename = filename.decode("utf-8") + try: + return load(os.path.join(directory, filename)) + except OSError: + pass + raise OSError("cannot find font file") + + +def load_default(): + """Load a "better than nothing" default font. + + .. versionadded:: 1.1.4 + + :return: A font object. 
+ """ + f = ImageFont() + f._load_pilfont_data( + # courB08 + BytesIO( + base64.b64decode( + b""" +UElMZm9udAo7Ozs7OzsxMDsKREFUQQoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAA//8AAQAAAAAAAAABAAEA +BgAAAAH/+gADAAAAAQAAAAMABgAGAAAAAf/6AAT//QADAAAABgADAAYAAAAA//kABQABAAYAAAAL +AAgABgAAAAD/+AAFAAEACwAAABAACQAGAAAAAP/5AAUAAAAQAAAAFQAHAAYAAP////oABQAAABUA +AAAbAAYABgAAAAH/+QAE//wAGwAAAB4AAwAGAAAAAf/5AAQAAQAeAAAAIQAIAAYAAAAB//kABAAB +ACEAAAAkAAgABgAAAAD/+QAE//0AJAAAACgABAAGAAAAAP/6AAX//wAoAAAALQAFAAYAAAAB//8A +BAACAC0AAAAwAAMABgAAAAD//AAF//0AMAAAADUAAQAGAAAAAf//AAMAAAA1AAAANwABAAYAAAAB +//kABQABADcAAAA7AAgABgAAAAD/+QAFAAAAOwAAAEAABwAGAAAAAP/5AAYAAABAAAAARgAHAAYA +AAAA//kABQAAAEYAAABLAAcABgAAAAD/+QAFAAAASwAAAFAABwAGAAAAAP/5AAYAAABQAAAAVgAH +AAYAAAAA//kABQAAAFYAAABbAAcABgAAAAD/+QAFAAAAWwAAAGAABwAGAAAAAP/5AAUAAABgAAAA +ZQAHAAYAAAAA//kABQAAAGUAAABqAAcABgAAAAD/+QAFAAAAagAAAG8ABwAGAAAAAf/8AAMAAABv +AAAAcQAEAAYAAAAA//wAAwACAHEAAAB0AAYABgAAAAD/+gAE//8AdAAAAHgABQAGAAAAAP/7AAT/ +/gB4AAAAfAADAAYAAAAB//oABf//AHwAAACAAAUABgAAAAD/+gAFAAAAgAAAAIUABgAGAAAAAP/5 +AAYAAQCFAAAAiwAIAAYAAP////oABgAAAIsAAACSAAYABgAA////+gAFAAAAkgAAAJgABgAGAAAA 
+AP/6AAUAAACYAAAAnQAGAAYAAP////oABQAAAJ0AAACjAAYABgAA////+gAFAAAAowAAAKkABgAG +AAD////6AAUAAACpAAAArwAGAAYAAAAA//oABQAAAK8AAAC0AAYABgAA////+gAGAAAAtAAAALsA +BgAGAAAAAP/6AAQAAAC7AAAAvwAGAAYAAP////oABQAAAL8AAADFAAYABgAA////+gAGAAAAxQAA +AMwABgAGAAD////6AAUAAADMAAAA0gAGAAYAAP////oABQAAANIAAADYAAYABgAA////+gAGAAAA +2AAAAN8ABgAGAAAAAP/6AAUAAADfAAAA5AAGAAYAAP////oABQAAAOQAAADqAAYABgAAAAD/+gAF +AAEA6gAAAO8ABwAGAAD////6AAYAAADvAAAA9gAGAAYAAAAA//oABQAAAPYAAAD7AAYABgAA//// ++gAFAAAA+wAAAQEABgAGAAD////6AAYAAAEBAAABCAAGAAYAAP////oABgAAAQgAAAEPAAYABgAA +////+gAGAAABDwAAARYABgAGAAAAAP/6AAYAAAEWAAABHAAGAAYAAP////oABgAAARwAAAEjAAYA +BgAAAAD/+gAFAAABIwAAASgABgAGAAAAAf/5AAQAAQEoAAABKwAIAAYAAAAA//kABAABASsAAAEv +AAgABgAAAAH/+QAEAAEBLwAAATIACAAGAAAAAP/5AAX//AEyAAABNwADAAYAAAAAAAEABgACATcA +AAE9AAEABgAAAAH/+QAE//wBPQAAAUAAAwAGAAAAAP/7AAYAAAFAAAABRgAFAAYAAP////kABQAA +AUYAAAFMAAcABgAAAAD/+wAFAAABTAAAAVEABQAGAAAAAP/5AAYAAAFRAAABVwAHAAYAAAAA//sA +BQAAAVcAAAFcAAUABgAAAAD/+QAFAAABXAAAAWEABwAGAAAAAP/7AAYAAgFhAAABZwAHAAYAAP// +//kABQAAAWcAAAFtAAcABgAAAAD/+QAGAAABbQAAAXMABwAGAAAAAP/5AAQAAgFzAAABdwAJAAYA +AP////kABgAAAXcAAAF+AAcABgAAAAD/+QAGAAABfgAAAYQABwAGAAD////7AAUAAAGEAAABigAF +AAYAAP////sABQAAAYoAAAGQAAUABgAAAAD/+wAFAAABkAAAAZUABQAGAAD////7AAUAAgGVAAAB +mwAHAAYAAAAA//sABgACAZsAAAGhAAcABgAAAAD/+wAGAAABoQAAAacABQAGAAAAAP/7AAYAAAGn +AAABrQAFAAYAAAAA//kABgAAAa0AAAGzAAcABgAA////+wAGAAABswAAAboABQAGAAD////7AAUA +AAG6AAABwAAFAAYAAP////sABgAAAcAAAAHHAAUABgAAAAD/+wAGAAABxwAAAc0ABQAGAAD////7 +AAYAAgHNAAAB1AAHAAYAAAAA//sABQAAAdQAAAHZAAUABgAAAAH/+QAFAAEB2QAAAd0ACAAGAAAA +Av/6AAMAAQHdAAAB3gAHAAYAAAAA//kABAABAd4AAAHiAAgABgAAAAD/+wAF//0B4gAAAecAAgAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAB +//sAAwACAecAAAHpAAcABgAAAAD/+QAFAAEB6QAAAe4ACAAGAAAAAP/5AAYAAAHuAAAB9AAHAAYA +AAAA//oABf//AfQAAAH5AAUABgAAAAD/+QAGAAAB+QAAAf8ABwAGAAAAAv/5AAMAAgH/AAACAAAJ +AAYAAAAA//kABQABAgAAAAIFAAgABgAAAAH/+gAE//sCBQAAAggAAQAGAAAAAP/5AAYAAAIIAAAC +DgAHAAYAAAAB//kABf/+Ag4AAAISAAUABgAA////+wAGAAACEgAAAhkABQAGAAAAAP/7AAX//gIZ +AAACHgADAAYAAAAA//wABf/9Ah4AAAIjAAEABgAAAAD/+QAHAAACIwAAAioABwAGAAAAAP/6AAT/ ++wIqAAACLgABAAYAAAAA//kABP/8Ai4AAAIyAAMABgAAAAD/+gAFAAACMgAAAjcABgAGAAAAAf/5 +AAT//QI3AAACOgAEAAYAAAAB//kABP/9AjoAAAI9AAQABgAAAAL/+QAE//sCPQAAAj8AAgAGAAD/ +///7AAYAAgI/AAACRgAHAAYAAAAA//kABgABAkYAAAJMAAgABgAAAAH//AAD//0CTAAAAk4AAQAG +AAAAAf//AAQAAgJOAAACUQADAAYAAAAB//kABP/9AlEAAAJUAAQABgAAAAH/+QAF//4CVAAAAlgA +BQAGAAD////7AAYAAAJYAAACXwAFAAYAAP////kABgAAAl8AAAJmAAcABgAA////+QAGAAACZgAA +Am0ABwAGAAD////5AAYAAAJtAAACdAAHAAYAAAAA//sABQACAnQAAAJ5AAcABgAA////9wAGAAAC +eQAAAoAACQAGAAD////3AAYAAAKAAAAChwAJAAYAAP////cABgAAAocAAAKOAAkABgAA////9wAG +AAACjgAAApUACQAGAAD////4AAYAAAKVAAACnAAIAAYAAP////cABgAAApwAAAKjAAkABgAA//// ++gAGAAACowAAAqoABgAGAAAAAP/6AAUAAgKqAAACrwAIAAYAAP////cABQAAAq8AAAK1AAkABgAA +////9wAFAAACtQAAArsACQAGAAD////3AAUAAAK7AAACwQAJAAYAAP////gABQAAAsEAAALHAAgA +BgAAAAD/9wAEAAACxwAAAssACQAGAAAAAP/3AAQAAALLAAACzwAJAAYAAAAA//cABAAAAs8AAALT +AAkABgAAAAD/+AAEAAAC0wAAAtcACAAGAAD////6AAUAAALXAAAC3QAGAAYAAP////cABgAAAt0A 
+AALkAAkABgAAAAD/9wAFAAAC5AAAAukACQAGAAAAAP/3AAUAAALpAAAC7gAJAAYAAAAA//cABQAA +Au4AAALzAAkABgAAAAD/9wAFAAAC8wAAAvgACQAGAAAAAP/4AAUAAAL4AAAC/QAIAAYAAAAA//oA +Bf//Av0AAAMCAAUABgAA////+gAGAAADAgAAAwkABgAGAAD////3AAYAAAMJAAADEAAJAAYAAP// +//cABgAAAxAAAAMXAAkABgAA////9wAGAAADFwAAAx4ACQAGAAD////4AAYAAAAAAAoABwASAAYA +AP////cABgAAAAcACgAOABMABgAA////+gAFAAAADgAKABQAEAAGAAD////6AAYAAAAUAAoAGwAQ +AAYAAAAA//gABgAAABsACgAhABIABgAAAAD/+AAGAAAAIQAKACcAEgAGAAAAAP/4AAYAAAAnAAoA +LQASAAYAAAAA//gABgAAAC0ACgAzABIABgAAAAD/+QAGAAAAMwAKADkAEQAGAAAAAP/3AAYAAAA5 +AAoAPwATAAYAAP////sABQAAAD8ACgBFAA8ABgAAAAD/+wAFAAIARQAKAEoAEQAGAAAAAP/4AAUA +AABKAAoATwASAAYAAAAA//gABQAAAE8ACgBUABIABgAAAAD/+AAFAAAAVAAKAFkAEgAGAAAAAP/5 +AAUAAABZAAoAXgARAAYAAAAA//gABgAAAF4ACgBkABIABgAAAAD/+AAGAAAAZAAKAGoAEgAGAAAA +AP/4AAYAAABqAAoAcAASAAYAAAAA//kABgAAAHAACgB2ABEABgAAAAD/+AAFAAAAdgAKAHsAEgAG +AAD////4AAYAAAB7AAoAggASAAYAAAAA//gABQAAAIIACgCHABIABgAAAAD/+AAFAAAAhwAKAIwA +EgAGAAAAAP/4AAUAAACMAAoAkQASAAYAAAAA//gABQAAAJEACgCWABIABgAAAAD/+QAFAAAAlgAK +AJsAEQAGAAAAAP/6AAX//wCbAAoAoAAPAAYAAAAA//oABQABAKAACgClABEABgAA////+AAGAAAA +pQAKAKwAEgAGAAD////4AAYAAACsAAoAswASAAYAAP////gABgAAALMACgC6ABIABgAA////+QAG +AAAAugAKAMEAEQAGAAD////4AAYAAgDBAAoAyAAUAAYAAP////kABQACAMgACgDOABMABgAA//// ++QAGAAIAzgAKANUAEw== +""" + ) + ), + Image.open( + BytesIO( + base64.b64decode( + b""" +iVBORw0KGgoAAAANSUhEUgAAAx4AAAAUAQAAAAArMtZoAAAEwElEQVR4nABlAJr/AHVE4czCI/4u +Mc4b7vuds/xzjz5/3/7u/n9vMe7vnfH/9++vPn/xyf5zhxzjt8GHw8+2d83u8x27199/nxuQ6Od9 +M43/5z2I+9n9ZtmDBwMQECDRQw/eQIQohJXxpBCNVE6QCCAAAAD//wBlAJr/AgALyj1t/wINwq0g +LeNZUworuN1cjTPIzrTX6ofHWeo3v336qPzfEwRmBnHTtf95/fglZK5N0PDgfRTslpGBvz7LFc4F +IUXBWQGjQ5MGCx34EDFPwXiY4YbYxavpnhHFrk14CDAAAAD//wBlAJr/AgKqRooH2gAgPeggvUAA +Bu2WfgPoAwzRAABAAAAAAACQgLz/3Uv4Gv+gX7BJgDeeGP6AAAD1NMDzKHD7ANWr3loYbxsAD791 +NAADfcoIDyP44K/jv4Y63/Z+t98Ovt+ub4T48LAAAAD//wBlAJr/AuplMlADJAAAAGuAphWpqhMx +in0A/fRvAYBABPgBwBUgABBQ/sYAyv9g0bCHgOLoGAAAAAAAREAAwI7nr0ArYpow7aX8//9LaP/9 
+SjdavWA8ePHeBIKB//81/83ndznOaXx379wAAAD//wBlAJr/AqDxW+D3AABAAbUh/QMnbQag/gAY +AYDAAACgtgD/gOqAAAB5IA/8AAAk+n9w0AAA8AAAmFRJuPo27ciC0cD5oeW4E7KA/wD3ECMAn2tt +y8PgwH8AfAxFzC0JzeAMtratAsC/ffwAAAD//wBlAJr/BGKAyCAA4AAAAvgeYTAwHd1kmQF5chkG +ABoMIHcL5xVpTfQbUqzlAAAErwAQBgAAEOClA5D9il08AEh/tUzdCBsXkbgACED+woQg8Si9VeqY +lODCn7lmF6NhnAEYgAAA/NMIAAAAAAD//2JgjLZgVGBg5Pv/Tvpc8hwGBjYGJADjHDrAwPzAjv/H +/Wf3PzCwtzcwHmBgYGcwbZz8wHaCAQMDOwMDQ8MCBgYOC3W7mp+f0w+wHOYxO3OG+e376hsMZjk3 +AAAAAP//YmCMY2A4wMAIN5e5gQETPD6AZisDAwMDgzSDAAPjByiHcQMDAwMDg1nOze1lByRu5/47 +c4859311AYNZzg0AAAAA//9iYGDBYihOIIMuwIjGL39/fwffA8b//xv/P2BPtzzHwCBjUQAAAAD/ +/yLFBrIBAAAA//9i1HhcwdhizX7u8NZNzyLbvT97bfrMf/QHI8evOwcSqGUJAAAA//9iYBB81iSw +pEE170Qrg5MIYydHqwdDQRMrAwcVrQAAAAD//2J4x7j9AAMDn8Q/BgYLBoaiAwwMjPdvMDBYM1Tv +oJodAAAAAP//Yqo/83+dxePWlxl3npsel9lvLfPcqlE9725C+acfVLMEAAAA//9i+s9gwCoaaGMR +evta/58PTEWzr21hufPjA8N+qlnBwAAAAAD//2JiWLci5v1+HmFXDqcnULE/MxgYGBj+f6CaJQAA +AAD//2Ji2FrkY3iYpYC5qDeGgeEMAwPDvwQBBoYvcTwOVLMEAAAA//9isDBgkP///0EOg9z35v// +Gc/eeW7BwPj5+QGZhANUswMAAAD//2JgqGBgYGBgqEMXlvhMPUsAAAAA//8iYDd1AAAAAP//AwDR +w7IkEbzhVQAAAABJRU5ErkJggg== +""" + ) + ) + ), + ) + return f diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageGrab.py b/.venv/lib/python3.9/site-packages/PIL/ImageGrab.py new file mode 100644 index 00000000..b93ec3f2 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageGrab.py @@ -0,0 +1,120 @@ +# +# The Python Imaging Library +# $Id$ +# +# screen grabber +# +# History: +# 2001-04-26 fl created +# 2001-09-17 fl use builtin driver, if present +# 2002-11-19 fl added grabclipboard support +# +# Copyright (c) 2001-2002 by Secret Labs AB +# Copyright (c) 2001-2002 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import sys + +from . 
import Image + +if sys.platform == "darwin": + import os + import subprocess + import tempfile + + +def grab(bbox=None, include_layered_windows=False, all_screens=False, xdisplay=None): + if xdisplay is None: + if sys.platform == "darwin": + fh, filepath = tempfile.mkstemp(".png") + os.close(fh) + subprocess.call(["screencapture", "-x", filepath]) + im = Image.open(filepath) + im.load() + os.unlink(filepath) + if bbox: + im_cropped = im.crop(bbox) + im.close() + return im_cropped + return im + elif sys.platform == "win32": + offset, size, data = Image.core.grabscreen_win32( + include_layered_windows, all_screens + ) + im = Image.frombytes( + "RGB", + size, + data, + # RGB, 32-bit line padding, origin lower left corner + "raw", + "BGR", + (size[0] * 3 + 3) & -4, + -1, + ) + if bbox: + x0, y0 = offset + left, top, right, bottom = bbox + im = im.crop((left - x0, top - y0, right - x0, bottom - y0)) + return im + # use xdisplay=None for default display on non-win32/macOS systems + if not Image.core.HAVE_XCB: + raise OSError("Pillow was built without XCB support") + size, data = Image.core.grabscreen_x11(xdisplay) + im = Image.frombytes("RGB", size, data, "raw", "BGRX", size[0] * 4, 1) + if bbox: + im = im.crop(bbox) + return im + + +def grabclipboard(): + if sys.platform == "darwin": + fh, filepath = tempfile.mkstemp(".jpg") + os.close(fh) + commands = [ + 'set theFile to (open for access POSIX file "' + + filepath + + '" with write permission)', + "try", + " write (the clipboard as JPEG picture) to theFile", + "end try", + "close access theFile", + ] + script = ["osascript"] + for command in commands: + script += ["-e", command] + subprocess.call(script) + + im = None + if os.stat(filepath).st_size != 0: + im = Image.open(filepath) + im.load() + os.unlink(filepath) + return im + elif sys.platform == "win32": + fmt, data = Image.core.grabclipboard_win32() + if fmt == "file": # CF_HDROP + import struct + + o = struct.unpack_from("I", data)[0] + if data[16] != 0: + files 
= data[o:].decode("utf-16le").split("\0") + else: + files = data[o:].decode("mbcs").split("\0") + return files[: files.index("")] + if isinstance(data, bytes): + import io + + data = io.BytesIO(data) + if fmt == "png": + from . import PngImagePlugin + + return PngImagePlugin.PngImageFile(data) + elif fmt == "DIB": + from . import BmpImagePlugin + + return BmpImagePlugin.DibImageFile(data) + return None + else: + raise NotImplementedError("ImageGrab.grabclipboard() is macOS and Windows only") diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageMath.py b/.venv/lib/python3.9/site-packages/PIL/ImageMath.py new file mode 100644 index 00000000..7f9c88e1 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageMath.py @@ -0,0 +1,253 @@ +# +# The Python Imaging Library +# $Id$ +# +# a simple math add-on for the Python Imaging Library +# +# History: +# 1999-02-15 fl Original PIL Plus release +# 2005-05-05 fl Simplified and cleaned up for PIL 1.1.6 +# 2005-09-12 fl Fixed int() and float() for Python 2.4.1 +# +# Copyright (c) 1999-2005 by Secret Labs AB +# Copyright (c) 2005 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import builtins + +from . import Image, _imagingmath + +VERBOSE = 0 + + +def _isconstant(v): + return isinstance(v, (int, float)) + + +class _Operand: + """Wraps an image operand, providing standard operators""" + + def __init__(self, im): + self.im = im + + def __fixup(self, im1): + # convert image to suitable mode + if isinstance(im1, _Operand): + # argument was an image. 
+ if im1.im.mode in ("1", "L"): + return im1.im.convert("I") + elif im1.im.mode in ("I", "F"): + return im1.im + else: + raise ValueError(f"unsupported mode: {im1.im.mode}") + else: + # argument was a constant + if _isconstant(im1) and self.im.mode in ("1", "L", "I"): + return Image.new("I", self.im.size, im1) + else: + return Image.new("F", self.im.size, im1) + + def apply(self, op, im1, im2=None, mode=None): + im1 = self.__fixup(im1) + if im2 is None: + # unary operation + out = Image.new(mode or im1.mode, im1.size, None) + im1.load() + try: + op = getattr(_imagingmath, op + "_" + im1.mode) + except AttributeError as e: + raise TypeError(f"bad operand type for '{op}'") from e + _imagingmath.unop(op, out.im.id, im1.im.id) + else: + # binary operation + im2 = self.__fixup(im2) + if im1.mode != im2.mode: + # convert both arguments to floating point + if im1.mode != "F": + im1 = im1.convert("F") + if im2.mode != "F": + im2 = im2.convert("F") + if im1.mode != im2.mode: + raise ValueError("mode mismatch") + if im1.size != im2.size: + # crop both arguments to a common size + size = (min(im1.size[0], im2.size[0]), min(im1.size[1], im2.size[1])) + if im1.size != size: + im1 = im1.crop((0, 0) + size) + if im2.size != size: + im2 = im2.crop((0, 0) + size) + out = Image.new(mode or im1.mode, size, None) + else: + out = Image.new(mode or im1.mode, im1.size, None) + im1.load() + im2.load() + try: + op = getattr(_imagingmath, op + "_" + im1.mode) + except AttributeError as e: + raise TypeError(f"bad operand type for '{op}'") from e + _imagingmath.binop(op, out.im.id, im1.im.id, im2.im.id) + return _Operand(out) + + # unary operators + def __bool__(self): + # an image is "true" if it contains at least one non-zero pixel + return self.im.getbbox() is not None + + def __abs__(self): + return self.apply("abs", self) + + def __pos__(self): + return self + + def __neg__(self): + return self.apply("neg", self) + + # binary operators + def __add__(self, other): + return 
self.apply("add", self, other) + + def __radd__(self, other): + return self.apply("add", other, self) + + def __sub__(self, other): + return self.apply("sub", self, other) + + def __rsub__(self, other): + return self.apply("sub", other, self) + + def __mul__(self, other): + return self.apply("mul", self, other) + + def __rmul__(self, other): + return self.apply("mul", other, self) + + def __truediv__(self, other): + return self.apply("div", self, other) + + def __rtruediv__(self, other): + return self.apply("div", other, self) + + def __mod__(self, other): + return self.apply("mod", self, other) + + def __rmod__(self, other): + return self.apply("mod", other, self) + + def __pow__(self, other): + return self.apply("pow", self, other) + + def __rpow__(self, other): + return self.apply("pow", other, self) + + # bitwise + def __invert__(self): + return self.apply("invert", self) + + def __and__(self, other): + return self.apply("and", self, other) + + def __rand__(self, other): + return self.apply("and", other, self) + + def __or__(self, other): + return self.apply("or", self, other) + + def __ror__(self, other): + return self.apply("or", other, self) + + def __xor__(self, other): + return self.apply("xor", self, other) + + def __rxor__(self, other): + return self.apply("xor", other, self) + + def __lshift__(self, other): + return self.apply("lshift", self, other) + + def __rshift__(self, other): + return self.apply("rshift", self, other) + + # logical + def __eq__(self, other): + return self.apply("eq", self, other) + + def __ne__(self, other): + return self.apply("ne", self, other) + + def __lt__(self, other): + return self.apply("lt", self, other) + + def __le__(self, other): + return self.apply("le", self, other) + + def __gt__(self, other): + return self.apply("gt", self, other) + + def __ge__(self, other): + return self.apply("ge", self, other) + + +# conversions +def imagemath_int(self): + return _Operand(self.im.convert("I")) + + +def imagemath_float(self): + 
return _Operand(self.im.convert("F")) + + +# logical +def imagemath_equal(self, other): + return self.apply("eq", self, other, mode="I") + + +def imagemath_notequal(self, other): + return self.apply("ne", self, other, mode="I") + + +def imagemath_min(self, other): + return self.apply("min", self, other) + + +def imagemath_max(self, other): + return self.apply("max", self, other) + + +def imagemath_convert(self, mode): + return _Operand(self.im.convert(mode)) + + +ops = {} +for k, v in list(globals().items()): + if k[:10] == "imagemath_": + ops[k[10:]] = v + + +def eval(expression, _dict={}, **kw): + """ + Evaluates an image expression. + + :param expression: A string containing a Python-style expression. + :param options: Values to add to the evaluation context. You + can either use a dictionary, or one or more keyword + arguments. + :return: The evaluated expression. This is usually an image object, but can + also be an integer, a floating point value, or a pixel tuple, + depending on the expression. + """ + + # build execution namespace + args = ops.copy() + args.update(_dict) + args.update(kw) + for k, v in list(args.items()): + if hasattr(v, "im"): + args[k] = _Operand(v) + + out = builtins.eval(expression, args) + try: + return out.im + except AttributeError: + return out diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageMode.py b/.venv/lib/python3.9/site-packages/PIL/ImageMode.py new file mode 100644 index 00000000..0afcf9fe --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageMode.py @@ -0,0 +1,74 @@ +# +# The Python Imaging Library. +# $Id$ +# +# standard mode descriptors +# +# History: +# 2006-03-20 fl Added +# +# Copyright (c) 2006 by Secret Labs AB. +# Copyright (c) 2006 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. 
+# + +# mode descriptor cache +_modes = None + + +class ModeDescriptor: + """Wrapper for mode strings.""" + + def __init__(self, mode, bands, basemode, basetype): + self.mode = mode + self.bands = bands + self.basemode = basemode + self.basetype = basetype + + def __str__(self): + return self.mode + + +def getmode(mode): + """Gets a mode descriptor for the given mode.""" + global _modes + if not _modes: + # initialize mode cache + modes = {} + for m, (basemode, basetype, bands) in { + # core modes + "1": ("L", "L", ("1",)), + "L": ("L", "L", ("L",)), + "I": ("L", "I", ("I",)), + "F": ("L", "F", ("F",)), + "P": ("P", "L", ("P",)), + "RGB": ("RGB", "L", ("R", "G", "B")), + "RGBX": ("RGB", "L", ("R", "G", "B", "X")), + "RGBA": ("RGB", "L", ("R", "G", "B", "A")), + "CMYK": ("RGB", "L", ("C", "M", "Y", "K")), + "YCbCr": ("RGB", "L", ("Y", "Cb", "Cr")), + "LAB": ("RGB", "L", ("L", "A", "B")), + "HSV": ("RGB", "L", ("H", "S", "V")), + # extra experimental modes + "RGBa": ("RGB", "L", ("R", "G", "B", "a")), + "LA": ("L", "L", ("L", "A")), + "La": ("L", "L", ("L", "a")), + "PA": ("RGB", "L", ("P", "A")), + }.items(): + modes[m] = ModeDescriptor(m, bands, basemode, basetype) + # mapping modes + for i16mode in ( + "I;16", + "I;16S", + "I;16L", + "I;16LS", + "I;16B", + "I;16BS", + "I;16N", + "I;16NS", + ): + modes[i16mode] = ModeDescriptor(i16mode, ("I",), "L", "L") + # set global mode cache atomically + _modes = modes + return _modes[mode] diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageMorph.py b/.venv/lib/python3.9/site-packages/PIL/ImageMorph.py new file mode 100644 index 00000000..fe008375 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageMorph.py @@ -0,0 +1,245 @@ +# A binary morphology add-on for the Python Imaging Library +# +# History: +# 2014-06-04 Initial version. +# +# Copyright (c) 2014 Dov Grobgeld + +import re + +from . 
import Image, _imagingmorph + +LUT_SIZE = 1 << 9 + +# fmt: off +ROTATION_MATRIX = [ + 6, 3, 0, + 7, 4, 1, + 8, 5, 2, +] +MIRROR_MATRIX = [ + 2, 1, 0, + 5, 4, 3, + 8, 7, 6, +] +# fmt: on + + +class LutBuilder: + """A class for building a MorphLut from a descriptive language + + The input patterns is a list of a strings sequences like these:: + + 4:(... + .1. + 111)->1 + + (whitespaces including linebreaks are ignored). The option 4 + describes a series of symmetry operations (in this case a + 4-rotation), the pattern is described by: + + - . or X - Ignore + - 1 - Pixel is on + - 0 - Pixel is off + + The result of the operation is described after "->" string. + + The default is to return the current pixel value, which is + returned if no other match is found. + + Operations: + + - 4 - 4 way rotation + - N - Negate + - 1 - Dummy op for no other operation (an op must always be given) + - M - Mirroring + + Example:: + + lb = LutBuilder(patterns = ["4:(... .1. 111)->1"]) + lut = lb.build_lut() + + """ + + def __init__(self, patterns=None, op_name=None): + if patterns is not None: + self.patterns = patterns + else: + self.patterns = [] + self.lut = None + if op_name is not None: + known_patterns = { + "corner": ["1:(... ... ...)->0", "4:(00. 01. ...)->1"], + "dilation4": ["4:(... .0. .1.)->1"], + "dilation8": ["4:(... .0. .1.)->1", "4:(... .0. ..1)->1"], + "erosion4": ["4:(... .1. .0.)->0"], + "erosion8": ["4:(... .1. .0.)->0", "4:(... .1. ..0)->0"], + "edge": [ + "1:(... ... ...)->0", + "4:(.0. .1. ...)->1", + "4:(01. .1. 
...)->1", + ], + } + if op_name not in known_patterns: + raise Exception("Unknown pattern " + op_name + "!") + + self.patterns = known_patterns[op_name] + + def add_patterns(self, patterns): + self.patterns += patterns + + def build_default_lut(self): + symbols = [0, 1] + m = 1 << 4 # pos of current pixel + self.lut = bytearray(symbols[(i & m) > 0] for i in range(LUT_SIZE)) + + def get_lut(self): + return self.lut + + def _string_permute(self, pattern, permutation): + """string_permute takes a pattern and a permutation and returns the + string permuted according to the permutation list. + """ + assert len(permutation) == 9 + return "".join(pattern[p] for p in permutation) + + def _pattern_permute(self, basic_pattern, options, basic_result): + """pattern_permute takes a basic pattern and its result and clones + the pattern according to the modifications described in the $options + parameter. It returns a list of all cloned patterns.""" + patterns = [(basic_pattern, basic_result)] + + # rotations + if "4" in options: + res = patterns[-1][1] + for i in range(4): + patterns.append( + (self._string_permute(patterns[-1][0], ROTATION_MATRIX), res) + ) + # mirror + if "M" in options: + n = len(patterns) + for pattern, res in patterns[0:n]: + patterns.append((self._string_permute(pattern, MIRROR_MATRIX), res)) + + # negate + if "N" in options: + n = len(patterns) + for pattern, res in patterns[0:n]: + # Swap 0 and 1 + pattern = pattern.replace("0", "Z").replace("1", "0").replace("Z", "1") + res = 1 - int(res) + patterns.append((pattern, res)) + + return patterns + + def build_lut(self): + """Compile all patterns into a morphology lut. 
+ + TBD :Build based on (file) morphlut:modify_lut + """ + self.build_default_lut() + patterns = [] + + # Parse and create symmetries of the patterns strings + for p in self.patterns: + m = re.search(r"(\w*):?\s*\((.+?)\)\s*->\s*(\d)", p.replace("\n", "")) + if not m: + raise Exception('Syntax error in pattern "' + p + '"') + options = m.group(1) + pattern = m.group(2) + result = int(m.group(3)) + + # Get rid of spaces + pattern = pattern.replace(" ", "").replace("\n", "") + + patterns += self._pattern_permute(pattern, options, result) + + # compile the patterns into regular expressions for speed + for i, pattern in enumerate(patterns): + p = pattern[0].replace(".", "X").replace("X", "[01]") + p = re.compile(p) + patterns[i] = (p, pattern[1]) + + # Step through table and find patterns that match. + # Note that all the patterns are searched. The last one + # caught overrides + for i in range(LUT_SIZE): + # Build the bit pattern + bitpattern = bin(i)[2:] + bitpattern = ("0" * (9 - len(bitpattern)) + bitpattern)[::-1] + + for p, r in patterns: + if p.match(bitpattern): + self.lut[i] = [0, 1][r] + + return self.lut + + +class MorphOp: + """A class for binary morphological operators""" + + def __init__(self, lut=None, op_name=None, patterns=None): + """Create a binary morphological operator""" + self.lut = lut + if op_name is not None: + self.lut = LutBuilder(op_name=op_name).build_lut() + elif patterns is not None: + self.lut = LutBuilder(patterns=patterns).build_lut() + + def apply(self, image): + """Run a single morphological operation on an image + + Returns a tuple of the number of changed pixels and the + morphed image""" + if self.lut is None: + raise Exception("No operator loaded") + + if image.mode != "L": + raise ValueError("Image mode must be L") + outimage = Image.new(image.mode, image.size, None) + count = _imagingmorph.apply(bytes(self.lut), image.im.id, outimage.im.id) + return count, outimage + + def match(self, image): + """Get a list of coordinates 
matching the morphological operation on + an image. + + Returns a list of tuples of (x,y) coordinates + of all matching pixels. See :ref:`coordinate-system`.""" + if self.lut is None: + raise Exception("No operator loaded") + + if image.mode != "L": + raise ValueError("Image mode must be L") + return _imagingmorph.match(bytes(self.lut), image.im.id) + + def get_on_pixels(self, image): + """Get a list of all turned on pixels in a binary image + + Returns a list of tuples of (x,y) coordinates + of all matching pixels. See :ref:`coordinate-system`.""" + + if image.mode != "L": + raise ValueError("Image mode must be L") + return _imagingmorph.get_on_pixels(image.im.id) + + def load_lut(self, filename): + """Load an operator from an mrl file""" + with open(filename, "rb") as f: + self.lut = bytearray(f.read()) + + if len(self.lut) != LUT_SIZE: + self.lut = None + raise Exception("Wrong size operator file!") + + def save_lut(self, filename): + """Save an operator to an mrl file""" + if self.lut is None: + raise Exception("No operator loaded") + with open(filename, "wb") as f: + f.write(self.lut) + + def set_lut(self, lut): + """Set the lut from an external source""" + self.lut = lut diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageOps.py b/.venv/lib/python3.9/site-packages/PIL/ImageOps.py new file mode 100644 index 00000000..f0c932d3 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageOps.py @@ -0,0 +1,608 @@ +# +# The Python Imaging Library. +# $Id$ +# +# standard image operations +# +# History: +# 2001-10-20 fl Created +# 2001-10-23 fl Added autocontrast operator +# 2001-12-18 fl Added Kevin's fit operator +# 2004-03-14 fl Fixed potential division by zero in equalize +# 2005-05-05 fl Fixed equalize for low number of values +# +# Copyright (c) 2001-2004 by Secret Labs AB +# Copyright (c) 2001-2004 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import functools +import operator +import re + +from . 
import Image + +# +# helpers + + +def _border(border): + if isinstance(border, tuple): + if len(border) == 2: + left, top = right, bottom = border + elif len(border) == 4: + left, top, right, bottom = border + else: + left = top = right = bottom = border + return left, top, right, bottom + + +def _color(color, mode): + if isinstance(color, str): + from . import ImageColor + + color = ImageColor.getcolor(color, mode) + return color + + +def _lut(image, lut): + if image.mode == "P": + # FIXME: apply to lookup table, not image data + raise NotImplementedError("mode P support coming soon") + elif image.mode in ("L", "RGB"): + if image.mode == "RGB" and len(lut) == 256: + lut = lut + lut + lut + return image.point(lut) + else: + raise OSError("not supported for this image mode") + + +# +# actions + + +def autocontrast(image, cutoff=0, ignore=None, mask=None, preserve_tone=False): + """ + Maximize (normalize) image contrast. This function calculates a + histogram of the input image (or mask region), removes ``cutoff`` percent of the + lightest and darkest pixels from the histogram, and remaps the image + so that the darkest pixel becomes black (0), and the lightest + becomes white (255). + + :param image: The image to process. + :param cutoff: The percent to cut off from the histogram on the low and + high ends. Either a tuple of (low, high), or a single + number for both. + :param ignore: The background pixel value (use None for no background). + :param mask: Histogram used in contrast operation is computed using pixels + within the mask. If no mask is given the entire image is used + for histogram computation. + :param preserve_tone: Preserve image tone in Photoshop-like style autocontrast. + + .. versionadded:: 8.2.0 + + :return: An image. 
+ """ + if preserve_tone: + histogram = image.convert("L").histogram(mask) + else: + histogram = image.histogram(mask) + + lut = [] + for layer in range(0, len(histogram), 256): + h = histogram[layer : layer + 256] + if ignore is not None: + # get rid of outliers + try: + h[ignore] = 0 + except TypeError: + # assume sequence + for ix in ignore: + h[ix] = 0 + if cutoff: + # cut off pixels from both ends of the histogram + if not isinstance(cutoff, tuple): + cutoff = (cutoff, cutoff) + # get number of pixels + n = 0 + for ix in range(256): + n = n + h[ix] + # remove cutoff% pixels from the low end + cut = n * cutoff[0] // 100 + for lo in range(256): + if cut > h[lo]: + cut = cut - h[lo] + h[lo] = 0 + else: + h[lo] -= cut + cut = 0 + if cut <= 0: + break + # remove cutoff% samples from the high end + cut = n * cutoff[1] // 100 + for hi in range(255, -1, -1): + if cut > h[hi]: + cut = cut - h[hi] + h[hi] = 0 + else: + h[hi] -= cut + cut = 0 + if cut <= 0: + break + # find lowest/highest samples after preprocessing + for lo in range(256): + if h[lo]: + break + for hi in range(255, -1, -1): + if h[hi]: + break + if hi <= lo: + # don't bother + lut.extend(list(range(256))) + else: + scale = 255.0 / (hi - lo) + offset = -lo * scale + for ix in range(256): + ix = int(ix * scale + offset) + if ix < 0: + ix = 0 + elif ix > 255: + ix = 255 + lut.append(ix) + return _lut(image, lut) + + +def colorize(image, black, white, mid=None, blackpoint=0, whitepoint=255, midpoint=127): + """ + Colorize grayscale image. + This function calculates a color wedge which maps all black pixels in + the source image to the first color and all white pixels to the + second color. If ``mid`` is specified, it uses three-color mapping. + The ``black`` and ``white`` arguments should be RGB tuples or color names; + optionally you can use three-color mapping by also specifying ``mid``. + Mapping positions for any of the colors can be specified + (e.g. 
``blackpoint``), where these parameters are the integer + value corresponding to where the corresponding color should be mapped. + These parameters must have logical order, such that + ``blackpoint <= midpoint <= whitepoint`` (if ``mid`` is specified). + + :param image: The image to colorize. + :param black: The color to use for black input pixels. + :param white: The color to use for white input pixels. + :param mid: The color to use for midtone input pixels. + :param blackpoint: an int value [0, 255] for the black mapping. + :param whitepoint: an int value [0, 255] for the white mapping. + :param midpoint: an int value [0, 255] for the midtone mapping. + :return: An image. + """ + + # Initial asserts + assert image.mode == "L" + if mid is None: + assert 0 <= blackpoint <= whitepoint <= 255 + else: + assert 0 <= blackpoint <= midpoint <= whitepoint <= 255 + + # Define colors from arguments + black = _color(black, "RGB") + white = _color(white, "RGB") + if mid is not None: + mid = _color(mid, "RGB") + + # Empty lists for the mapping + red = [] + green = [] + blue = [] + + # Create the low-end values + for i in range(0, blackpoint): + red.append(black[0]) + green.append(black[1]) + blue.append(black[2]) + + # Create the mapping (2-color) + if mid is None: + + range_map = range(0, whitepoint - blackpoint) + + for i in range_map: + red.append(black[0] + i * (white[0] - black[0]) // len(range_map)) + green.append(black[1] + i * (white[1] - black[1]) // len(range_map)) + blue.append(black[2] + i * (white[2] - black[2]) // len(range_map)) + + # Create the mapping (3-color) + else: + + range_map1 = range(0, midpoint - blackpoint) + range_map2 = range(0, whitepoint - midpoint) + + for i in range_map1: + red.append(black[0] + i * (mid[0] - black[0]) // len(range_map1)) + green.append(black[1] + i * (mid[1] - black[1]) // len(range_map1)) + blue.append(black[2] + i * (mid[2] - black[2]) // len(range_map1)) + for i in range_map2: + red.append(mid[0] + i * (white[0] - mid[0]) 
// len(range_map2)) + green.append(mid[1] + i * (white[1] - mid[1]) // len(range_map2)) + blue.append(mid[2] + i * (white[2] - mid[2]) // len(range_map2)) + + # Create the high-end values + for i in range(0, 256 - whitepoint): + red.append(white[0]) + green.append(white[1]) + blue.append(white[2]) + + # Return converted image + image = image.convert("RGB") + return _lut(image, red + green + blue) + + +def contain(image, size, method=Image.BICUBIC): + """ + Returns a resized version of the image, set to the maximum width and height + within the requested size, while maintaining the original aspect ratio. + + :param image: The image to resize and crop. + :param size: The requested output size in pixels, given as a + (width, height) tuple. + :param method: Resampling method to use. Default is + :py:attr:`PIL.Image.BICUBIC`. See :ref:`concept-filters`. + :return: An image. + """ + + im_ratio = image.width / image.height + dest_ratio = size[0] / size[1] + + if im_ratio != dest_ratio: + if im_ratio > dest_ratio: + new_height = int(image.height / image.width * size[0]) + if new_height != size[1]: + size = (size[0], new_height) + else: + new_width = int(image.width / image.height * size[1]) + if new_width != size[0]: + size = (new_width, size[1]) + return image.resize(size, resample=method) + + +def pad(image, size, method=Image.BICUBIC, color=None, centering=(0.5, 0.5)): + """ + Returns a resized and padded version of the image, expanded to fill the + requested aspect ratio and size. + + :param image: The image to resize and crop. + :param size: The requested output size in pixels, given as a + (width, height) tuple. + :param method: Resampling method to use. Default is + :py:attr:`PIL.Image.BICUBIC`. See :ref:`concept-filters`. + :param color: The background color of the padded image. + :param centering: Control the position of the original image within the + padded version. 
+ + (0.5, 0.5) will keep the image centered + (0, 0) will keep the image aligned to the top left + (1, 1) will keep the image aligned to the bottom + right + :return: An image. + """ + + resized = contain(image, size, method) + if resized.size == size: + out = resized + else: + out = Image.new(image.mode, size, color) + if resized.width != size[0]: + x = int((size[0] - resized.width) * max(0, min(centering[0], 1))) + out.paste(resized, (x, 0)) + else: + y = int((size[1] - resized.height) * max(0, min(centering[1], 1))) + out.paste(resized, (0, y)) + return out + + +def crop(image, border=0): + """ + Remove border from image. The same amount of pixels are removed + from all four sides. This function works on all image modes. + + .. seealso:: :py:meth:`~PIL.Image.Image.crop` + + :param image: The image to crop. + :param border: The number of pixels to remove. + :return: An image. + """ + left, top, right, bottom = _border(border) + return image.crop((left, top, image.size[0] - right, image.size[1] - bottom)) + + +def scale(image, factor, resample=Image.BICUBIC): + """ + Returns a rescaled image by a specific factor given in parameter. + A factor greater than 1 expands the image, between 0 and 1 contracts the + image. + + :param image: The image to rescale. + :param factor: The expansion factor, as a float. + :param resample: Resampling method to use. Default is + :py:attr:`PIL.Image.BICUBIC`. See :ref:`concept-filters`. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + if factor == 1: + return image.copy() + elif factor <= 0: + raise ValueError("the factor must be greater than 0") + else: + size = (round(factor * image.width), round(factor * image.height)) + return image.resize(size, resample) + + +def deform(image, deformer, resample=Image.BILINEAR): + """ + Deform the image. + + :param image: The image to deform. + :param deformer: A deformer object. Any object that implements a + ``getmesh`` method can be used. 
+ :param resample: An optional resampling filter. Same values possible as + in the PIL.Image.transform function. + :return: An image. + """ + return image.transform(image.size, Image.MESH, deformer.getmesh(image), resample) + + +def equalize(image, mask=None): + """ + Equalize the image histogram. This function applies a non-linear + mapping to the input image, in order to create a uniform + distribution of grayscale values in the output image. + + :param image: The image to equalize. + :param mask: An optional mask. If given, only the pixels selected by + the mask are included in the analysis. + :return: An image. + """ + if image.mode == "P": + image = image.convert("RGB") + h = image.histogram(mask) + lut = [] + for b in range(0, len(h), 256): + histo = [_f for _f in h[b : b + 256] if _f] + if len(histo) <= 1: + lut.extend(list(range(256))) + else: + step = (functools.reduce(operator.add, histo) - histo[-1]) // 255 + if not step: + lut.extend(list(range(256))) + else: + n = step // 2 + for i in range(256): + lut.append(n // step) + n = n + h[i + b] + return _lut(image, lut) + + +def expand(image, border=0, fill=0): + """ + Add border to the image + + :param image: The image to expand. + :param border: Border width, in pixels. + :param fill: Pixel fill value (a color value). Default is 0 (black). + :return: An image. 
+ """ + left, top, right, bottom = _border(border) + width = left + image.size[0] + right + height = top + image.size[1] + bottom + color = _color(fill, image.mode) + if image.mode == "P" and image.palette: + image.load() + palette = image.palette.copy() + if isinstance(color, tuple): + color = palette.getcolor(color) + else: + palette = None + out = Image.new(image.mode, (width, height), color) + if palette: + out.putpalette(palette.palette) + out.paste(image, (left, top)) + return out + + +def fit(image, size, method=Image.BICUBIC, bleed=0.0, centering=(0.5, 0.5)): + """ + Returns a resized and cropped version of the image, cropped to the + requested aspect ratio and size. + + This function was contributed by Kevin Cazabon. + + :param image: The image to resize and crop. + :param size: The requested output size in pixels, given as a + (width, height) tuple. + :param method: Resampling method to use. Default is + :py:attr:`PIL.Image.BICUBIC`. See :ref:`concept-filters`. + :param bleed: Remove a border around the outside of the image from all + four edges. The value is a decimal percentage (use 0.01 for + one percent). The default value is 0 (no border). + Cannot be greater than or equal to 0.5. + :param centering: Control the cropping position. Use (0.5, 0.5) for + center cropping (e.g. if cropping the width, take 50% off + of the left side, and therefore 50% off the right side). + (0.0, 0.0) will crop from the top left corner (i.e. if + cropping the width, take all of the crop off of the right + side, and if cropping the height, take all of it off the + bottom). (1.0, 0.0) will crop from the bottom left + corner, etc. (i.e. if cropping the width, take all of the + crop off the left side, and if cropping the height take + none from the top, and therefore all off the bottom). + :return: An image. 
+ """ + + # by Kevin Cazabon, Feb 17/2000 + # kevin@cazabon.com + # http://www.cazabon.com + + # ensure centering is mutable + centering = list(centering) + + if not 0.0 <= centering[0] <= 1.0: + centering[0] = 0.5 + if not 0.0 <= centering[1] <= 1.0: + centering[1] = 0.5 + + if not 0.0 <= bleed < 0.5: + bleed = 0.0 + + # calculate the area to use for resizing and cropping, subtracting + # the 'bleed' around the edges + + # number of pixels to trim off on Top and Bottom, Left and Right + bleed_pixels = (bleed * image.size[0], bleed * image.size[1]) + + live_size = ( + image.size[0] - bleed_pixels[0] * 2, + image.size[1] - bleed_pixels[1] * 2, + ) + + # calculate the aspect ratio of the live_size + live_size_ratio = live_size[0] / live_size[1] + + # calculate the aspect ratio of the output image + output_ratio = size[0] / size[1] + + # figure out if the sides or top/bottom will be cropped off + if live_size_ratio == output_ratio: + # live_size is already the needed ratio + crop_width = live_size[0] + crop_height = live_size[1] + elif live_size_ratio >= output_ratio: + # live_size is wider than what's needed, crop the sides + crop_width = output_ratio * live_size[1] + crop_height = live_size[1] + else: + # live_size is taller than what's needed, crop the top and bottom + crop_width = live_size[0] + crop_height = live_size[0] / output_ratio + + # make the crop + crop_left = bleed_pixels[0] + (live_size[0] - crop_width) * centering[0] + crop_top = bleed_pixels[1] + (live_size[1] - crop_height) * centering[1] + + crop = (crop_left, crop_top, crop_left + crop_width, crop_top + crop_height) + + # resize the image and return it + return image.resize(size, method, box=crop) + + +def flip(image): + """ + Flip the image vertically (top to bottom). + + :param image: The image to flip. + :return: An image. + """ + return image.transpose(Image.FLIP_TOP_BOTTOM) + + +def grayscale(image): + """ + Convert the image to grayscale. + + :param image: The image to convert. 
+ :return: An image. + """ + return image.convert("L") + + +def invert(image): + """ + Invert (negate) the image. + + :param image: The image to invert. + :return: An image. + """ + lut = [] + for i in range(256): + lut.append(255 - i) + return _lut(image, lut) + + +def mirror(image): + """ + Flip image horizontally (left to right). + + :param image: The image to mirror. + :return: An image. + """ + return image.transpose(Image.FLIP_LEFT_RIGHT) + + +def posterize(image, bits): + """ + Reduce the number of bits for each color channel. + + :param image: The image to posterize. + :param bits: The number of bits to keep for each channel (1-8). + :return: An image. + """ + lut = [] + mask = ~(2 ** (8 - bits) - 1) + for i in range(256): + lut.append(i & mask) + return _lut(image, lut) + + +def solarize(image, threshold=128): + """ + Invert all pixel values above a threshold. + + :param image: The image to solarize. + :param threshold: All pixels above this greyscale level are inverted. + :return: An image. + """ + lut = [] + for i in range(256): + if i < threshold: + lut.append(i) + else: + lut.append(255 - i) + return _lut(image, lut) + + +def exif_transpose(image): + """ + If an image has an EXIF Orientation tag, return a new image that is + transposed accordingly. Otherwise, return a copy of the image. + + :param image: The image to transpose. + :return: An image. 
+ """ + exif = image.getexif() + orientation = exif.get(0x0112) + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90, + }.get(orientation) + if method is not None: + transposed_image = image.transpose(method) + transposed_exif = transposed_image.getexif() + if 0x0112 in transposed_exif: + del transposed_exif[0x0112] + if "exif" in transposed_image.info: + transposed_image.info["exif"] = transposed_exif.tobytes() + elif "Raw profile type exif" in transposed_image.info: + transposed_image.info[ + "Raw profile type exif" + ] = transposed_exif.tobytes().hex() + elif "XML:com.adobe.xmp" in transposed_image.info: + transposed_image.info["XML:com.adobe.xmp"] = re.sub( + r'tiff:Orientation="([0-9])"', + "", + transposed_image.info["XML:com.adobe.xmp"], + ) + return transposed_image + return image.copy() diff --git a/.venv/lib/python3.9/site-packages/PIL/ImagePalette.py b/.venv/lib/python3.9/site-packages/PIL/ImagePalette.py new file mode 100644 index 00000000..36826bdf --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImagePalette.py @@ -0,0 +1,261 @@ +# +# The Python Imaging Library. +# $Id$ +# +# image palette object +# +# History: +# 1996-03-11 fl Rewritten. +# 1997-01-03 fl Up and running. +# 1997-08-23 fl Added load hack +# 2001-04-16 fl Fixed randint shadow bug in random() +# +# Copyright (c) 1997-2001 by Secret Labs AB +# Copyright (c) 1996-1997 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import array +import warnings + +from . import GimpGradientFile, GimpPaletteFile, ImageColor, PaletteFile + + +class ImagePalette: + """ + Color palette for palette mapped images + + :param mode: The mode to use for the palette. See: + :ref:`concept-modes`. Defaults to "RGB" + :param palette: An optional palette. If given, it must be a bytearray, + an array or a list of ints between 0-255. 
The list must consist of + all channels for one color followed by the next color (e.g. RGBRGBRGB). + Defaults to an empty palette. + :param size: An optional palette size. If given, an error is raised + if ``palette`` is not of equal length. + """ + + def __init__(self, mode="RGB", palette=None, size=0): + self.mode = mode + self.rawmode = None # if set, palette contains raw data + self.palette = palette or bytearray() + self.dirty = None + if size != 0: + warnings.warn( + "The size parameter is deprecated and will be removed in Pillow 10 " + "(2023-01-02).", + DeprecationWarning, + ) + if size != len(self.palette): + raise ValueError("wrong palette size") + + @property + def palette(self): + return self._palette + + @palette.setter + def palette(self, palette): + self._palette = palette + + mode_len = len(self.mode) + self.colors = {} + for i in range(0, len(self.palette), mode_len): + color = tuple(self.palette[i : i + mode_len]) + if color in self.colors: + continue + self.colors[color] = i // mode_len + + def copy(self): + new = ImagePalette() + + new.mode = self.mode + new.rawmode = self.rawmode + if self.palette is not None: + new.palette = self.palette[:] + new.dirty = self.dirty + + return new + + def getdata(self): + """ + Get palette contents in format suitable for the low-level + ``im.putpalette`` primitive. + + .. warning:: This method is experimental. + """ + if self.rawmode: + return self.rawmode, self.palette + return self.mode, self.tobytes() + + def tobytes(self): + """Convert palette to bytes. + + .. warning:: This method is experimental. + """ + if self.rawmode: + raise ValueError("palette contains raw palette data") + if isinstance(self.palette, bytes): + return self.palette + arr = array.array("B", self.palette) + return arr.tobytes() + + # Declare tostring as an alias for tobytes + tostring = tobytes + + def getcolor(self, color, image=None): + """Given an rgb tuple, allocate palette entry. + + .. warning:: This method is experimental. 
+ """ + if self.rawmode: + raise ValueError("palette contains raw palette data") + if isinstance(color, tuple): + if self.mode == "RGB": + if len(color) == 4 and color[3] == 255: + color = color[:3] + elif self.mode == "RGBA": + if len(color) == 3: + color += (255,) + try: + return self.colors[color] + except KeyError as e: + # allocate new color slot + if not isinstance(self.palette, bytearray): + self._palette = bytearray(self.palette) + index = len(self.palette) // 3 + special_colors = () + if image: + special_colors = ( + image.info.get("background"), + image.info.get("transparency"), + ) + while index in special_colors: + index += 1 + if index >= 256: + if image: + # Search for an unused index + for i, count in reversed(list(enumerate(image.histogram()))): + if count == 0 and i not in special_colors: + index = i + break + if index >= 256: + raise ValueError("cannot allocate more than 256 colors") from e + self.colors[color] = index + if index * 3 < len(self.palette): + self._palette = ( + self.palette[: index * 3] + + bytes(color) + + self.palette[index * 3 + 3 :] + ) + else: + self._palette += bytes(color) + self.dirty = 1 + return index + else: + raise ValueError(f"unknown color specifier: {repr(color)}") + + def save(self, fp): + """Save palette to text file. + + .. warning:: This method is experimental. 
+ """ + if self.rawmode: + raise ValueError("palette contains raw palette data") + if isinstance(fp, str): + fp = open(fp, "w") + fp.write("# Palette\n") + fp.write(f"# Mode: {self.mode}\n") + for i in range(256): + fp.write(f"{i}") + for j in range(i * len(self.mode), (i + 1) * len(self.mode)): + try: + fp.write(f" {self.palette[j]}") + except IndexError: + fp.write(" 0") + fp.write("\n") + fp.close() + + +# -------------------------------------------------------------------- +# Internal + + +def raw(rawmode, data): + palette = ImagePalette() + palette.rawmode = rawmode + palette.palette = data + palette.dirty = 1 + return palette + + +# -------------------------------------------------------------------- +# Factories + + +def make_linear_lut(black, white): + lut = [] + if black == 0: + for i in range(256): + lut.append(white * i // 255) + else: + raise NotImplementedError # FIXME + return lut + + +def make_gamma_lut(exp): + lut = [] + for i in range(256): + lut.append(int(((i / 255.0) ** exp) * 255.0 + 0.5)) + return lut + + +def negative(mode="RGB"): + palette = list(range(256 * len(mode))) + palette.reverse() + return ImagePalette(mode, [i // len(mode) for i in palette]) + + +def random(mode="RGB"): + from random import randint + + palette = [] + for i in range(256 * len(mode)): + palette.append(randint(0, 255)) + return ImagePalette(mode, palette) + + +def sepia(white="#fff0c0"): + bands = [make_linear_lut(0, band) for band in ImageColor.getrgb(white)] + return ImagePalette("RGB", [bands[i % 3][i // 3] for i in range(256 * 3)]) + + +def wedge(mode="RGB"): + palette = list(range(256 * len(mode))) + return ImagePalette(mode, [i // len(mode) for i in palette]) + + +def load(filename): + + # FIXME: supports GIMP gradients only + + with open(filename, "rb") as fp: + + for paletteHandler in [ + GimpPaletteFile.GimpPaletteFile, + GimpGradientFile.GimpGradientFile, + PaletteFile.PaletteFile, + ]: + try: + fp.seek(0) + lut = paletteHandler(fp).getpalette() + if lut: + 
break + except (SyntaxError, ValueError): + # import traceback + # traceback.print_exc() + pass + else: + raise OSError("cannot load palette") + + return lut # data, rawmode diff --git a/.venv/lib/python3.9/site-packages/PIL/ImagePath.py b/.venv/lib/python3.9/site-packages/PIL/ImagePath.py new file mode 100644 index 00000000..3d3538c9 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImagePath.py @@ -0,0 +1,19 @@ +# +# The Python Imaging Library +# $Id$ +# +# path interface +# +# History: +# 1996-11-04 fl Created +# 2002-04-14 fl Added documentation stub class +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# + +from . import Image + +Path = Image.core.path diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageQt.py b/.venv/lib/python3.9/site-packages/PIL/ImageQt.py new file mode 100644 index 00000000..32630f2c --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageQt.py @@ -0,0 +1,213 @@ +# +# The Python Imaging Library. +# $Id$ +# +# a simple Qt image interface. +# +# history: +# 2006-06-03 fl: created +# 2006-06-04 fl: inherit from QImage instead of wrapping it +# 2006-06-05 fl: removed toimage helper; move string support to ImageQt +# 2013-11-13 fl: add support for Qt5 (aurelien.ballier@cyclonit.com) +# +# Copyright (c) 2006 by Secret Labs AB +# Copyright (c) 2006 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import sys +from io import BytesIO + +from . 
import Image +from ._util import isPath + +qt_versions = [ + ["6", "PyQt6"], + ["side6", "PySide6"], + ["5", "PyQt5"], + ["side2", "PySide2"], +] + +# If a version has already been imported, attempt it first +qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True) +for qt_version, qt_module in qt_versions: + try: + if qt_module == "PyQt6": + from PyQt6.QtCore import QBuffer, QIODevice + from PyQt6.QtGui import QImage, QPixmap, qRgba + elif qt_module == "PySide6": + from PySide6.QtCore import QBuffer, QIODevice + from PySide6.QtGui import QImage, QPixmap, qRgba + elif qt_module == "PyQt5": + from PyQt5.QtCore import QBuffer, QIODevice + from PyQt5.QtGui import QImage, QPixmap, qRgba + elif qt_module == "PySide2": + from PySide2.QtCore import QBuffer, QIODevice + from PySide2.QtGui import QImage, QPixmap, qRgba + except (ImportError, RuntimeError): + continue + qt_is_installed = True + break +else: + qt_is_installed = False + qt_version = None + + +def rgb(r, g, b, a=255): + """(Internal) Turns an RGB color into a Qt compatible color integer.""" + # use qRgb to pack the colors, and then turn the resulting long + # into a negative integer with the same bitpattern. + return qRgba(r, g, b, a) & 0xFFFFFFFF + + +def fromqimage(im): + """ + :param im: QImage or PIL ImageQt object + """ + buffer = QBuffer() + qt_openmode = QIODevice.OpenMode if qt_version == "6" else QIODevice + buffer.open(qt_openmode.ReadWrite) + # preserve alpha channel with png + # otherwise ppm is more friendly with Image.open + if im.hasAlphaChannel(): + im.save(buffer, "png") + else: + im.save(buffer, "ppm") + + b = BytesIO() + b.write(buffer.data()) + buffer.close() + b.seek(0) + + return Image.open(b) + + +def fromqpixmap(im): + return fromqimage(im) + # buffer = QBuffer() + # buffer.open(QIODevice.ReadWrite) + # # im.save(buffer) + # # What if png doesn't support some image features like animation? 
+ # im.save(buffer, 'ppm') + # bytes_io = BytesIO() + # bytes_io.write(buffer.data()) + # buffer.close() + # bytes_io.seek(0) + # return Image.open(bytes_io) + + +def align8to32(bytes, width, mode): + """ + converts each scanline of data from 8 bit to 32 bit aligned + """ + + bits_per_pixel = {"1": 1, "L": 8, "P": 8}[mode] + + # calculate bytes per line and the extra padding if needed + bits_per_line = bits_per_pixel * width + full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8) + bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0) + + extra_padding = -bytes_per_line % 4 + + # already 32 bit aligned by luck + if not extra_padding: + return bytes + + new_data = [] + for i in range(len(bytes) // bytes_per_line): + new_data.append( + bytes[i * bytes_per_line : (i + 1) * bytes_per_line] + + b"\x00" * extra_padding + ) + + return b"".join(new_data) + + +def _toqclass_helper(im): + data = None + colortable = None + exclusive_fp = False + + # handle filename, if given instead of image name + if hasattr(im, "toUtf8"): + # FIXME - is this really the best way to do this? 
+ im = str(im.toUtf8(), "utf-8") + if isPath(im): + im = Image.open(im) + exclusive_fp = True + + qt_format = QImage.Format if qt_version == "6" else QImage + if im.mode == "1": + format = qt_format.Format_Mono + elif im.mode == "L": + format = qt_format.Format_Indexed8 + colortable = [] + for i in range(256): + colortable.append(rgb(i, i, i)) + elif im.mode == "P": + format = qt_format.Format_Indexed8 + colortable = [] + palette = im.getpalette() + for i in range(0, len(palette), 3): + colortable.append(rgb(*palette[i : i + 3])) + elif im.mode == "RGB": + # Populate the 4th channel with 255 + im = im.convert("RGBA") + + data = im.tobytes("raw", "BGRA") + format = qt_format.Format_RGB32 + elif im.mode == "RGBA": + data = im.tobytes("raw", "BGRA") + format = qt_format.Format_ARGB32 + else: + if exclusive_fp: + im.close() + raise ValueError(f"unsupported image mode {repr(im.mode)}") + + size = im.size + __data = data or align8to32(im.tobytes(), size[0], im.mode) + if exclusive_fp: + im.close() + return {"data": __data, "size": size, "format": format, "colortable": colortable} + + +if qt_is_installed: + + class ImageQt(QImage): + def __init__(self, im): + """ + An PIL image wrapper for Qt. This is a subclass of PyQt's QImage + class. + + :param im: A PIL Image object, or a file name (given either as + Python string or a PyQt string object). + """ + im_data = _toqclass_helper(im) + # must keep a reference, or Qt will crash! + # All QImage constructors that take data operate on an existing + # buffer, so this buffer has to hang on for the life of the image. + # Fixes https://github.com/python-pillow/Pillow/issues/1370 + self.__data = im_data["data"] + super().__init__( + self.__data, + im_data["size"][0], + im_data["size"][1], + im_data["format"], + ) + if im_data["colortable"]: + self.setColorTable(im_data["colortable"]) + + +def toqimage(im): + return ImageQt(im) + + +def toqpixmap(im): + # # This doesn't work. For now using a dumb approach. 
+ # im_data = _toqclass_helper(im) + # result = QPixmap(im_data["size"][0], im_data["size"][1]) + # result.loadFromData(im_data["data"]) + qimage = toqimage(im) + return QPixmap.fromImage(qimage) diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageSequence.py b/.venv/lib/python3.9/site-packages/PIL/ImageSequence.py new file mode 100644 index 00000000..9df910a4 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageSequence.py @@ -0,0 +1,75 @@ +# +# The Python Imaging Library. +# $Id$ +# +# sequence support classes +# +# history: +# 1997-02-20 fl Created +# +# Copyright (c) 1997 by Secret Labs AB. +# Copyright (c) 1997 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +## + + +class Iterator: + """ + This class implements an iterator object that can be used to loop + over an image sequence. + + You can use the ``[]`` operator to access elements by index. This operator + will raise an :py:exc:`IndexError` if you try to access a nonexistent + frame. + + :param im: An image object. + """ + + def __init__(self, im): + if not hasattr(im, "seek"): + raise AttributeError("im must have seek method") + self.im = im + self.position = getattr(self.im, "_min_frame", 0) + + def __getitem__(self, ix): + try: + self.im.seek(ix) + return self.im + except EOFError as e: + raise IndexError from e # end of sequence + + def __iter__(self): + return self + + def __next__(self): + try: + self.im.seek(self.position) + self.position += 1 + return self.im + except EOFError as e: + raise StopIteration from e + + +def all_frames(im, func=None): + """ + Applies a given function to all frames in an image or a list of images. + The frames are returned as a list of separate images. + + :param im: An image, or a list of images. + :param func: The function to apply to all of the image frames. + :returns: A list of images. 
+ """ + if not isinstance(im, list): + im = [im] + + ims = [] + for imSequence in im: + current = imSequence.tell() + + ims += [im_frame.copy() for im_frame in Iterator(imSequence)] + + imSequence.seek(current) + return [func(im) for im in ims] if func else ims diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageShow.py b/.venv/lib/python3.9/site-packages/PIL/ImageShow.py new file mode 100644 index 00000000..60c97542 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageShow.py @@ -0,0 +1,264 @@ +# +# The Python Imaging Library. +# $Id$ +# +# im.show() drivers +# +# History: +# 2008-04-06 fl Created +# +# Copyright (c) Secret Labs AB 2008. +# +# See the README file for information on usage and redistribution. +# +import os +import shutil +import subprocess +import sys +import tempfile +from shlex import quote + +from PIL import Image + +_viewers = [] + + +def register(viewer, order=1): + """ + The :py:func:`register` function is used to register additional viewers. + + :param viewer: The viewer to be registered. + :param order: + Zero or a negative integer to prepend this viewer to the list, + a positive integer to append it. + """ + try: + if issubclass(viewer, Viewer): + viewer = viewer() + except TypeError: + pass # raised if viewer wasn't a class + if order > 0: + _viewers.append(viewer) + else: + _viewers.insert(0, viewer) + + +def show(image, title=None, **options): + r""" + Display a given image. + + :param image: An image object. + :param title: Optional title. Not all viewers can display the title. + :param \**options: Additional viewer options. + :returns: ``True`` if a suitable viewer was found, ``False`` otherwise. + """ + for viewer in _viewers: + if viewer.show(image, title=title, **options): + return 1 + return 0 + + +class Viewer: + """Base class for viewers.""" + + # main api + + def show(self, image, **options): + """ + The main function for displaying an image. + Converts the given image to the target format and displays it. 
+ """ + + if not ( + image.mode in ("1", "RGBA") + or (self.format == "PNG" and image.mode in ("I;16", "LA")) + ): + base = Image.getmodebase(image.mode) + if image.mode != base: + image = image.convert(base) + + return self.show_image(image, **options) + + # hook methods + + format = None + """The format to convert the image into.""" + options = {} + """Additional options used to convert the image.""" + + def get_format(self, image): + """Return format name, or ``None`` to save as PGM/PPM.""" + return self.format + + def get_command(self, file, **options): + """ + Returns the command used to display the file. + Not implemented in the base class. + """ + raise NotImplementedError + + def save_image(self, image): + """Save to temporary file and return filename.""" + return image._dump(format=self.get_format(image), **self.options) + + def show_image(self, image, **options): + """Display the given image.""" + return self.show_file(self.save_image(image), **options) + + def show_file(self, file, **options): + """Display the given file.""" + os.system(self.get_command(file, **options)) + return 1 + + +# -------------------------------------------------------------------- + + +class WindowsViewer(Viewer): + """The default viewer on Windows is the default system application for PNG files.""" + + format = "PNG" + options = {"compress_level": 1} + + def get_command(self, file, **options): + return ( + f'start "Pillow" /WAIT "{file}" ' + "&& ping -n 2 127.0.0.1 >NUL " + f'&& del /f "{file}"' + ) + + +if sys.platform == "win32": + register(WindowsViewer) + + +class MacViewer(Viewer): + """The default viewer on macOS using ``Preview.app``.""" + + format = "PNG" + options = {"compress_level": 1} + + def get_command(self, file, **options): + # on darwin open returns immediately resulting in the temp + # file removal while app is opening + command = "open -a Preview.app" + command = f"({command} {quote(file)}; sleep 20; rm -f {quote(file)})&" + return command + + def 
show_file(self, file, **options): + """Display given file""" + fd, path = tempfile.mkstemp() + with os.fdopen(fd, "w") as f: + f.write(file) + with open(path) as f: + subprocess.Popen( + ["im=$(cat); open -a Preview.app $im; sleep 20; rm -f $im"], + shell=True, + stdin=f, + ) + os.remove(path) + return 1 + + +if sys.platform == "darwin": + register(MacViewer) + + +class UnixViewer(Viewer): + format = "PNG" + options = {"compress_level": 1} + + def get_command(self, file, **options): + command = self.get_command_ex(file, **options)[0] + return f"({command} {quote(file)}; rm -f {quote(file)})&" + + def show_file(self, file, **options): + """Display given file""" + fd, path = tempfile.mkstemp() + with os.fdopen(fd, "w") as f: + f.write(file) + with open(path) as f: + command = self.get_command_ex(file, **options)[0] + subprocess.Popen( + ["im=$(cat);" + command + " $im; rm -f $im"], shell=True, stdin=f + ) + os.remove(path) + return 1 + + +class DisplayViewer(UnixViewer): + """The ImageMagick ``display`` command.""" + + def get_command_ex(self, file, **options): + command = executable = "display" + return command, executable + + +class GmDisplayViewer(UnixViewer): + """The GraphicsMagick ``gm display`` command.""" + + def get_command_ex(self, file, **options): + executable = "gm" + command = "gm display" + return command, executable + + +class EogViewer(UnixViewer): + """The GNOME Image Viewer ``eog`` command.""" + + def get_command_ex(self, file, **options): + executable = "eog" + command = "eog -n" + return command, executable + + +class XVViewer(UnixViewer): + """ + The X Viewer ``xv`` command. + This viewer supports the ``title`` parameter. + """ + + def get_command_ex(self, file, title=None, **options): + # note: xv is pretty outdated. most modern systems have + # imagemagick's display command instead. 
+ command = executable = "xv" + if title: + command += f" -name {quote(title)}" + return command, executable + + +if sys.platform not in ("win32", "darwin"): # unixoids + if shutil.which("display"): + register(DisplayViewer) + if shutil.which("gm"): + register(GmDisplayViewer) + if shutil.which("eog"): + register(EogViewer) + if shutil.which("xv"): + register(XVViewer) + + +class IPythonViewer(Viewer): + """The viewer for IPython frontends.""" + + def show_image(self, image, **options): + ipython_display(image) + return 1 + + +try: + from IPython.display import display as ipython_display +except ImportError: + pass +else: + register(IPythonViewer) + + +if __name__ == "__main__": + + if len(sys.argv) < 2: + print("Syntax: python3 ImageShow.py imagefile [title]") + sys.exit() + + with Image.open(sys.argv[1]) as im: + print(show(im, *sys.argv[2:])) diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageStat.py b/.venv/lib/python3.9/site-packages/PIL/ImageStat.py new file mode 100644 index 00000000..50bafc97 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageStat.py @@ -0,0 +1,147 @@ +# +# The Python Imaging Library. +# $Id$ +# +# global image statistics +# +# History: +# 1996-04-05 fl Created +# 1997-05-21 fl Added mask; added rms, var, stddev attributes +# 1997-08-05 fl Added median +# 1998-07-05 hk Fixed integer overflow error +# +# Notes: +# This class shows how to implement delayed evaluation of attributes. +# To get a certain value, simply access the corresponding attribute. +# The __getattr__ dispatcher takes care of the rest. +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996-97. +# +# See the README file for information on usage and redistribution. 
+# + +import functools +import math +import operator + + +class Stat: + def __init__(self, image_or_list, mask=None): + try: + if mask: + self.h = image_or_list.histogram(mask) + else: + self.h = image_or_list.histogram() + except AttributeError: + self.h = image_or_list # assume it to be a histogram list + if not isinstance(self.h, list): + raise TypeError("first argument must be image or list") + self.bands = list(range(len(self.h) // 256)) + + def __getattr__(self, id): + """Calculate missing attribute""" + if id[:4] == "_get": + raise AttributeError(id) + # calculate missing attribute + v = getattr(self, "_get" + id)() + setattr(self, id, v) + return v + + def _getextrema(self): + """Get min/max values for each band in the image""" + + def minmax(histogram): + n = 255 + x = 0 + for i in range(256): + if histogram[i]: + n = min(n, i) + x = max(x, i) + return n, x # returns (255, 0) if there's no data in the histogram + + v = [] + for i in range(0, len(self.h), 256): + v.append(minmax(self.h[i:])) + return v + + def _getcount(self): + """Get total number of pixels in each layer""" + + v = [] + for i in range(0, len(self.h), 256): + v.append(functools.reduce(operator.add, self.h[i : i + 256])) + return v + + def _getsum(self): + """Get sum of all pixels in each layer""" + + v = [] + for i in range(0, len(self.h), 256): + layerSum = 0.0 + for j in range(256): + layerSum += j * self.h[i + j] + v.append(layerSum) + return v + + def _getsum2(self): + """Get squared sum of all pixels in each layer""" + + v = [] + for i in range(0, len(self.h), 256): + sum2 = 0.0 + for j in range(256): + sum2 += (j ** 2) * float(self.h[i + j]) + v.append(sum2) + return v + + def _getmean(self): + """Get average pixel level for each layer""" + + v = [] + for i in self.bands: + v.append(self.sum[i] / self.count[i]) + return v + + def _getmedian(self): + """Get median pixel level for each layer""" + + v = [] + for i in self.bands: + s = 0 + half = self.count[i] // 2 + b = i * 256 + for j 
in range(256): + s = s + self.h[b + j] + if s > half: + break + v.append(j) + return v + + def _getrms(self): + """Get RMS for each layer""" + + v = [] + for i in self.bands: + v.append(math.sqrt(self.sum2[i] / self.count[i])) + return v + + def _getvar(self): + """Get variance for each layer""" + + v = [] + for i in self.bands: + n = self.count[i] + v.append((self.sum2[i] - (self.sum[i] ** 2.0) / n) / n) + return v + + def _getstddev(self): + """Get standard deviation for each layer""" + + v = [] + for i in self.bands: + v.append(math.sqrt(self.var[i])) + return v + + +Global = Stat # compatibility diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageTk.py b/.venv/lib/python3.9/site-packages/PIL/ImageTk.py new file mode 100644 index 00000000..62db7a71 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageTk.py @@ -0,0 +1,300 @@ +# +# The Python Imaging Library. +# $Id$ +# +# a Tk display interface +# +# History: +# 96-04-08 fl Created +# 96-09-06 fl Added getimage method +# 96-11-01 fl Rewritten, removed image attribute and crop method +# 97-05-09 fl Use PyImagingPaste method instead of image type +# 97-05-12 fl Minor tweaks to match the IFUNC95 interface +# 97-05-17 fl Support the "pilbitmap" booster patch +# 97-06-05 fl Added file= and data= argument to image constructors +# 98-03-09 fl Added width and height methods to Image classes +# 98-07-02 fl Use default mode for "P" images without palette attribute +# 98-07-02 fl Explicitly destroy Tkinter image objects +# 99-07-24 fl Support multiple Tk interpreters (from Greg Couch) +# 99-07-26 fl Automatically hook into Tkinter (if possible) +# 99-08-15 fl Hook uses _imagingtk instead of _imaging +# +# Copyright (c) 1997-1999 by Secret Labs AB +# Copyright (c) 1996-1997 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import tkinter +from io import BytesIO + +from . 
import Image + +# -------------------------------------------------------------------- +# Check for Tkinter interface hooks + +_pilbitmap_ok = None + + +def _pilbitmap_check(): + global _pilbitmap_ok + if _pilbitmap_ok is None: + try: + im = Image.new("1", (1, 1)) + tkinter.BitmapImage(data=f"PIL:{im.im.id}") + _pilbitmap_ok = 1 + except tkinter.TclError: + _pilbitmap_ok = 0 + return _pilbitmap_ok + + +def _get_image_from_kw(kw): + source = None + if "file" in kw: + source = kw.pop("file") + elif "data" in kw: + source = BytesIO(kw.pop("data")) + if source: + return Image.open(source) + + +# -------------------------------------------------------------------- +# PhotoImage + + +class PhotoImage: + """ + A Tkinter-compatible photo image. This can be used + everywhere Tkinter expects an image object. If the image is an RGBA + image, pixels having alpha 0 are treated as transparent. + + The constructor takes either a PIL image, or a mode and a size. + Alternatively, you can use the ``file`` or ``data`` options to initialize + the photo image object. + + :param image: Either a PIL image, or a mode string. If a mode string is + used, a size must also be given. + :param size: If the first argument is a mode string, this defines the size + of the image. + :keyword file: A filename to load the image from (using + ``Image.open(file)``). + :keyword data: An 8-bit string containing image data (as loaded from an + image file). 
+ """ + + def __init__(self, image=None, size=None, **kw): + + # Tk compatibility: file or data + if image is None: + image = _get_image_from_kw(kw) + + if hasattr(image, "mode") and hasattr(image, "size"): + # got an image instead of a mode + mode = image.mode + if mode == "P": + # palette mapped data + image.load() + try: + mode = image.palette.mode + except AttributeError: + mode = "RGB" # default + size = image.size + kw["width"], kw["height"] = size + else: + mode = image + image = None + + if mode not in ["1", "L", "RGB", "RGBA"]: + mode = Image.getmodebase(mode) + + self.__mode = mode + self.__size = size + self.__photo = tkinter.PhotoImage(**kw) + self.tk = self.__photo.tk + if image: + self.paste(image) + + def __del__(self): + name = self.__photo.name + self.__photo.name = None + try: + self.__photo.tk.call("image", "delete", name) + except Exception: + pass # ignore internal errors + + def __str__(self): + """ + Get the Tkinter photo image identifier. This method is automatically + called by Tkinter whenever a PhotoImage object is passed to a Tkinter + method. + + :return: A Tkinter photo image identifier (a string). + """ + return str(self.__photo) + + def width(self): + """ + Get the width of the image. + + :return: The width, in pixels. + """ + return self.__size[0] + + def height(self): + """ + Get the height of the image. + + :return: The height, in pixels. + """ + return self.__size[1] + + def paste(self, im, box=None): + """ + Paste a PIL image into the photo image. Note that this can + be very slow if the photo image is displayed. + + :param im: A PIL image. The size must match the target region. If the + mode does not match, the image is converted to the mode of + the bitmap image. + :param box: A 4-tuple defining the left, upper, right, and lower pixel + coordinate. See :ref:`coordinate-system`. If None is given + instead of a tuple, all of the image is assumed. 
+ """ + + # convert to blittable + im.load() + image = im.im + if image.isblock() and im.mode == self.__mode: + block = image + else: + block = image.new_block(self.__mode, im.size) + image.convert2(block, image) # convert directly between buffers + + tk = self.__photo.tk + + try: + tk.call("PyImagingPhoto", self.__photo, block.id) + except tkinter.TclError: + # activate Tkinter hook + try: + from . import _imagingtk + + try: + if hasattr(tk, "interp"): + # Required for PyPy, which always has CFFI installed + from cffi import FFI + + ffi = FFI() + + # PyPy is using an FFI CDATA element + # (Pdb) self.tk.interp + # + _imagingtk.tkinit(int(ffi.cast("uintptr_t", tk.interp)), 1) + else: + _imagingtk.tkinit(tk.interpaddr(), 1) + except AttributeError: + _imagingtk.tkinit(id(tk), 0) + tk.call("PyImagingPhoto", self.__photo, block.id) + except (ImportError, AttributeError, tkinter.TclError): + raise # configuration problem; cannot attach to Tkinter + + +# -------------------------------------------------------------------- +# BitmapImage + + +class BitmapImage: + """ + A Tkinter-compatible bitmap image. This can be used everywhere Tkinter + expects an image object. + + The given image must have mode "1". Pixels having value 0 are treated as + transparent. Options, if any, are passed on to Tkinter. The most commonly + used option is ``foreground``, which is used to specify the color for the + non-transparent parts. See the Tkinter documentation for information on + how to specify colours. + + :param image: A PIL image. 
+ """ + + def __init__(self, image=None, **kw): + + # Tk compatibility: file or data + if image is None: + image = _get_image_from_kw(kw) + + self.__mode = image.mode + self.__size = image.size + + if _pilbitmap_check(): + # fast way (requires the pilbitmap booster patch) + image.load() + kw["data"] = f"PIL:{image.im.id}" + self.__im = image # must keep a reference + else: + # slow but safe way + kw["data"] = image.tobitmap() + self.__photo = tkinter.BitmapImage(**kw) + + def __del__(self): + name = self.__photo.name + self.__photo.name = None + try: + self.__photo.tk.call("image", "delete", name) + except Exception: + pass # ignore internal errors + + def width(self): + """ + Get the width of the image. + + :return: The width, in pixels. + """ + return self.__size[0] + + def height(self): + """ + Get the height of the image. + + :return: The height, in pixels. + """ + return self.__size[1] + + def __str__(self): + """ + Get the Tkinter bitmap image identifier. This method is automatically + called by Tkinter whenever a BitmapImage object is passed to a Tkinter + method. + + :return: A Tkinter bitmap image identifier (a string). 
+ """ + return str(self.__photo) + + +def getimage(photo): + """Copies the contents of a PhotoImage to a PIL image memory.""" + im = Image.new("RGBA", (photo.width(), photo.height())) + block = im.im + + photo.tk.call("PyImagingPhotoGet", photo, block.id) + + return im + + +def _show(image, title): + """Helper for the Image.show method.""" + + class UI(tkinter.Label): + def __init__(self, master, im): + if im.mode == "1": + self.image = BitmapImage(im, foreground="white", master=master) + else: + self.image = PhotoImage(im, master=master) + super().__init__(master, image=self.image, bg="black", bd=0) + + if not tkinter._default_root: + raise OSError("tkinter not initialized") + top = tkinter.Toplevel() + if title: + top.title(title) + UI(top, image).pack() diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageTransform.py b/.venv/lib/python3.9/site-packages/PIL/ImageTransform.py new file mode 100644 index 00000000..77791ab7 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageTransform.py @@ -0,0 +1,102 @@ +# +# The Python Imaging Library. +# $Id$ +# +# transform wrappers +# +# History: +# 2002-04-08 fl Created +# +# Copyright (c) 2002 by Secret Labs AB +# Copyright (c) 2002 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image + + +class Transform(Image.ImageTransformHandler): + def __init__(self, data): + self.data = data + + def getdata(self): + return self.method, self.data + + def transform(self, size, image, **options): + # can be overridden + method, data = self.getdata() + return image.transform(size, method, data, **options) + + +class AffineTransform(Transform): + """ + Define an affine image transform. + + This function takes a 6-tuple (a, b, c, d, e, f) which contain the first + two rows from an affine transform matrix. For each pixel (x, y) in the + output image, the new value is taken from a position (a x + b y + c, + d x + e y + f) in the input image, rounded to nearest pixel. 
+ + This function can be used to scale, translate, rotate, and shear the + original image. + + See :py:meth:`~PIL.Image.Image.transform` + + :param matrix: A 6-tuple (a, b, c, d, e, f) containing the first two rows + from an affine transform matrix. + """ + + method = Image.AFFINE + + +class ExtentTransform(Transform): + """ + Define a transform to extract a subregion from an image. + + Maps a rectangle (defined by two corners) from the image to a rectangle of + the given size. The resulting image will contain data sampled from between + the corners, such that (x0, y0) in the input image will end up at (0,0) in + the output image, and (x1, y1) at size. + + This method can be used to crop, stretch, shrink, or mirror an arbitrary + rectangle in the current image. It is slightly slower than crop, but about + as fast as a corresponding resize operation. + + See :py:meth:`~PIL.Image.Image.transform` + + :param bbox: A 4-tuple (x0, y0, x1, y1) which specifies two points in the + input image's coordinate system. See :ref:`coordinate-system`. + """ + + method = Image.EXTENT + + +class QuadTransform(Transform): + """ + Define a quad image transform. + + Maps a quadrilateral (a region defined by four corners) from the image to a + rectangle of the given size. + + See :py:meth:`~PIL.Image.Image.transform` + + :param xy: An 8-tuple (x0, y0, x1, y1, x2, y2, x3, y3) which contain the + upper left, lower left, lower right, and upper right corner of the + source quadrilateral. + """ + + method = Image.QUAD + + +class MeshTransform(Transform): + """ + Define a mesh image transform. A mesh transform consists of one or more + individual quad transforms. + + See :py:meth:`~PIL.Image.Image.transform` + + :param data: A list of (bbox, quad) tuples. 
+ """ + + method = Image.MESH diff --git a/.venv/lib/python3.9/site-packages/PIL/ImageWin.py b/.venv/lib/python3.9/site-packages/PIL/ImageWin.py new file mode 100644 index 00000000..ca9b14c8 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImageWin.py @@ -0,0 +1,230 @@ +# +# The Python Imaging Library. +# $Id$ +# +# a Windows DIB display interface +# +# History: +# 1996-05-20 fl Created +# 1996-09-20 fl Fixed subregion exposure +# 1997-09-21 fl Added draw primitive (for tzPrint) +# 2003-05-21 fl Added experimental Window/ImageWindow classes +# 2003-09-05 fl Added fromstring/tostring methods +# +# Copyright (c) Secret Labs AB 1997-2003. +# Copyright (c) Fredrik Lundh 1996-2003. +# +# See the README file for information on usage and redistribution. +# + +from . import Image + + +class HDC: + """ + Wraps an HDC integer. The resulting object can be passed to the + :py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose` + methods. + """ + + def __init__(self, dc): + self.dc = dc + + def __int__(self): + return self.dc + + +class HWND: + """ + Wraps an HWND integer. The resulting object can be passed to the + :py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose` + methods, instead of a DC. + """ + + def __init__(self, wnd): + self.wnd = wnd + + def __int__(self): + return self.wnd + + +class Dib: + """ + A Windows bitmap with the given mode and size. The mode can be one of "1", + "L", "P", or "RGB". + + If the display requires a palette, this constructor creates a suitable + palette and associates it with the image. For an "L" image, 128 greylevels + are allocated. For an "RGB" image, a 6x6x6 colour cube is used, together + with 20 greylevels. + + To make sure that palettes work properly under Windows, you must call the + ``palette`` method upon certain events from Windows. + + :param image: Either a PIL image, or a mode string. If a mode string is + used, a size must also be given. 
The mode can be one of "1", + "L", "P", or "RGB". + :param size: If the first argument is a mode string, this + defines the size of the image. + """ + + def __init__(self, image, size=None): + if hasattr(image, "mode") and hasattr(image, "size"): + mode = image.mode + size = image.size + else: + mode = image + image = None + if mode not in ["1", "L", "P", "RGB"]: + mode = Image.getmodebase(mode) + self.image = Image.core.display(mode, size) + self.mode = mode + self.size = size + if image: + self.paste(image) + + def expose(self, handle): + """ + Copy the bitmap contents to a device context. + + :param handle: Device context (HDC), cast to a Python integer, or an + HDC or HWND instance. In PythonWin, you can use + ``CDC.GetHandleAttrib()`` to get a suitable handle. + """ + if isinstance(handle, HWND): + dc = self.image.getdc(handle) + try: + result = self.image.expose(dc) + finally: + self.image.releasedc(handle, dc) + else: + result = self.image.expose(handle) + return result + + def draw(self, handle, dst, src=None): + """ + Same as expose, but allows you to specify where to draw the image, and + what part of it to draw. + + The destination and source areas are given as 4-tuple rectangles. If + the source is omitted, the entire image is copied. If the source and + the destination have different sizes, the image is resized as + necessary. + """ + if not src: + src = (0, 0) + self.size + if isinstance(handle, HWND): + dc = self.image.getdc(handle) + try: + result = self.image.draw(dc, dst, src) + finally: + self.image.releasedc(handle, dc) + else: + result = self.image.draw(handle, dst, src) + return result + + def query_palette(self, handle): + """ + Installs the palette associated with the image in the given device + context. + + This method should be called upon **QUERYNEWPALETTE** and + **PALETTECHANGED** events from Windows. If this method returns a + non-zero value, one or more display palette entries were changed, and + the image should be redrawn. 
+ + :param handle: Device context (HDC), cast to a Python integer, or an + HDC or HWND instance. + :return: A true value if one or more entries were changed (this + indicates that the image should be redrawn). + """ + if isinstance(handle, HWND): + handle = self.image.getdc(handle) + try: + result = self.image.query_palette(handle) + finally: + self.image.releasedc(handle, handle) + else: + result = self.image.query_palette(handle) + return result + + def paste(self, im, box=None): + """ + Paste a PIL image into the bitmap image. + + :param im: A PIL image. The size must match the target region. + If the mode does not match, the image is converted to the + mode of the bitmap image. + :param box: A 4-tuple defining the left, upper, right, and + lower pixel coordinate. See :ref:`coordinate-system`. If + None is given instead of a tuple, all of the image is + assumed. + """ + im.load() + if self.mode != im.mode: + im = im.convert(self.mode) + if box: + self.image.paste(im.im, box) + else: + self.image.paste(im.im) + + def frombytes(self, buffer): + """ + Load display memory contents from byte data. + + :param buffer: A buffer containing display data (usually + data returned from :py:func:`~PIL.ImageWin.Dib.tobytes`) + """ + return self.image.frombytes(buffer) + + def tobytes(self): + """ + Copy display memory contents to bytes object. + + :return: A bytes object containing display data. 
+ """ + return self.image.tobytes() + + +class Window: + """Create a Window with the given title size.""" + + def __init__(self, title="PIL", width=None, height=None): + self.hwnd = Image.core.createwindow( + title, self.__dispatcher, width or 0, height or 0 + ) + + def __dispatcher(self, action, *args): + return getattr(self, "ui_handle_" + action)(*args) + + def ui_handle_clear(self, dc, x0, y0, x1, y1): + pass + + def ui_handle_damage(self, x0, y0, x1, y1): + pass + + def ui_handle_destroy(self): + pass + + def ui_handle_repair(self, dc, x0, y0, x1, y1): + pass + + def ui_handle_resize(self, width, height): + pass + + def mainloop(self): + Image.core.eventloop() + + +class ImageWindow(Window): + """Create an image window which displays the given image.""" + + def __init__(self, image, title="PIL"): + if not isinstance(image, Dib): + image = Dib(image) + self.image = image + width, height = image.size + super().__init__(title, width=width, height=height) + + def ui_handle_repair(self, dc, x0, y0, x1, y1): + self.image.draw(dc, (x0, y0, x1, y1)) diff --git a/.venv/lib/python3.9/site-packages/PIL/ImtImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/ImtImagePlugin.py new file mode 100644 index 00000000..21ffd747 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/ImtImagePlugin.py @@ -0,0 +1,93 @@ +# +# The Python Imaging Library. +# $Id$ +# +# IM Tools support for PIL +# +# history: +# 1996-05-27 fl Created (read 8-bit images only) +# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.2) +# +# Copyright (c) Secret Labs AB 1997-2001. +# Copyright (c) Fredrik Lundh 1996-2001. +# +# See the README file for information on usage and redistribution. +# + + +import re + +from . import Image, ImageFile + +# +# -------------------------------------------------------------------- + +field = re.compile(br"([a-z]*) ([^ \r\n]*)") + + +## +# Image plugin for IM Tools images. 
+ + +class ImtImageFile(ImageFile.ImageFile): + + format = "IMT" + format_description = "IM Tools" + + def _open(self): + + # Quick rejection: if there's not a LF among the first + # 100 bytes, this is (probably) not a text header. + + if b"\n" not in self.fp.read(100): + raise SyntaxError("not an IM file") + self.fp.seek(0) + + xsize = ysize = 0 + + while True: + + s = self.fp.read(1) + if not s: + break + + if s == b"\x0C": + + # image data begins + self.tile = [ + ("raw", (0, 0) + self.size, self.fp.tell(), (self.mode, 0, 1)) + ] + + break + + else: + + # read key/value pair + # FIXME: dangerous, may read whole file + s = s + self.fp.readline() + if len(s) == 1 or len(s) > 100: + break + if s[0] == ord(b"*"): + continue # comment + + m = field.match(s) + if not m: + break + k, v = m.group(1, 2) + if k == "width": + xsize = int(v) + self._size = xsize, ysize + elif k == "height": + ysize = int(v) + self._size = xsize, ysize + elif k == "pixel" and v == "n8": + self.mode = "L" + + +# +# -------------------------------------------------------------------- + +Image.register_open(ImtImageFile.format, ImtImageFile) + +# +# no extension registered (".im" is simply too common) diff --git a/.venv/lib/python3.9/site-packages/PIL/IptcImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/IptcImagePlugin.py new file mode 100644 index 00000000..0bbe5066 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/IptcImagePlugin.py @@ -0,0 +1,230 @@ +# +# The Python Imaging Library. +# $Id$ +# +# IPTC/NAA file handling +# +# history: +# 1995-10-01 fl Created +# 1998-03-09 fl Cleaned up and added to PIL +# 2002-06-18 fl Added getiptcinfo helper +# +# Copyright (c) Secret Labs AB 1997-2002. +# Copyright (c) Fredrik Lundh 1995. +# +# See the README file for information on usage and redistribution. +# +import os +import tempfile + +from . 
import Image, ImageFile +from ._binary import i8 +from ._binary import i16be as i16 +from ._binary import i32be as i32 +from ._binary import o8 + +COMPRESSION = {1: "raw", 5: "jpeg"} + +PAD = o8(0) * 4 + + +# +# Helpers + + +def i(c): + return i32((PAD + c)[-4:]) + + +def dump(c): + for i in c: + print("%02x" % i8(i), end=" ") + print() + + +## +# Image plugin for IPTC/NAA datastreams. To read IPTC/NAA fields +# from TIFF and JPEG files, use the getiptcinfo function. + + +class IptcImageFile(ImageFile.ImageFile): + + format = "IPTC" + format_description = "IPTC/NAA" + + def getint(self, key): + return i(self.info[key]) + + def field(self): + # + # get a IPTC field header + s = self.fp.read(5) + if not len(s): + return None, 0 + + tag = s[1], s[2] + + # syntax + if s[0] != 0x1C or tag[0] < 1 or tag[0] > 9: + raise SyntaxError("invalid IPTC/NAA file") + + # field size + size = s[3] + if size > 132: + raise OSError("illegal field length in IPTC/NAA file") + elif size == 128: + size = 0 + elif size > 128: + size = i(self.fp.read(size - 128)) + else: + size = i16(s, 3) + + return tag, size + + def _open(self): + + # load descriptive fields + while True: + offset = self.fp.tell() + tag, size = self.field() + if not tag or tag == (8, 10): + break + if size: + tagdata = self.fp.read(size) + else: + tagdata = None + if tag in self.info: + if isinstance(self.info[tag], list): + self.info[tag].append(tagdata) + else: + self.info[tag] = [self.info[tag], tagdata] + else: + self.info[tag] = tagdata + + # mode + layers = i8(self.info[(3, 60)][0]) + component = i8(self.info[(3, 60)][1]) + if (3, 65) in self.info: + id = i8(self.info[(3, 65)][0]) - 1 + else: + id = 0 + if layers == 1 and not component: + self.mode = "L" + elif layers == 3 and component: + self.mode = "RGB"[id] + elif layers == 4 and component: + self.mode = "CMYK"[id] + + # size + self._size = self.getint((3, 20)), self.getint((3, 30)) + + # compression + try: + compression = COMPRESSION[self.getint((3, 120))] + 
except KeyError as e: + raise OSError("Unknown IPTC image compression") from e + + # tile + if tag == (8, 10): + self.tile = [ + ("iptc", (compression, offset), (0, 0, self.size[0], self.size[1])) + ] + + def load(self): + + if len(self.tile) != 1 or self.tile[0][0] != "iptc": + return ImageFile.ImageFile.load(self) + + type, tile, box = self.tile[0] + + encoding, offset = tile + + self.fp.seek(offset) + + # Copy image data to temporary file + o_fd, outfile = tempfile.mkstemp(text=False) + o = os.fdopen(o_fd) + if encoding == "raw": + # To simplify access to the extracted file, + # prepend a PPM header + o.write("P5\n%d %d\n255\n" % self.size) + while True: + type, size = self.field() + if type != (8, 10): + break + while size > 0: + s = self.fp.read(min(size, 8192)) + if not s: + break + o.write(s) + size -= len(s) + o.close() + + try: + with Image.open(outfile) as _im: + _im.load() + self.im = _im.im + finally: + try: + os.unlink(outfile) + except OSError: + pass + + +Image.register_open(IptcImageFile.format, IptcImageFile) + +Image.register_extension(IptcImageFile.format, ".iim") + + +def getiptcinfo(im): + """ + Get IPTC information from TIFF, JPEG, or IPTC file. + + :param im: An image containing IPTC data. + :returns: A dictionary containing IPTC information, or None if + no IPTC information block was found. + """ + import io + + from . import JpegImagePlugin, TiffImagePlugin + + data = None + + if isinstance(im, IptcImageFile): + # return info dictionary right away + return im.info + + elif isinstance(im, JpegImagePlugin.JpegImageFile): + # extract the IPTC/NAA resource + photoshop = im.info.get("photoshop") + if photoshop: + data = photoshop.get(0x0404) + + elif isinstance(im, TiffImagePlugin.TiffImageFile): + # get raw data from the IPTC/NAA tag (PhotoShop tags the data + # as 4-byte integers, so we cannot use the get method...) 
+ try: + data = im.tag.tagdata[TiffImagePlugin.IPTC_NAA_CHUNK] + except (AttributeError, KeyError): + pass + + if data is None: + return None # no properties + + # create an IptcImagePlugin object without initializing it + class FakeImage: + pass + + im = FakeImage() + im.__class__ = IptcImageFile + + # parse the IPTC information chunk + im.info = {} + im.fp = io.BytesIO(data) + + try: + im._open() + except (IndexError, KeyError): + pass # expected failure + + return im.info diff --git a/.venv/lib/python3.9/site-packages/PIL/Jpeg2KImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/Jpeg2KImagePlugin.py new file mode 100644 index 00000000..cc798027 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/Jpeg2KImagePlugin.py @@ -0,0 +1,360 @@ +# +# The Python Imaging Library +# $Id$ +# +# JPEG2000 file handling +# +# History: +# 2014-03-12 ajh Created +# 2021-06-30 rogermb Extract dpi information from the 'resc' header box +# +# Copyright (c) 2014 Coriolis Systems Limited +# Copyright (c) 2014 Alastair Houghton +# +# See the README file for information on usage and redistribution. +# +import io +import os +import struct + +from . import Image, ImageFile + + +class BoxReader: + """ + A small helper class to read fields stored in JPEG2000 header boxes + and to easily step into and read sub-boxes. 
+ """ + + def __init__(self, fp, length=-1): + self.fp = fp + self.has_length = length >= 0 + self.length = length + self.remaining_in_box = -1 + + def _can_read(self, num_bytes): + if self.has_length and self.fp.tell() + num_bytes > self.length: + # Outside box: ensure we don't read past the known file length + return False + if self.remaining_in_box >= 0: + # Inside box contents: ensure read does not go past box boundaries + return num_bytes <= self.remaining_in_box + else: + return True # No length known, just read + + def _read_bytes(self, num_bytes): + if not self._can_read(num_bytes): + raise SyntaxError("Not enough data in header") + + data = self.fp.read(num_bytes) + if len(data) < num_bytes: + raise OSError( + f"Expected to read {num_bytes} bytes but only got {len(data)}." + ) + + if self.remaining_in_box > 0: + self.remaining_in_box -= num_bytes + return data + + def read_fields(self, field_format): + size = struct.calcsize(field_format) + data = self._read_bytes(size) + return struct.unpack(field_format, data) + + def read_boxes(self): + size = self.remaining_in_box + data = self._read_bytes(size) + return BoxReader(io.BytesIO(data), size) + + def has_next_box(self): + if self.has_length: + return self.fp.tell() + self.remaining_in_box < self.length + else: + return True + + def next_box_type(self): + # Skip the rest of the box if it has not been read + if self.remaining_in_box > 0: + self.fp.seek(self.remaining_in_box, os.SEEK_CUR) + self.remaining_in_box = -1 + + # Read the length and type of the next box + lbox, tbox = self.read_fields(">I4s") + if lbox == 1: + lbox = self.read_fields(">Q")[0] + hlen = 16 + else: + hlen = 8 + + if lbox < hlen or not self._can_read(lbox - hlen): + raise SyntaxError("Invalid header length") + + self.remaining_in_box = lbox - hlen + return tbox + + +def _parse_codestream(fp): + """Parse the JPEG 2000 codestream to extract the size and component + count from the SIZ marker segment, returning a PIL (size, mode) tuple.""" + 
+ hdr = fp.read(2) + lsiz = struct.unpack(">H", hdr)[0] + siz = hdr + fp.read(lsiz - 2) + lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, _, _, _, _, csiz = struct.unpack_from( + ">HHIIIIIIIIH", siz + ) + ssiz = [None] * csiz + xrsiz = [None] * csiz + yrsiz = [None] * csiz + for i in range(csiz): + ssiz[i], xrsiz[i], yrsiz[i] = struct.unpack_from(">BBB", siz, 36 + 3 * i) + + size = (xsiz - xosiz, ysiz - yosiz) + if csiz == 1: + if (yrsiz[0] & 0x7F) > 8: + mode = "I;16" + else: + mode = "L" + elif csiz == 2: + mode = "LA" + elif csiz == 3: + mode = "RGB" + elif csiz == 4: + mode = "RGBA" + else: + mode = None + + return (size, mode) + + +def _res_to_dpi(num, denom, exp): + """Convert JPEG2000's (numerator, denominator, exponent-base-10) resolution, + calculated as (num / denom) * 10^exp and stored in dots per meter, + to floating-point dots per inch.""" + if denom != 0: + return (254 * num * (10 ** exp)) / (10000 * denom) + + +def _parse_jp2_header(fp): + """Parse the JP2 header box to extract size, component count, + color space information, and optionally DPI information, + returning a (size, mode, mimetype, dpi) tuple.""" + + # Find the JP2 header box + reader = BoxReader(fp) + header = None + mimetype = None + while reader.has_next_box(): + tbox = reader.next_box_type() + + if tbox == b"jp2h": + header = reader.read_boxes() + break + elif tbox == b"ftyp": + if reader.read_fields(">4s")[0] == b"jpx ": + mimetype = "image/jpx" + + size = None + mode = None + bpc = None + nc = None + dpi = None # 2-tuple of DPI info, or None + + while header.has_next_box(): + tbox = header.next_box_type() + + if tbox == b"ihdr": + height, width, nc, bpc = header.read_fields(">IIHB") + size = (width, height) + if nc == 1 and (bpc & 0x7F) > 8: + mode = "I;16" + elif nc == 1: + mode = "L" + elif nc == 2: + mode = "LA" + elif nc == 3: + mode = "RGB" + elif nc == 4: + mode = "RGBA" + elif tbox == b"res ": + res = header.read_boxes() + while res.has_next_box(): + tres = res.next_box_type() + if 
tres == b"resc": + vrcn, vrcd, hrcn, hrcd, vrce, hrce = res.read_fields(">HHHHBB") + hres = _res_to_dpi(hrcn, hrcd, hrce) + vres = _res_to_dpi(vrcn, vrcd, vrce) + if hres is not None and vres is not None: + dpi = (hres, vres) + break + + if size is None or mode is None: + raise SyntaxError("Malformed JP2 header") + + return (size, mode, mimetype, dpi) + + +## +# Image plugin for JPEG2000 images. + + +class Jpeg2KImageFile(ImageFile.ImageFile): + format = "JPEG2000" + format_description = "JPEG 2000 (ISO 15444)" + + def _open(self): + sig = self.fp.read(4) + if sig == b"\xff\x4f\xff\x51": + self.codec = "j2k" + self._size, self.mode = _parse_codestream(self.fp) + else: + sig = sig + self.fp.read(8) + + if sig == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a": + self.codec = "jp2" + header = _parse_jp2_header(self.fp) + self._size, self.mode, self.custom_mimetype, dpi = header + if dpi is not None: + self.info["dpi"] = dpi + else: + raise SyntaxError("not a JPEG 2000 file") + + if self.size is None or self.mode is None: + raise SyntaxError("unable to determine size/mode") + + self._reduce = 0 + self.layers = 0 + + fd = -1 + length = -1 + + try: + fd = self.fp.fileno() + length = os.fstat(fd).st_size + except Exception: + fd = -1 + try: + pos = self.fp.tell() + self.fp.seek(0, io.SEEK_END) + length = self.fp.tell() + self.fp.seek(pos) + except Exception: + length = -1 + + self.tile = [ + ( + "jpeg2k", + (0, 0) + self.size, + 0, + (self.codec, self._reduce, self.layers, fd, length), + ) + ] + + @property + def reduce(self): + # https://github.com/python-pillow/Pillow/issues/4343 found that the + # new Image 'reduce' method was shadowed by this plugin's 'reduce' + # property. 
This attempts to allow for both scenarios + return self._reduce or super().reduce + + @reduce.setter + def reduce(self, value): + self._reduce = value + + def load(self): + if self.tile and self._reduce: + power = 1 << self._reduce + adjust = power >> 1 + self._size = ( + int((self.size[0] + adjust) / power), + int((self.size[1] + adjust) / power), + ) + + # Update the reduce and layers settings + t = self.tile[0] + t3 = (t[3][0], self._reduce, self.layers, t[3][3], t[3][4]) + self.tile = [(t[0], (0, 0) + self.size, t[2], t3)] + + return ImageFile.ImageFile.load(self) + + +def _accept(prefix): + return ( + prefix[:4] == b"\xff\x4f\xff\x51" + or prefix[:12] == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a" + ) + + +# ------------------------------------------------------------ +# Save support + + +def _save(im, fp, filename): + if filename.endswith(".j2k"): + kind = "j2k" + else: + kind = "jp2" + + # Get the keyword arguments + info = im.encoderinfo + + offset = info.get("offset", None) + tile_offset = info.get("tile_offset", None) + tile_size = info.get("tile_size", None) + quality_mode = info.get("quality_mode", "rates") + quality_layers = info.get("quality_layers", None) + if quality_layers is not None and not ( + isinstance(quality_layers, (list, tuple)) + and all( + [ + isinstance(quality_layer, (int, float)) + for quality_layer in quality_layers + ] + ) + ): + raise ValueError("quality_layers must be a sequence of numbers") + + num_resolutions = info.get("num_resolutions", 0) + cblk_size = info.get("codeblock_size", None) + precinct_size = info.get("precinct_size", None) + irreversible = info.get("irreversible", False) + progression = info.get("progression", "LRCP") + cinema_mode = info.get("cinema_mode", "no") + fd = -1 + + if hasattr(fp, "fileno"): + try: + fd = fp.fileno() + except Exception: + fd = -1 + + im.encoderconfig = ( + offset, + tile_offset, + tile_size, + quality_mode, + quality_layers, + num_resolutions, + cblk_size, + precinct_size, + irreversible, + 
progression, + cinema_mode, + fd, + ) + + ImageFile._save(im, fp, [("jpeg2k", (0, 0) + im.size, 0, kind)]) + + +# ------------------------------------------------------------ +# Registry stuff + + +Image.register_open(Jpeg2KImageFile.format, Jpeg2KImageFile, _accept) +Image.register_save(Jpeg2KImageFile.format, _save) + +Image.register_extensions( + Jpeg2KImageFile.format, [".jp2", ".j2k", ".jpc", ".jpf", ".jpx", ".j2c"] +) + +Image.register_mime(Jpeg2KImageFile.format, "image/jp2") diff --git a/.venv/lib/python3.9/site-packages/PIL/JpegImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/JpegImagePlugin.py new file mode 100644 index 00000000..b8674eee --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/JpegImagePlugin.py @@ -0,0 +1,826 @@ +# +# The Python Imaging Library. +# $Id$ +# +# JPEG (JFIF) file handling +# +# See "Digital Compression and Coding of Continuous-Tone Still Images, +# Part 1, Requirements and Guidelines" (CCITT T.81 / ISO 10918-1) +# +# History: +# 1995-09-09 fl Created +# 1995-09-13 fl Added full parser +# 1996-03-25 fl Added hack to use the IJG command line utilities +# 1996-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug +# 1996-05-28 fl Added draft support, JFIF version (0.1) +# 1996-12-30 fl Added encoder options, added progression property (0.2) +# 1997-08-27 fl Save mode 1 images as BW (0.3) +# 1998-07-12 fl Added YCbCr to draft and save methods (0.4) +# 1998-10-19 fl Don't hang on files using 16-bit DQT's (0.4.1) +# 2001-04-16 fl Extract DPI settings from JFIF files (0.4.2) +# 2002-07-01 fl Skip pad bytes before markers; identify Exif files (0.4.3) +# 2003-04-25 fl Added experimental EXIF decoder (0.5) +# 2003-06-06 fl Added experimental EXIF GPSinfo decoder +# 2003-09-13 fl Extract COM markers +# 2009-09-06 fl Added icc_profile support (from Florian Hoech) +# 2009-03-06 fl Changed CMYK handling; always use Adobe polarity (0.6) +# 2009-03-08 fl Added subsampling support (from Justin Huff). 
+# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1995-1996 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# +import array +import io +import math +import os +import struct +import subprocess +import sys +import tempfile +import warnings + +from . import Image, ImageFile, TiffImagePlugin +from ._binary import i16be as i16 +from ._binary import i32be as i32 +from ._binary import o8 +from .JpegPresets import presets + +# +# Parser + + +def Skip(self, marker): + n = i16(self.fp.read(2)) - 2 + ImageFile._safe_read(self.fp, n) + + +def APP(self, marker): + # + # Application marker. Store these in the APP dictionary. + # Also look for well-known application markers. + + n = i16(self.fp.read(2)) - 2 + s = ImageFile._safe_read(self.fp, n) + + app = "APP%d" % (marker & 15) + + self.app[app] = s # compatibility + self.applist.append((app, s)) + + if marker == 0xFFE0 and s[:4] == b"JFIF": + # extract JFIF information + self.info["jfif"] = version = i16(s, 5) # version + self.info["jfif_version"] = divmod(version, 256) + # extract JFIF properties + try: + jfif_unit = s[7] + jfif_density = i16(s, 8), i16(s, 10) + except Exception: + pass + else: + if jfif_unit == 1: + self.info["dpi"] = jfif_density + self.info["jfif_unit"] = jfif_unit + self.info["jfif_density"] = jfif_density + elif marker == 0xFFE1 and s[:5] == b"Exif\0": + if "exif" not in self.info: + # extract EXIF information (incomplete) + self.info["exif"] = s # FIXME: value will change + elif marker == 0xFFE2 and s[:5] == b"FPXR\0": + # extract FlashPix information (incomplete) + self.info["flashpix"] = s # FIXME: value will change + elif marker == 0xFFE2 and s[:12] == b"ICC_PROFILE\0": + # Since an ICC profile can be larger than the maximum size of + # a JPEG marker (64K), we need provisions to split it into + # multiple markers. 
The format defined by the ICC specifies + # one or more APP2 markers containing the following data: + # Identifying string ASCII "ICC_PROFILE\0" (12 bytes) + # Marker sequence number 1, 2, etc (1 byte) + # Number of markers Total of APP2's used (1 byte) + # Profile data (remainder of APP2 data) + # Decoders should use the marker sequence numbers to + # reassemble the profile, rather than assuming that the APP2 + # markers appear in the correct sequence. + self.icclist.append(s) + elif marker == 0xFFED and s[:14] == b"Photoshop 3.0\x00": + # parse the image resource block + offset = 14 + photoshop = self.info.setdefault("photoshop", {}) + while s[offset : offset + 4] == b"8BIM": + try: + offset += 4 + # resource code + code = i16(s, offset) + offset += 2 + # resource name (usually empty) + name_len = s[offset] + # name = s[offset+1:offset+1+name_len] + offset += 1 + name_len + offset += offset & 1 # align + # resource data block + size = i32(s, offset) + offset += 4 + data = s[offset : offset + size] + if code == 0x03ED: # ResolutionInfo + data = { + "XResolution": i32(data, 0) / 65536, + "DisplayedUnitsX": i16(data, 4), + "YResolution": i32(data, 8) / 65536, + "DisplayedUnitsY": i16(data, 12), + } + photoshop[code] = data + offset += size + offset += offset & 1 # align + except struct.error: + break # insufficient data + + elif marker == 0xFFEE and s[:5] == b"Adobe": + self.info["adobe"] = i16(s, 5) + # extract Adobe custom properties + try: + adobe_transform = s[11] + except IndexError: + pass + else: + self.info["adobe_transform"] = adobe_transform + elif marker == 0xFFE2 and s[:4] == b"MPF\0": + # extract MPO information + self.info["mp"] = s[4:] + # offset is current location minus buffer size + # plus constant header size + self.info["mpoffset"] = self.fp.tell() - n + 4 + + # If DPI isn't in JPEG header, fetch from EXIF + if "dpi" not in self.info and "exif" in self.info: + try: + exif = self.getexif() + resolution_unit = exif[0x0128] + x_resolution = 
exif[0x011A] + try: + dpi = float(x_resolution[0]) / x_resolution[1] + except TypeError: + dpi = x_resolution + if math.isnan(dpi): + raise ValueError + if resolution_unit == 3: # cm + # 1 dpcm = 2.54 dpi + dpi *= 2.54 + self.info["dpi"] = dpi, dpi + except (TypeError, KeyError, SyntaxError, ValueError, ZeroDivisionError): + # SyntaxError for invalid/unreadable EXIF + # KeyError for dpi not included + # ZeroDivisionError for invalid dpi rational value + # ValueError or TypeError for dpi being an invalid float + self.info["dpi"] = 72, 72 + + +def COM(self, marker): + # + # Comment marker. Store these in the APP dictionary. + n = i16(self.fp.read(2)) - 2 + s = ImageFile._safe_read(self.fp, n) + + self.info["comment"] = s + self.app["COM"] = s # compatibility + self.applist.append(("COM", s)) + + +def SOF(self, marker): + # + # Start of frame marker. Defines the size and mode of the + # image. JPEG is colour blind, so we use some simple + # heuristics to map the number of layers to an appropriate + # mode. Note that this could be made a bit brighter, by + # looking for JFIF and Adobe APP markers. 
+ + n = i16(self.fp.read(2)) - 2 + s = ImageFile._safe_read(self.fp, n) + self._size = i16(s, 3), i16(s, 1) + + self.bits = s[0] + if self.bits != 8: + raise SyntaxError(f"cannot handle {self.bits}-bit layers") + + self.layers = s[5] + if self.layers == 1: + self.mode = "L" + elif self.layers == 3: + self.mode = "RGB" + elif self.layers == 4: + self.mode = "CMYK" + else: + raise SyntaxError(f"cannot handle {self.layers}-layer images") + + if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]: + self.info["progressive"] = self.info["progression"] = 1 + + if self.icclist: + # fixup icc profile + self.icclist.sort() # sort by sequence number + if self.icclist[0][13] == len(self.icclist): + profile = [] + for p in self.icclist: + profile.append(p[14:]) + icc_profile = b"".join(profile) + else: + icc_profile = None # wrong number of fragments + self.info["icc_profile"] = icc_profile + self.icclist = [] + + for i in range(6, len(s), 3): + t = s[i : i + 3] + # 4-tuples: id, vsamp, hsamp, qtable + self.layer.append((t[0], t[1] // 16, t[1] & 15, t[2])) + + +def DQT(self, marker): + # + # Define quantization table. Note that there might be more + # than one table in each marker. + + # FIXME: The quantization tables can be used to estimate the + # compression quality. 
+ + n = i16(self.fp.read(2)) - 2 + s = ImageFile._safe_read(self.fp, n) + while len(s): + v = s[0] + precision = 1 if (v // 16 == 0) else 2 # in bytes + qt_length = 1 + precision * 64 + if len(s) < qt_length: + raise SyntaxError("bad quantization table marker") + data = array.array("B" if precision == 1 else "H", s[1:qt_length]) + if sys.byteorder == "little" and precision > 1: + data.byteswap() # the values are always big-endian + self.quantization[v & 15] = [data[i] for i in zigzag_index] + s = s[qt_length:] + + +# +# JPEG marker table + +MARKER = { + 0xFFC0: ("SOF0", "Baseline DCT", SOF), + 0xFFC1: ("SOF1", "Extended Sequential DCT", SOF), + 0xFFC2: ("SOF2", "Progressive DCT", SOF), + 0xFFC3: ("SOF3", "Spatial lossless", SOF), + 0xFFC4: ("DHT", "Define Huffman table", Skip), + 0xFFC5: ("SOF5", "Differential sequential DCT", SOF), + 0xFFC6: ("SOF6", "Differential progressive DCT", SOF), + 0xFFC7: ("SOF7", "Differential spatial", SOF), + 0xFFC8: ("JPG", "Extension", None), + 0xFFC9: ("SOF9", "Extended sequential DCT (AC)", SOF), + 0xFFCA: ("SOF10", "Progressive DCT (AC)", SOF), + 0xFFCB: ("SOF11", "Spatial lossless DCT (AC)", SOF), + 0xFFCC: ("DAC", "Define arithmetic coding conditioning", Skip), + 0xFFCD: ("SOF13", "Differential sequential DCT (AC)", SOF), + 0xFFCE: ("SOF14", "Differential progressive DCT (AC)", SOF), + 0xFFCF: ("SOF15", "Differential spatial (AC)", SOF), + 0xFFD0: ("RST0", "Restart 0", None), + 0xFFD1: ("RST1", "Restart 1", None), + 0xFFD2: ("RST2", "Restart 2", None), + 0xFFD3: ("RST3", "Restart 3", None), + 0xFFD4: ("RST4", "Restart 4", None), + 0xFFD5: ("RST5", "Restart 5", None), + 0xFFD6: ("RST6", "Restart 6", None), + 0xFFD7: ("RST7", "Restart 7", None), + 0xFFD8: ("SOI", "Start of image", None), + 0xFFD9: ("EOI", "End of image", None), + 0xFFDA: ("SOS", "Start of scan", Skip), + 0xFFDB: ("DQT", "Define quantization table", DQT), + 0xFFDC: ("DNL", "Define number of lines", Skip), + 0xFFDD: ("DRI", "Define restart interval", Skip), + 
0xFFDE: ("DHP", "Define hierarchical progression", SOF), + 0xFFDF: ("EXP", "Expand reference component", Skip), + 0xFFE0: ("APP0", "Application segment 0", APP), + 0xFFE1: ("APP1", "Application segment 1", APP), + 0xFFE2: ("APP2", "Application segment 2", APP), + 0xFFE3: ("APP3", "Application segment 3", APP), + 0xFFE4: ("APP4", "Application segment 4", APP), + 0xFFE5: ("APP5", "Application segment 5", APP), + 0xFFE6: ("APP6", "Application segment 6", APP), + 0xFFE7: ("APP7", "Application segment 7", APP), + 0xFFE8: ("APP8", "Application segment 8", APP), + 0xFFE9: ("APP9", "Application segment 9", APP), + 0xFFEA: ("APP10", "Application segment 10", APP), + 0xFFEB: ("APP11", "Application segment 11", APP), + 0xFFEC: ("APP12", "Application segment 12", APP), + 0xFFED: ("APP13", "Application segment 13", APP), + 0xFFEE: ("APP14", "Application segment 14", APP), + 0xFFEF: ("APP15", "Application segment 15", APP), + 0xFFF0: ("JPG0", "Extension 0", None), + 0xFFF1: ("JPG1", "Extension 1", None), + 0xFFF2: ("JPG2", "Extension 2", None), + 0xFFF3: ("JPG3", "Extension 3", None), + 0xFFF4: ("JPG4", "Extension 4", None), + 0xFFF5: ("JPG5", "Extension 5", None), + 0xFFF6: ("JPG6", "Extension 6", None), + 0xFFF7: ("JPG7", "Extension 7", None), + 0xFFF8: ("JPG8", "Extension 8", None), + 0xFFF9: ("JPG9", "Extension 9", None), + 0xFFFA: ("JPG10", "Extension 10", None), + 0xFFFB: ("JPG11", "Extension 11", None), + 0xFFFC: ("JPG12", "Extension 12", None), + 0xFFFD: ("JPG13", "Extension 13", None), + 0xFFFE: ("COM", "Comment", COM), +} + + +def _accept(prefix): + # Magic number was taken from https://en.wikipedia.org/wiki/JPEG + return prefix[0:3] == b"\xFF\xD8\xFF" + + +## +# Image plugin for JPEG and JFIF images. 
+ + +class JpegImageFile(ImageFile.ImageFile): + + format = "JPEG" + format_description = "JPEG (ISO 10918)" + + def _open(self): + + s = self.fp.read(3) + + if not _accept(s): + raise SyntaxError("not a JPEG file") + s = b"\xFF" + + # Create attributes + self.bits = self.layers = 0 + + # JPEG specifics (internal) + self.layer = [] + self.huffman_dc = {} + self.huffman_ac = {} + self.quantization = {} + self.app = {} # compatibility + self.applist = [] + self.icclist = [] + + while True: + + i = s[0] + if i == 0xFF: + s = s + self.fp.read(1) + i = i16(s) + else: + # Skip non-0xFF junk + s = self.fp.read(1) + continue + + if i in MARKER: + name, description, handler = MARKER[i] + if handler is not None: + handler(self, i) + if i == 0xFFDA: # start of scan + rawmode = self.mode + if self.mode == "CMYK": + rawmode = "CMYK;I" # assume adobe conventions + self.tile = [("jpeg", (0, 0) + self.size, 0, (rawmode, ""))] + # self.__offset = self.fp.tell() + break + s = self.fp.read(1) + elif i == 0 or i == 0xFFFF: + # padded marker or junk; move on + s = b"\xff" + elif i == 0xFF00: # Skip extraneous data (escaped 0xFF) + s = self.fp.read(1) + else: + raise SyntaxError("no marker found") + + def load_read(self, read_bytes): + """ + internal: read more image data + For premature EOF and LOAD_TRUNCATED_IMAGES adds EOI marker + so libjpeg can finish decoding + """ + s = self.fp.read(read_bytes) + + if not s and ImageFile.LOAD_TRUNCATED_IMAGES: + # Premature EOF. 
+ # Pretend file is finished adding EOI marker + return b"\xFF\xD9" + + return s + + def draft(self, mode, size): + + if len(self.tile) != 1: + return + + # Protect from second call + if self.decoderconfig: + return + + d, e, o, a = self.tile[0] + scale = 1 + original_size = self.size + + if a[0] == "RGB" and mode in ["L", "YCbCr"]: + self.mode = mode + a = mode, "" + + if size: + scale = min(self.size[0] // size[0], self.size[1] // size[1]) + for s in [8, 4, 2, 1]: + if scale >= s: + break + e = ( + e[0], + e[1], + (e[2] - e[0] + s - 1) // s + e[0], + (e[3] - e[1] + s - 1) // s + e[1], + ) + self._size = ((self.size[0] + s - 1) // s, (self.size[1] + s - 1) // s) + scale = s + + self.tile = [(d, e, o, a)] + self.decoderconfig = (scale, 0) + + box = (0, 0, original_size[0] / scale, original_size[1] / scale) + return (self.mode, box) + + def load_djpeg(self): + + # ALTERNATIVE: handle JPEGs via the IJG command line utilities + + f, path = tempfile.mkstemp() + os.close(f) + if os.path.exists(self.filename): + subprocess.check_call(["djpeg", "-outfile", path, self.filename]) + else: + raise ValueError("Invalid Filename") + + try: + with Image.open(path) as _im: + _im.load() + self.im = _im.im + finally: + try: + os.unlink(path) + except OSError: + pass + + self.mode = self.im.mode + self._size = self.im.size + + self.tile = [] + + def _getexif(self): + return _getexif(self) + + def _getmp(self): + return _getmp(self) + + def getxmp(self): + """ + Returns a dictionary containing the XMP tags. + Requires defusedxml to be installed. + :returns: XMP tags in a dictionary. + """ + + for segment, content in self.applist: + if segment == "APP1": + marker, xmp_tags = content.rsplit(b"\x00", 1) + if marker == b"http://ns.adobe.com/xap/1.0/": + return self._getxmp(xmp_tags) + return {} + + +def _getexif(self): + if "exif" not in self.info: + return None + return self.getexif()._get_merged_dict() + + +def _getmp(self): + # Extract MP information. 
This method was inspired by the "highly + # experimental" _getexif version that's been in use for years now, + # itself based on the ImageFileDirectory class in the TIFF plugin. + + # The MP record essentially consists of a TIFF file embedded in a JPEG + # application marker. + try: + data = self.info["mp"] + except KeyError: + return None + file_contents = io.BytesIO(data) + head = file_contents.read(8) + endianness = ">" if head[:4] == b"\x4d\x4d\x00\x2a" else "<" + # process dictionary + try: + info = TiffImagePlugin.ImageFileDirectory_v2(head) + file_contents.seek(info.next) + info.load(file_contents) + mp = dict(info) + except Exception as e: + raise SyntaxError("malformed MP Index (unreadable directory)") from e + # it's an error not to have a number of images + try: + quant = mp[0xB001] + except KeyError as e: + raise SyntaxError("malformed MP Index (no number of images)") from e + # get MP entries + mpentries = [] + try: + rawmpentries = mp[0xB002] + for entrynum in range(0, quant): + unpackedentry = struct.unpack_from( + f"{endianness}LLLHH", rawmpentries, entrynum * 16 + ) + labels = ("Attribute", "Size", "DataOffset", "EntryNo1", "EntryNo2") + mpentry = dict(zip(labels, unpackedentry)) + mpentryattr = { + "DependentParentImageFlag": bool(mpentry["Attribute"] & (1 << 31)), + "DependentChildImageFlag": bool(mpentry["Attribute"] & (1 << 30)), + "RepresentativeImageFlag": bool(mpentry["Attribute"] & (1 << 29)), + "Reserved": (mpentry["Attribute"] & (3 << 27)) >> 27, + "ImageDataFormat": (mpentry["Attribute"] & (7 << 24)) >> 24, + "MPType": mpentry["Attribute"] & 0x00FFFFFF, + } + if mpentryattr["ImageDataFormat"] == 0: + mpentryattr["ImageDataFormat"] = "JPEG" + else: + raise SyntaxError("unsupported picture format in MPO") + mptypemap = { + 0x000000: "Undefined", + 0x010001: "Large Thumbnail (VGA Equivalent)", + 0x010002: "Large Thumbnail (Full HD Equivalent)", + 0x020001: "Multi-Frame Image (Panorama)", + 0x020002: "Multi-Frame Image: (Disparity)", + 
0x020003: "Multi-Frame Image: (Multi-Angle)", + 0x030000: "Baseline MP Primary Image", + } + mpentryattr["MPType"] = mptypemap.get(mpentryattr["MPType"], "Unknown") + mpentry["Attribute"] = mpentryattr + mpentries.append(mpentry) + mp[0xB002] = mpentries + except KeyError as e: + raise SyntaxError("malformed MP Index (bad MP Entry)") from e + # Next we should try and parse the individual image unique ID list; + # we don't because I've never seen this actually used in a real MPO + # file and so can't test it. + return mp + + +# -------------------------------------------------------------------- +# stuff to save JPEG files + +RAWMODE = { + "1": "L", + "L": "L", + "RGB": "RGB", + "RGBX": "RGB", + "CMYK": "CMYK;I", # assume adobe conventions + "YCbCr": "YCbCr", +} + +# fmt: off +zigzag_index = ( + 0, 1, 5, 6, 14, 15, 27, 28, + 2, 4, 7, 13, 16, 26, 29, 42, + 3, 8, 12, 17, 25, 30, 41, 43, + 9, 11, 18, 24, 31, 40, 44, 53, + 10, 19, 23, 32, 39, 45, 52, 54, + 20, 22, 33, 38, 46, 51, 55, 60, + 21, 34, 37, 47, 50, 56, 59, 61, + 35, 36, 48, 49, 57, 58, 62, 63, +) + +samplings = { + (1, 1, 1, 1, 1, 1): 0, + (2, 1, 1, 1, 1, 1): 1, + (2, 2, 1, 1, 1, 1): 2, +} +# fmt: on + + +def convert_dict_qtables(qtables): + warnings.warn( + "convert_dict_qtables is deprecated and will be removed in Pillow 10" + "(2023-01-02). Conversion is no longer needed.", + DeprecationWarning, + ) + return qtables + + +def get_sampling(im): + # There's no subsampling when images have only 1 layer + # (grayscale images) or when they are CMYK (4 layers), + # so set subsampling to the default value. + # + # NOTE: currently Pillow can't encode JPEG to YCCK format. + # If YCCK support is added in the future, subsampling code will have + # to be updated (here and in JpegEncode.c) to deal with 4 layers. 
+ if not hasattr(im, "layers") or im.layers in (1, 4): + return -1 + sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3] + return samplings.get(sampling, -1) + + +def _save(im, fp, filename): + + try: + rawmode = RAWMODE[im.mode] + except KeyError as e: + raise OSError(f"cannot write mode {im.mode} as JPEG") from e + + info = im.encoderinfo + + dpi = [round(x) for x in info.get("dpi", (0, 0))] + + quality = info.get("quality", -1) + subsampling = info.get("subsampling", -1) + qtables = info.get("qtables") + + if quality == "keep": + quality = -1 + subsampling = "keep" + qtables = "keep" + elif quality in presets: + preset = presets[quality] + quality = -1 + subsampling = preset.get("subsampling", -1) + qtables = preset.get("quantization") + elif not isinstance(quality, int): + raise ValueError("Invalid quality setting") + else: + if subsampling in presets: + subsampling = presets[subsampling].get("subsampling", -1) + if isinstance(qtables, str) and qtables in presets: + qtables = presets[qtables].get("quantization") + + if subsampling == "4:4:4": + subsampling = 0 + elif subsampling == "4:2:2": + subsampling = 1 + elif subsampling == "4:2:0": + subsampling = 2 + elif subsampling == "4:1:1": + # For compatibility. Before Pillow 4.3, 4:1:1 actually meant 4:2:0. + # Set 4:2:0 if someone is still using that value. 
+ subsampling = 2 + elif subsampling == "keep": + if im.format != "JPEG": + raise ValueError("Cannot use 'keep' when original image is not a JPEG") + subsampling = get_sampling(im) + + def validate_qtables(qtables): + if qtables is None: + return qtables + if isinstance(qtables, str): + try: + lines = [ + int(num) + for line in qtables.splitlines() + for num in line.split("#", 1)[0].split() + ] + except ValueError as e: + raise ValueError("Invalid quantization table") from e + else: + qtables = [lines[s : s + 64] for s in range(0, len(lines), 64)] + if isinstance(qtables, (tuple, list, dict)): + if isinstance(qtables, dict): + qtables = [ + qtables[key] for key in range(len(qtables)) if key in qtables + ] + elif isinstance(qtables, tuple): + qtables = list(qtables) + if not (0 < len(qtables) < 5): + raise ValueError("None or too many quantization tables") + for idx, table in enumerate(qtables): + try: + if len(table) != 64: + raise TypeError + table = array.array("H", table) + except TypeError as e: + raise ValueError("Invalid quantization table") from e + else: + qtables[idx] = list(table) + return qtables + + if qtables == "keep": + if im.format != "JPEG": + raise ValueError("Cannot use 'keep' when original image is not a JPEG") + qtables = getattr(im, "quantization", None) + qtables = validate_qtables(qtables) + + extra = b"" + + icc_profile = info.get("icc_profile") + if icc_profile: + ICC_OVERHEAD_LEN = 14 + MAX_BYTES_IN_MARKER = 65533 + MAX_DATA_BYTES_IN_MARKER = MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN + markers = [] + while icc_profile: + markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER]) + icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:] + i = 1 + for marker in markers: + size = struct.pack(">H", 2 + ICC_OVERHEAD_LEN + len(marker)) + extra += ( + b"\xFF\xE2" + + size + + b"ICC_PROFILE\0" + + o8(i) + + o8(len(markers)) + + marker + ) + i += 1 + + # "progressive" is the official name, but older documentation + # says "progression" + # FIXME: issue a 
warning if the wrong form is used (post-1.1.7) + progressive = info.get("progressive", False) or info.get("progression", False) + + optimize = info.get("optimize", False) + + exif = info.get("exif", b"") + if isinstance(exif, Image.Exif): + exif = exif.tobytes() + + # get keyword arguments + im.encoderconfig = ( + quality, + progressive, + info.get("smooth", 0), + optimize, + info.get("streamtype", 0), + dpi[0], + dpi[1], + subsampling, + qtables, + extra, + exif, + ) + + # if we optimize, libjpeg needs a buffer big enough to hold the whole image + # in a shot. Guessing on the size, at im.size bytes. (raw pixel size is + # channels*size, this is a value that's been used in a django patch. + # https://github.com/matthewwithanm/django-imagekit/issues/50 + bufsize = 0 + if optimize or progressive: + # CMYK can be bigger + if im.mode == "CMYK": + bufsize = 4 * im.size[0] * im.size[1] + # keep sets quality to -1, but the actual value may be high. + elif quality >= 95 or quality == -1: + bufsize = 2 * im.size[0] * im.size[1] + else: + bufsize = im.size[0] * im.size[1] + + # The EXIF info needs to be written as one block, + APP1, + one spare byte. + # Ensure that our buffer is big enough. Same with the icc_profile block. + bufsize = max(ImageFile.MAXBLOCK, bufsize, len(exif) + 5, len(extra) + 1) + + ImageFile._save(im, fp, [("jpeg", (0, 0) + im.size, 0, rawmode)], bufsize) + + +def _save_cjpeg(im, fp, filename): + # ALTERNATIVE: handle JPEGs via the IJG command line utilities. + tempfile = im._dump() + subprocess.check_call(["cjpeg", "-outfile", filename, tempfile]) + try: + os.unlink(tempfile) + except OSError: + pass + + +## +# Factory for making JPEG and MPO instances +def jpeg_factory(fp=None, filename=None): + im = JpegImageFile(fp, filename) + try: + mpheader = im._getmp() + if mpheader[45057] > 1: + # It's actually an MPO + from .MpoImagePlugin import MpoImageFile + + # Don't reload everything, just convert it. 
+ im = MpoImageFile.adopt(im, mpheader) + except (TypeError, IndexError): + # It is really a JPEG + pass + except SyntaxError: + warnings.warn( + "Image appears to be a malformed MPO file, it will be " + "interpreted as a base JPEG file" + ) + return im + + +# --------------------------------------------------------------------- +# Registry stuff + +Image.register_open(JpegImageFile.format, jpeg_factory, _accept) +Image.register_save(JpegImageFile.format, _save) + +Image.register_extensions(JpegImageFile.format, [".jfif", ".jpe", ".jpg", ".jpeg"]) + +Image.register_mime(JpegImageFile.format, "image/jpeg") diff --git a/.venv/lib/python3.9/site-packages/PIL/JpegPresets.py b/.venv/lib/python3.9/site-packages/PIL/JpegPresets.py new file mode 100644 index 00000000..e5a5d178 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/JpegPresets.py @@ -0,0 +1,240 @@ +""" +JPEG quality settings equivalent to the Photoshop settings. +Can be used when saving JPEG files. + +The following presets are available by default: +``web_low``, ``web_medium``, ``web_high``, ``web_very_high``, ``web_maximum``, +``low``, ``medium``, ``high``, ``maximum``. +More presets can be added to the :py:data:`presets` dict if needed. + +To apply the preset, specify:: + + quality="preset_name" + +To apply only the quantization table:: + + qtables="preset_name" + +To apply only the subsampling setting:: + + subsampling="preset_name" + +Example:: + + im.save("image_name.jpg", quality="web_high") + +Subsampling +----------- + +Subsampling is the practice of encoding images by implementing less resolution +for chroma information than for luma information. +(ref.: https://en.wikipedia.org/wiki/Chroma_subsampling) + +Possible subsampling values are 0, 1 and 2 that correspond to 4:4:4, 4:2:2 and +4:2:0. + +You can get the subsampling of a JPEG with the +:func:`.JpegImagePlugin.get_sampling` function. + +In JPEG compressed data a JPEG marker is used instead of an EXIF tag. 
+(ref.: https://www.exiv2.org/tags.html) + + +Quantization tables +------------------- + +They are values use by the DCT (Discrete cosine transform) to remove +*unnecessary* information from the image (the lossy part of the compression). +(ref.: https://en.wikipedia.org/wiki/Quantization_matrix#Quantization_matrices, +https://en.wikipedia.org/wiki/JPEG#Quantization) + +You can get the quantization tables of a JPEG with:: + + im.quantization + +This will return a dict with a number of lists. You can pass this dict +directly as the qtables argument when saving a JPEG. + +The quantization table format in presets is a list with sublists. These formats +are interchangeable. + +Libjpeg ref.: +https://web.archive.org/web/20120328125543/http://www.jpegcameras.com/libjpeg/libjpeg-3.html + +""" + +# fmt: off +presets = { + 'web_low': {'subsampling': 2, # "4:2:0" + 'quantization': [ + [20, 16, 25, 39, 50, 46, 62, 68, + 16, 18, 23, 38, 38, 53, 65, 68, + 25, 23, 31, 38, 53, 65, 68, 68, + 39, 38, 38, 53, 65, 68, 68, 68, + 50, 38, 53, 65, 68, 68, 68, 68, + 46, 53, 65, 68, 68, 68, 68, 68, + 62, 65, 68, 68, 68, 68, 68, 68, + 68, 68, 68, 68, 68, 68, 68, 68], + [21, 25, 32, 38, 54, 68, 68, 68, + 25, 28, 24, 38, 54, 68, 68, 68, + 32, 24, 32, 43, 66, 68, 68, 68, + 38, 38, 43, 53, 68, 68, 68, 68, + 54, 54, 66, 68, 68, 68, 68, 68, + 68, 68, 68, 68, 68, 68, 68, 68, + 68, 68, 68, 68, 68, 68, 68, 68, + 68, 68, 68, 68, 68, 68, 68, 68] + ]}, + 'web_medium': {'subsampling': 2, # "4:2:0" + 'quantization': [ + [16, 11, 11, 16, 23, 27, 31, 30, + 11, 12, 12, 15, 20, 23, 23, 30, + 11, 12, 13, 16, 23, 26, 35, 47, + 16, 15, 16, 23, 26, 37, 47, 64, + 23, 20, 23, 26, 39, 51, 64, 64, + 27, 23, 26, 37, 51, 64, 64, 64, + 31, 23, 35, 47, 64, 64, 64, 64, + 30, 30, 47, 64, 64, 64, 64, 64], + [17, 15, 17, 21, 20, 26, 38, 48, + 15, 19, 18, 17, 20, 26, 35, 43, + 17, 18, 20, 22, 26, 30, 46, 53, + 21, 17, 22, 28, 30, 39, 53, 64, + 20, 20, 26, 30, 39, 48, 64, 64, + 26, 26, 30, 39, 48, 63, 64, 64, + 38, 35, 46, 53, 
64, 64, 64, 64, + 48, 43, 53, 64, 64, 64, 64, 64] + ]}, + 'web_high': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [6, 4, 4, 6, 9, 11, 12, 16, + 4, 5, 5, 6, 8, 10, 12, 12, + 4, 5, 5, 6, 10, 12, 14, 19, + 6, 6, 6, 11, 12, 15, 19, 28, + 9, 8, 10, 12, 16, 20, 27, 31, + 11, 10, 12, 15, 20, 27, 31, 31, + 12, 12, 14, 19, 27, 31, 31, 31, + 16, 12, 19, 28, 31, 31, 31, 31], + [7, 7, 13, 24, 26, 31, 31, 31, + 7, 12, 16, 21, 31, 31, 31, 31, + 13, 16, 17, 31, 31, 31, 31, 31, + 24, 21, 31, 31, 31, 31, 31, 31, + 26, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31] + ]}, + 'web_very_high': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [2, 2, 2, 2, 3, 4, 5, 6, + 2, 2, 2, 2, 3, 4, 5, 6, + 2, 2, 2, 2, 4, 5, 7, 9, + 2, 2, 2, 4, 5, 7, 9, 12, + 3, 3, 4, 5, 8, 10, 12, 12, + 4, 4, 5, 7, 10, 12, 12, 12, + 5, 5, 7, 9, 12, 12, 12, 12, + 6, 6, 9, 12, 12, 12, 12, 12], + [3, 3, 5, 9, 13, 15, 15, 15, + 3, 4, 6, 11, 14, 12, 12, 12, + 5, 6, 9, 14, 12, 12, 12, 12, + 9, 11, 14, 12, 12, 12, 12, 12, + 13, 14, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12] + ]}, + 'web_maximum': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 2, + 1, 1, 1, 1, 1, 1, 2, 2, + 1, 1, 1, 1, 1, 2, 2, 3, + 1, 1, 1, 1, 2, 2, 3, 3, + 1, 1, 1, 2, 2, 3, 3, 3, + 1, 1, 2, 2, 3, 3, 3, 3], + [1, 1, 1, 2, 2, 3, 3, 3, + 1, 1, 1, 2, 3, 3, 3, 3, + 1, 1, 1, 3, 3, 3, 3, 3, + 2, 2, 3, 3, 3, 3, 3, 3, + 2, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3] + ]}, + 'low': {'subsampling': 2, # "4:2:0" + 'quantization': [ + [18, 14, 14, 21, 30, 35, 34, 17, + 14, 16, 16, 19, 26, 23, 12, 12, + 14, 16, 17, 21, 23, 12, 12, 12, + 21, 19, 21, 23, 12, 12, 12, 12, + 30, 26, 23, 12, 12, 12, 12, 12, + 35, 23, 12, 12, 12, 12, 12, 12, + 34, 12, 12, 12, 12, 12, 12, 
12, + 17, 12, 12, 12, 12, 12, 12, 12], + [20, 19, 22, 27, 20, 20, 17, 17, + 19, 25, 23, 14, 14, 12, 12, 12, + 22, 23, 14, 14, 12, 12, 12, 12, + 27, 14, 14, 12, 12, 12, 12, 12, + 20, 14, 12, 12, 12, 12, 12, 12, + 20, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12] + ]}, + 'medium': {'subsampling': 2, # "4:2:0" + 'quantization': [ + [12, 8, 8, 12, 17, 21, 24, 17, + 8, 9, 9, 11, 15, 19, 12, 12, + 8, 9, 10, 12, 19, 12, 12, 12, + 12, 11, 12, 21, 12, 12, 12, 12, + 17, 15, 19, 12, 12, 12, 12, 12, + 21, 19, 12, 12, 12, 12, 12, 12, + 24, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12], + [13, 11, 13, 16, 20, 20, 17, 17, + 11, 14, 14, 14, 14, 12, 12, 12, + 13, 14, 14, 14, 12, 12, 12, 12, + 16, 14, 14, 12, 12, 12, 12, 12, + 20, 14, 12, 12, 12, 12, 12, 12, + 20, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12] + ]}, + 'high': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [6, 4, 4, 6, 9, 11, 12, 16, + 4, 5, 5, 6, 8, 10, 12, 12, + 4, 5, 5, 6, 10, 12, 12, 12, + 6, 6, 6, 11, 12, 12, 12, 12, + 9, 8, 10, 12, 12, 12, 12, 12, + 11, 10, 12, 12, 12, 12, 12, 12, + 12, 12, 12, 12, 12, 12, 12, 12, + 16, 12, 12, 12, 12, 12, 12, 12], + [7, 7, 13, 24, 20, 20, 17, 17, + 7, 12, 16, 14, 14, 12, 12, 12, + 13, 16, 14, 14, 12, 12, 12, 12, + 24, 14, 14, 12, 12, 12, 12, 12, + 20, 14, 12, 12, 12, 12, 12, 12, + 20, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12] + ]}, + 'maximum': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [2, 2, 2, 2, 3, 4, 5, 6, + 2, 2, 2, 2, 3, 4, 5, 6, + 2, 2, 2, 2, 4, 5, 7, 9, + 2, 2, 2, 4, 5, 7, 9, 12, + 3, 3, 4, 5, 8, 10, 12, 12, + 4, 4, 5, 7, 10, 12, 12, 12, + 5, 5, 7, 9, 12, 12, 12, 12, + 6, 6, 9, 12, 12, 12, 12, 12], + [3, 3, 5, 9, 13, 15, 15, 15, + 3, 4, 6, 10, 14, 12, 12, 12, + 5, 6, 9, 14, 12, 12, 12, 12, + 9, 10, 14, 12, 12, 12, 12, 12, + 13, 14, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12, + 
15, 12, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12] + ]}, +} +# fmt: on diff --git a/.venv/lib/python3.9/site-packages/PIL/McIdasImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/McIdasImagePlugin.py new file mode 100644 index 00000000..cd047fe9 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/McIdasImagePlugin.py @@ -0,0 +1,75 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Basic McIdas support for PIL +# +# History: +# 1997-05-05 fl Created (8-bit images only) +# 2009-03-08 fl Added 16/32-bit support. +# +# Thanks to Richard Jones and Craig Swank for specs and samples. +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# + +import struct + +from . import Image, ImageFile + + +def _accept(s): + return s[:8] == b"\x00\x00\x00\x00\x00\x00\x00\x04" + + +## +# Image plugin for McIdas area images. + + +class McIdasImageFile(ImageFile.ImageFile): + + format = "MCIDAS" + format_description = "McIdas area file" + + def _open(self): + + # parse area file directory + s = self.fp.read(256) + if not _accept(s) or len(s) != 256: + raise SyntaxError("not an McIdas area file") + + self.area_descriptor_raw = s + self.area_descriptor = w = [0] + list(struct.unpack("!64i", s)) + + # get mode + if w[11] == 1: + mode = rawmode = "L" + elif w[11] == 2: + # FIXME: add memory map support + mode = "I" + rawmode = "I;16B" + elif w[11] == 4: + # FIXME: add memory map support + mode = "I" + rawmode = "I;32B" + else: + raise SyntaxError("unsupported McIdas format") + + self.mode = mode + self._size = w[10], w[9] + + offset = w[34] + w[15] + stride = w[15] + w[10] * w[11] * w[14] + + self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride, 1))] + + +# -------------------------------------------------------------------- +# registry + +Image.register_open(McIdasImageFile.format, McIdasImageFile, _accept) + +# no default extension diff --git 
a/.venv/lib/python3.9/site-packages/PIL/MicImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/MicImagePlugin.py new file mode 100644 index 00000000..9248b1b6 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/MicImagePlugin.py @@ -0,0 +1,107 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Microsoft Image Composer support for PIL +# +# Notes: +# uses TiffImagePlugin.py to read the actual image streams +# +# History: +# 97-01-20 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# + + +import olefile + +from . import Image, TiffImagePlugin + +# +# -------------------------------------------------------------------- + + +def _accept(prefix): + return prefix[:8] == olefile.MAGIC + + +## +# Image plugin for Microsoft's Image Composer file format. + + +class MicImageFile(TiffImagePlugin.TiffImageFile): + + format = "MIC" + format_description = "Microsoft Image Composer" + _close_exclusive_fp_after_loading = False + + def _open(self): + + # read the OLE directory and see if this is a likely + # to be a Microsoft Image Composer file + + try: + self.ole = olefile.OleFileIO(self.fp) + except OSError as e: + raise SyntaxError("not an MIC file; invalid OLE file") from e + + # find ACI subfiles with Image members (maybe not the + # best way to identify MIC files, but what the... ;-) + + self.images = [] + for path in self.ole.listdir(): + if path[1:] and path[0][-4:] == ".ACI" and path[1] == "Image": + self.images.append(path) + + # if we didn't find any images, this is probably not + # an MIC file. 
+ if not self.images: + raise SyntaxError("not an MIC file; no image entries") + + self.__fp = self.fp + self.frame = None + self._n_frames = len(self.images) + self.is_animated = self._n_frames > 1 + + if len(self.images) > 1: + self._category = Image.CONTAINER + + self.seek(0) + + def seek(self, frame): + if not self._seek_check(frame): + return + try: + filename = self.images[frame] + except IndexError as e: + raise EOFError("no such frame") from e + + self.fp = self.ole.openstream(filename) + + TiffImagePlugin.TiffImageFile._open(self) + + self.frame = frame + + def tell(self): + return self.frame + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +# +# -------------------------------------------------------------------- + +Image.register_open(MicImageFile.format, MicImageFile, _accept) + +Image.register_extension(MicImageFile.format, ".mic") diff --git a/.venv/lib/python3.9/site-packages/PIL/MpegImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/MpegImagePlugin.py new file mode 100644 index 00000000..a358dfdc --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/MpegImagePlugin.py @@ -0,0 +1,83 @@ +# +# The Python Imaging Library. +# $Id$ +# +# MPEG file handling +# +# History: +# 95-09-09 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1995. +# +# See the README file for information on usage and redistribution. +# + + +from . 
import Image, ImageFile +from ._binary import i8 + +# +# Bitstream parser + + +class BitStream: + def __init__(self, fp): + self.fp = fp + self.bits = 0 + self.bitbuffer = 0 + + def next(self): + return i8(self.fp.read(1)) + + def peek(self, bits): + while self.bits < bits: + c = self.next() + if c < 0: + self.bits = 0 + continue + self.bitbuffer = (self.bitbuffer << 8) + c + self.bits += 8 + return self.bitbuffer >> (self.bits - bits) & (1 << bits) - 1 + + def skip(self, bits): + while self.bits < bits: + self.bitbuffer = (self.bitbuffer << 8) + i8(self.fp.read(1)) + self.bits += 8 + self.bits = self.bits - bits + + def read(self, bits): + v = self.peek(bits) + self.bits = self.bits - bits + return v + + +## +# Image plugin for MPEG streams. This plugin can identify a stream, +# but it cannot read it. + + +class MpegImageFile(ImageFile.ImageFile): + + format = "MPEG" + format_description = "MPEG" + + def _open(self): + + s = BitStream(self.fp) + + if s.read(32) != 0x1B3: + raise SyntaxError("not an MPEG file") + + self.mode = "RGB" + self._size = s.read(12), s.read(12) + + +# -------------------------------------------------------------------- +# Registry stuff + +Image.register_open(MpegImageFile.format, MpegImageFile) + +Image.register_extensions(MpegImageFile.format, [".mpg", ".mpeg"]) + +Image.register_mime(MpegImageFile.format, "video/mpeg") diff --git a/.venv/lib/python3.9/site-packages/PIL/MpoImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/MpoImagePlugin.py new file mode 100644 index 00000000..7ccf27c4 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/MpoImagePlugin.py @@ -0,0 +1,135 @@ +# +# The Python Imaging Library. +# $Id$ +# +# MPO file handling +# +# See "Multi-Picture Format" (CIPA DC-007-Translation 2009, Standard of the +# Camera & Imaging Products Association) +# +# The multi-picture object combines multiple JPEG images (with a modified EXIF +# data format) into a single file. 
While it can theoretically be used much like +# a GIF animation, it is commonly used to represent 3D photographs and is (as +# of this writing) the most commonly used format by 3D cameras. +# +# History: +# 2014-03-13 Feneric Created +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile, JpegImagePlugin +from ._binary import i16be as i16 + +# def _accept(prefix): +# return JpegImagePlugin._accept(prefix) + + +def _save(im, fp, filename): + # Note that we can only save the current frame at present + return JpegImagePlugin._save(im, fp, filename) + + +## +# Image plugin for MPO images. + + +class MpoImageFile(JpegImagePlugin.JpegImageFile): + + format = "MPO" + format_description = "MPO (CIPA DC-007)" + _close_exclusive_fp_after_loading = False + + def _open(self): + self.fp.seek(0) # prep the fp in order to pass the JPEG test + JpegImagePlugin.JpegImageFile._open(self) + self._after_jpeg_open() + + def _after_jpeg_open(self, mpheader=None): + self.mpinfo = mpheader if mpheader is not None else self._getmp() + self.n_frames = self.mpinfo[0xB001] + self.__mpoffsets = [ + mpent["DataOffset"] + self.info["mpoffset"] for mpent in self.mpinfo[0xB002] + ] + self.__mpoffsets[0] = 0 + # Note that the following assertion will only be invalid if something + # gets broken within JpegImagePlugin. 
+ assert self.n_frames == len(self.__mpoffsets) + del self.info["mpoffset"] # no longer needed + self.is_animated = self.n_frames > 1 + self.__fp = self.fp # FIXME: hack + self.__fp.seek(self.__mpoffsets[0]) # get ready to read first frame + self.__frame = 0 + self.offset = 0 + # for now we can only handle reading and individual frame extraction + self.readonly = 1 + + def load_seek(self, pos): + self.__fp.seek(pos) + + def seek(self, frame): + if not self._seek_check(frame): + return + self.fp = self.__fp + self.offset = self.__mpoffsets[frame] + + self.fp.seek(self.offset + 2) # skip SOI marker + segment = self.fp.read(2) + if not segment: + raise ValueError("No data found for frame") + if i16(segment) == 0xFFE1: # APP1 + n = i16(self.fp.read(2)) - 2 + self.info["exif"] = ImageFile._safe_read(self.fp, n) + + mptype = self.mpinfo[0xB002][frame]["Attribute"]["MPType"] + if mptype.startswith("Large Thumbnail"): + exif = self.getexif().get_ifd(0x8769) + if 40962 in exif and 40963 in exif: + self._size = (exif[40962], exif[40963]) + elif "exif" in self.info: + del self.info["exif"] + + self.tile = [("jpeg", (0, 0) + self.size, self.offset, (self.mode, ""))] + self.__frame = frame + + def tell(self): + return self.__frame + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + @staticmethod + def adopt(jpeg_instance, mpheader=None): + """ + Transform the instance of JpegImageFile into + an instance of MpoImageFile. + After the call, the JpegImageFile is extended + to be an MpoImageFile. + + This is essentially useful when opening a JPEG + file that reveals itself as an MPO, to avoid + double call to _open. 
+ """ + jpeg_instance.__class__ = MpoImageFile + jpeg_instance._after_jpeg_open(mpheader) + return jpeg_instance + + +# --------------------------------------------------------------------- +# Registry stuff + +# Note that since MPO shares a factory with JPEG, we do not need to do a +# separate registration for it here. +# Image.register_open(MpoImageFile.format, +# JpegImagePlugin.jpeg_factory, _accept) +Image.register_save(MpoImageFile.format, _save) + +Image.register_extension(MpoImageFile.format, ".mpo") + +Image.register_mime(MpoImageFile.format, "image/mpo") diff --git a/.venv/lib/python3.9/site-packages/PIL/MspImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/MspImagePlugin.py new file mode 100644 index 00000000..32b28d44 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/MspImagePlugin.py @@ -0,0 +1,194 @@ +# +# The Python Imaging Library. +# +# MSP file handling +# +# This is the format used by the Paint program in Windows 1 and 2. +# +# History: +# 95-09-05 fl Created +# 97-01-03 fl Read/write MSP images +# 17-02-21 es Fixed RLE interpretation +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1995-97. +# Copyright (c) Eric Soroos 2017. +# +# See the README file for information on usage and redistribution. +# +# More info on this format: https://archive.org/details/gg243631 +# Page 313: +# Figure 205. Windows Paint Version 1: "DanM" Format +# Figure 206. Windows Paint Version 2: "LinS" Format. Used in Windows V2.03 +# +# See also: https://www.fileformat.info/format/mspaint/egff.htm + +import io +import struct + +from . import Image, ImageFile +from ._binary import i16le as i16 +from ._binary import o16le as o16 + +# +# read MSP files + + +def _accept(prefix): + return prefix[:4] in [b"DanM", b"LinS"] + + +## +# Image plugin for Windows MSP images. This plugin supports both +# uncompressed (Windows 1.0). 
+ + +class MspImageFile(ImageFile.ImageFile): + + format = "MSP" + format_description = "Windows Paint" + + def _open(self): + + # Header + s = self.fp.read(32) + if not _accept(s): + raise SyntaxError("not an MSP file") + + # Header checksum + checksum = 0 + for i in range(0, 32, 2): + checksum = checksum ^ i16(s, i) + if checksum != 0: + raise SyntaxError("bad MSP checksum") + + self.mode = "1" + self._size = i16(s, 4), i16(s, 6) + + if s[:4] == b"DanM": + self.tile = [("raw", (0, 0) + self.size, 32, ("1", 0, 1))] + else: + self.tile = [("MSP", (0, 0) + self.size, 32, None)] + + +class MspDecoder(ImageFile.PyDecoder): + # The algo for the MSP decoder is from + # https://www.fileformat.info/format/mspaint/egff.htm + # cc-by-attribution -- That page references is taken from the + # Encyclopedia of Graphics File Formats and is licensed by + # O'Reilly under the Creative Common/Attribution license + # + # For RLE encoded files, the 32byte header is followed by a scan + # line map, encoded as one 16bit word of encoded byte length per + # line. + # + # NOTE: the encoded length of the line can be 0. This was not + # handled in the previous version of this encoder, and there's no + # mention of how to handle it in the documentation. From the few + # examples I've seen, I've assumed that it is a fill of the + # background color, in this case, white. 
+ # + # + # Pseudocode of the decoder: + # Read a BYTE value as the RunType + # If the RunType value is zero + # Read next byte as the RunCount + # Read the next byte as the RunValue + # Write the RunValue byte RunCount times + # If the RunType value is non-zero + # Use this value as the RunCount + # Read and write the next RunCount bytes literally + # + # e.g.: + # 0x00 03 ff 05 00 01 02 03 04 + # would yield the bytes: + # 0xff ff ff 00 01 02 03 04 + # + # which are then interpreted as a bit packed mode '1' image + + _pulls_fd = True + + def decode(self, buffer): + + img = io.BytesIO() + blank_line = bytearray((0xFF,) * ((self.state.xsize + 7) // 8)) + try: + self.fd.seek(32) + rowmap = struct.unpack_from( + f"<{self.state.ysize}H", self.fd.read(self.state.ysize * 2) + ) + except struct.error as e: + raise OSError("Truncated MSP file in row map") from e + + for x, rowlen in enumerate(rowmap): + try: + if rowlen == 0: + img.write(blank_line) + continue + row = self.fd.read(rowlen) + if len(row) != rowlen: + raise OSError( + "Truncated MSP file, expected %d bytes on row %s", (rowlen, x) + ) + idx = 0 + while idx < rowlen: + runtype = row[idx] + idx += 1 + if runtype == 0: + (runcount, runval) = struct.unpack_from("Bc", row, idx) + img.write(runval * runcount) + idx += 2 + else: + runcount = runtype + img.write(row[idx : idx + runcount]) + idx += runcount + + except struct.error as e: + raise OSError(f"Corrupted MSP file in row {x}") from e + + self.set_as_raw(img.getvalue(), ("1", 0, 1)) + + return 0, 0 + + +Image.register_decoder("MSP", MspDecoder) + + +# +# write MSP files (uncompressed only) + + +def _save(im, fp, filename): + + if im.mode != "1": + raise OSError(f"cannot write mode {im.mode} as MSP") + + # create MSP header + header = [0] * 16 + + header[0], header[1] = i16(b"Da"), i16(b"nM") # version 1 + header[2], header[3] = im.size + header[4], header[5] = 1, 1 + header[6], header[7] = 1, 1 + header[8], header[9] = im.size + + checksum = 0 + for h in 
header: + checksum = checksum ^ h + header[12] = checksum # FIXME: is this the right field? + + # header + for h in header: + fp.write(o16(h)) + + # image body + ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 32, ("1", 0, 1))]) + + +# +# registry + +Image.register_open(MspImageFile.format, MspImageFile, _accept) +Image.register_save(MspImageFile.format, _save) + +Image.register_extension(MspImageFile.format, ".msp") diff --git a/.venv/lib/python3.9/site-packages/PIL/PSDraw.py b/.venv/lib/python3.9/site-packages/PIL/PSDraw.py new file mode 100644 index 00000000..743c35f0 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/PSDraw.py @@ -0,0 +1,235 @@ +# +# The Python Imaging Library +# $Id$ +# +# Simple PostScript graphics interface +# +# History: +# 1996-04-20 fl Created +# 1999-01-10 fl Added gsave/grestore to image method +# 2005-05-04 fl Fixed floating point issue in image (from Eric Etheridge) +# +# Copyright (c) 1997-2005 by Secret Labs AB. All rights reserved. +# Copyright (c) 1996 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +import sys + +from . import EpsImagePlugin + +## +# Simple PostScript graphics interface. + + +class PSDraw: + """ + Sets up printing to the given file. If ``fp`` is omitted, + ``sys.stdout.buffer`` or ``sys.stdout`` is assumed. + """ + + def __init__(self, fp=None): + if not fp: + try: + fp = sys.stdout.buffer + except AttributeError: + fp = sys.stdout + self.fp = fp + + def begin_document(self, id=None): + """Set up printing of a document. (Write PostScript DSC header.)""" + # FIXME: incomplete + self.fp.write( + b"%!PS-Adobe-3.0\n" + b"save\n" + b"/showpage { } def\n" + b"%%EndComments\n" + b"%%BeginDocument\n" + ) + # self.fp.write(ERROR_PS) # debugging! + self.fp.write(EDROFF_PS) + self.fp.write(VDI_PS) + self.fp.write(b"%%EndProlog\n") + self.isofont = {} + + def end_document(self): + """Ends printing. 
(Write PostScript DSC footer.)""" + self.fp.write(b"%%EndDocument\nrestore showpage\n%%End\n") + if hasattr(self.fp, "flush"): + self.fp.flush() + + def setfont(self, font, size): + """ + Selects which font to use. + + :param font: A PostScript font name + :param size: Size in points. + """ + font = bytes(font, "UTF-8") + if font not in self.isofont: + # reencode font + self.fp.write(b"/PSDraw-%s ISOLatin1Encoding /%s E\n" % (font, font)) + self.isofont[font] = 1 + # rough + self.fp.write(b"/F0 %d /PSDraw-%s F\n" % (size, font)) + + def line(self, xy0, xy1): + """ + Draws a line between the two points. Coordinates are given in + PostScript point coordinates (72 points per inch, (0, 0) is the lower + left corner of the page). + """ + self.fp.write(b"%d %d %d %d Vl\n" % (*xy0, *xy1)) + + def rectangle(self, box): + """ + Draws a rectangle. + + :param box: A 4-tuple of integers whose order and function is currently + undocumented. + + Hint: the tuple is passed into this format string: + + .. code-block:: python + + %d %d M %d %d 0 Vr\n + """ + self.fp.write(b"%d %d M %d %d 0 Vr\n" % box) + + def text(self, xy, text): + """ + Draws text at the given position. You must use + :py:meth:`~PIL.PSDraw.PSDraw.setfont` before calling this method. 
+ """ + text = bytes(text, "UTF-8") + text = b"\\(".join(text.split(b"(")) + text = b"\\)".join(text.split(b")")) + xy += (text,) + self.fp.write(b"%d %d M (%s) S\n" % xy) + + def image(self, box, im, dpi=None): + """Draw a PIL image, centered in the given box.""" + # default resolution depends on mode + if not dpi: + if im.mode == "1": + dpi = 200 # fax + else: + dpi = 100 # greyscale + # image size (on paper) + x = im.size[0] * 72 / dpi + y = im.size[1] * 72 / dpi + # max allowed size + xmax = float(box[2] - box[0]) + ymax = float(box[3] - box[1]) + if x > xmax: + y = y * xmax / x + x = xmax + if y > ymax: + x = x * ymax / y + y = ymax + dx = (xmax - x) / 2 + box[0] + dy = (ymax - y) / 2 + box[1] + self.fp.write(b"gsave\n%f %f translate\n" % (dx, dy)) + if (x, y) != im.size: + # EpsImagePlugin._save prints the image at (0,0,xsize,ysize) + sx = x / im.size[0] + sy = y / im.size[1] + self.fp.write(b"%f %f scale\n" % (sx, sy)) + EpsImagePlugin._save(im, self.fp, None, 0) + self.fp.write(b"\ngrestore\n") + + +# -------------------------------------------------------------------- +# PostScript driver + +# +# EDROFF.PS -- PostScript driver for Edroff 2 +# +# History: +# 94-01-25 fl: created (edroff 2.04) +# +# Copyright (c) Fredrik Lundh 1994. +# + + +EDROFF_PS = b"""\ +/S { show } bind def +/P { moveto show } bind def +/M { moveto } bind def +/X { 0 rmoveto } bind def +/Y { 0 exch rmoveto } bind def +/E { findfont + dup maxlength dict begin + { + 1 index /FID ne { def } { pop pop } ifelse + } forall + /Encoding exch def + dup /FontName exch def + currentdict end definefont pop +} bind def +/F { findfont exch scalefont dup setfont + [ exch /setfont cvx ] cvx bind def +} bind def +""" + +# +# VDI.PS -- PostScript driver for VDI meta commands +# +# History: +# 94-01-25 fl: created (edroff 2.04) +# +# Copyright (c) Fredrik Lundh 1994. 
+# + +VDI_PS = b"""\ +/Vm { moveto } bind def +/Va { newpath arcn stroke } bind def +/Vl { moveto lineto stroke } bind def +/Vc { newpath 0 360 arc closepath } bind def +/Vr { exch dup 0 rlineto + exch dup neg 0 exch rlineto + exch neg 0 rlineto + 0 exch rlineto + 100 div setgray fill 0 setgray } bind def +/Tm matrix def +/Ve { Tm currentmatrix pop + translate scale newpath 0 0 .5 0 360 arc closepath + Tm setmatrix +} bind def +/Vf { currentgray exch setgray fill setgray } bind def +""" + +# +# ERROR.PS -- Error handler +# +# History: +# 89-11-21 fl: created (pslist 1.10) +# + +ERROR_PS = b"""\ +/landscape false def +/errorBUF 200 string def +/errorNL { currentpoint 10 sub exch pop 72 exch moveto } def +errordict begin /handleerror { + initmatrix /Courier findfont 10 scalefont setfont + newpath 72 720 moveto $error begin /newerror false def + (PostScript Error) show errorNL errorNL + (Error: ) show + /errorname load errorBUF cvs show errorNL errorNL + (Command: ) show + /command load dup type /stringtype ne { errorBUF cvs } if show + errorNL errorNL + (VMstatus: ) show + vmstatus errorBUF cvs show ( bytes available, ) show + errorBUF cvs show ( bytes used at level ) show + errorBUF cvs show errorNL errorNL + (Operand stargck: ) show errorNL /ostargck load { + dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL + } forall errorNL + (Execution stargck: ) show errorNL /estargck load { + dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL + } forall + end showpage +} def end +""" diff --git a/.venv/lib/python3.9/site-packages/PIL/PaletteFile.py b/.venv/lib/python3.9/site-packages/PIL/PaletteFile.py new file mode 100644 index 00000000..6ccaa1f5 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/PaletteFile.py @@ -0,0 +1,53 @@ +# +# Python Imaging Library +# $Id$ +# +# stuff to read simple, teragon-style palette files +# +# History: +# 97-08-23 fl Created +# +# Copyright (c) Secret Labs AB 1997. 
+# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# + +from ._binary import o8 + + +class PaletteFile: + """File handler for Teragon-style palette files.""" + + rawmode = "RGB" + + def __init__(self, fp): + + self.palette = [(i, i, i) for i in range(256)] + + while True: + + s = fp.readline() + + if not s: + break + if s[0:1] == b"#": + continue + if len(s) > 100: + raise SyntaxError("bad palette file") + + v = [int(x) for x in s.split()] + try: + [i, r, g, b] = v + except ValueError: + [i, r] = v + g = b = r + + if 0 <= i <= 255: + self.palette[i] = o8(r) + o8(g) + o8(b) + + self.palette = b"".join(self.palette) + + def getpalette(self): + + return self.palette, self.rawmode diff --git a/.venv/lib/python3.9/site-packages/PIL/PalmImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/PalmImagePlugin.py new file mode 100644 index 00000000..700f10e3 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/PalmImagePlugin.py @@ -0,0 +1,227 @@ +# +# The Python Imaging Library. +# $Id$ +# + +## +# Image plugin for Palm pixmap images (output only). +## + +from . 
import Image, ImageFile +from ._binary import o8 +from ._binary import o16be as o16b + +# fmt: off +_Palm8BitColormapValues = ( + (255, 255, 255), (255, 204, 255), (255, 153, 255), (255, 102, 255), + (255, 51, 255), (255, 0, 255), (255, 255, 204), (255, 204, 204), + (255, 153, 204), (255, 102, 204), (255, 51, 204), (255, 0, 204), + (255, 255, 153), (255, 204, 153), (255, 153, 153), (255, 102, 153), + (255, 51, 153), (255, 0, 153), (204, 255, 255), (204, 204, 255), + (204, 153, 255), (204, 102, 255), (204, 51, 255), (204, 0, 255), + (204, 255, 204), (204, 204, 204), (204, 153, 204), (204, 102, 204), + (204, 51, 204), (204, 0, 204), (204, 255, 153), (204, 204, 153), + (204, 153, 153), (204, 102, 153), (204, 51, 153), (204, 0, 153), + (153, 255, 255), (153, 204, 255), (153, 153, 255), (153, 102, 255), + (153, 51, 255), (153, 0, 255), (153, 255, 204), (153, 204, 204), + (153, 153, 204), (153, 102, 204), (153, 51, 204), (153, 0, 204), + (153, 255, 153), (153, 204, 153), (153, 153, 153), (153, 102, 153), + (153, 51, 153), (153, 0, 153), (102, 255, 255), (102, 204, 255), + (102, 153, 255), (102, 102, 255), (102, 51, 255), (102, 0, 255), + (102, 255, 204), (102, 204, 204), (102, 153, 204), (102, 102, 204), + (102, 51, 204), (102, 0, 204), (102, 255, 153), (102, 204, 153), + (102, 153, 153), (102, 102, 153), (102, 51, 153), (102, 0, 153), + (51, 255, 255), (51, 204, 255), (51, 153, 255), (51, 102, 255), + (51, 51, 255), (51, 0, 255), (51, 255, 204), (51, 204, 204), + (51, 153, 204), (51, 102, 204), (51, 51, 204), (51, 0, 204), + (51, 255, 153), (51, 204, 153), (51, 153, 153), (51, 102, 153), + (51, 51, 153), (51, 0, 153), (0, 255, 255), (0, 204, 255), + (0, 153, 255), (0, 102, 255), (0, 51, 255), (0, 0, 255), + (0, 255, 204), (0, 204, 204), (0, 153, 204), (0, 102, 204), + (0, 51, 204), (0, 0, 204), (0, 255, 153), (0, 204, 153), + (0, 153, 153), (0, 102, 153), (0, 51, 153), (0, 0, 153), + (255, 255, 102), (255, 204, 102), (255, 153, 102), (255, 102, 102), + (255, 51, 102), 
(255, 0, 102), (255, 255, 51), (255, 204, 51), + (255, 153, 51), (255, 102, 51), (255, 51, 51), (255, 0, 51), + (255, 255, 0), (255, 204, 0), (255, 153, 0), (255, 102, 0), + (255, 51, 0), (255, 0, 0), (204, 255, 102), (204, 204, 102), + (204, 153, 102), (204, 102, 102), (204, 51, 102), (204, 0, 102), + (204, 255, 51), (204, 204, 51), (204, 153, 51), (204, 102, 51), + (204, 51, 51), (204, 0, 51), (204, 255, 0), (204, 204, 0), + (204, 153, 0), (204, 102, 0), (204, 51, 0), (204, 0, 0), + (153, 255, 102), (153, 204, 102), (153, 153, 102), (153, 102, 102), + (153, 51, 102), (153, 0, 102), (153, 255, 51), (153, 204, 51), + (153, 153, 51), (153, 102, 51), (153, 51, 51), (153, 0, 51), + (153, 255, 0), (153, 204, 0), (153, 153, 0), (153, 102, 0), + (153, 51, 0), (153, 0, 0), (102, 255, 102), (102, 204, 102), + (102, 153, 102), (102, 102, 102), (102, 51, 102), (102, 0, 102), + (102, 255, 51), (102, 204, 51), (102, 153, 51), (102, 102, 51), + (102, 51, 51), (102, 0, 51), (102, 255, 0), (102, 204, 0), + (102, 153, 0), (102, 102, 0), (102, 51, 0), (102, 0, 0), + (51, 255, 102), (51, 204, 102), (51, 153, 102), (51, 102, 102), + (51, 51, 102), (51, 0, 102), (51, 255, 51), (51, 204, 51), + (51, 153, 51), (51, 102, 51), (51, 51, 51), (51, 0, 51), + (51, 255, 0), (51, 204, 0), (51, 153, 0), (51, 102, 0), + (51, 51, 0), (51, 0, 0), (0, 255, 102), (0, 204, 102), + (0, 153, 102), (0, 102, 102), (0, 51, 102), (0, 0, 102), + (0, 255, 51), (0, 204, 51), (0, 153, 51), (0, 102, 51), + (0, 51, 51), (0, 0, 51), (0, 255, 0), (0, 204, 0), + (0, 153, 0), (0, 102, 0), (0, 51, 0), (17, 17, 17), + (34, 34, 34), (68, 68, 68), (85, 85, 85), (119, 119, 119), + (136, 136, 136), (170, 170, 170), (187, 187, 187), (221, 221, 221), + (238, 238, 238), (192, 192, 192), (128, 0, 0), (128, 0, 128), + (0, 128, 0), (0, 128, 128), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), 
(0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)) +# fmt: on + + +# so build a prototype image to be used for palette resampling +def build_prototype_image(): + image = Image.new("L", (1, len(_Palm8BitColormapValues))) + image.putdata(list(range(len(_Palm8BitColormapValues)))) + palettedata = () + for colormapValue in _Palm8BitColormapValues: + palettedata += colormapValue + palettedata += (0, 0, 0) * (256 - len(_Palm8BitColormapValues)) + image.putpalette(palettedata) + return image + + +Palm8BitColormapImage = build_prototype_image() + +# OK, we now have in Palm8BitColormapImage, +# a "P"-mode image with the right palette +# +# -------------------------------------------------------------------- + +_FLAGS = {"custom-colormap": 0x4000, "is-compressed": 0x8000, "has-transparent": 0x2000} + +_COMPRESSION_TYPES = {"none": 0xFF, "rle": 0x01, "scanline": 0x00} + + +# +# -------------------------------------------------------------------- + +## +# (Internal) Image save plugin for the Palm format. + + +def _save(im, fp, filename): + + if im.mode == "P": + + # we assume this is a color Palm image with the standard colormap, + # unless the "info" dict has a "custom-colormap" field + + rawmode = "P" + bpp = 8 + version = 1 + + elif im.mode == "L": + if im.encoderinfo.get("bpp") in (1, 2, 4): + # this is 8-bit grayscale, so we shift it to get the high-order bits, + # and invert it because + # Palm does greyscale from white (0) to black (1) + bpp = im.encoderinfo["bpp"] + im = im.point( + lambda x, shift=8 - bpp, maxval=(1 << bpp) - 1: maxval - (x >> shift) + ) + elif im.info.get("bpp") in (1, 2, 4): + # here we assume that even though the inherent mode is 8-bit grayscale, + # only the lower bpp bits are significant. + # We invert them to match the Palm. 
+ bpp = im.info["bpp"] + im = im.point(lambda x, maxval=(1 << bpp) - 1: maxval - (x & maxval)) + else: + raise OSError(f"cannot write mode {im.mode} as Palm") + + # we ignore the palette here + im.mode = "P" + rawmode = "P;" + str(bpp) + version = 1 + + elif im.mode == "1": + + # monochrome -- write it inverted, as is the Palm standard + rawmode = "1;I" + bpp = 1 + version = 0 + + else: + + raise OSError(f"cannot write mode {im.mode} as Palm") + + # + # make sure image data is available + im.load() + + # write header + + cols = im.size[0] + rows = im.size[1] + + rowbytes = int((cols + (16 // bpp - 1)) / (16 // bpp)) * 2 + transparent_index = 0 + compression_type = _COMPRESSION_TYPES["none"] + + flags = 0 + if im.mode == "P" and "custom-colormap" in im.info: + flags = flags & _FLAGS["custom-colormap"] + colormapsize = 4 * 256 + 2 + colormapmode = im.palette.mode + colormap = im.getdata().getpalette() + else: + colormapsize = 0 + + if "offset" in im.info: + offset = (rowbytes * rows + 16 + 3 + colormapsize) // 4 + else: + offset = 0 + + fp.write(o16b(cols) + o16b(rows) + o16b(rowbytes) + o16b(flags)) + fp.write(o8(bpp)) + fp.write(o8(version)) + fp.write(o16b(offset)) + fp.write(o8(transparent_index)) + fp.write(o8(compression_type)) + fp.write(o16b(0)) # reserved by Palm + + # now write colormap if necessary + + if colormapsize > 0: + fp.write(o16b(256)) + for i in range(256): + fp.write(o8(i)) + if colormapmode == "RGB": + fp.write( + o8(colormap[3 * i]) + + o8(colormap[3 * i + 1]) + + o8(colormap[3 * i + 2]) + ) + elif colormapmode == "RGBA": + fp.write( + o8(colormap[4 * i]) + + o8(colormap[4 * i + 1]) + + o8(colormap[4 * i + 2]) + ) + + # now convert data to raw form + ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, rowbytes, 1))]) + + if hasattr(fp, "flush"): + fp.flush() + + +# +# -------------------------------------------------------------------- + +Image.register_save("Palm", _save) + +Image.register_extension("Palm", ".palm") + 
+Image.register_mime("Palm", "image/palm") diff --git a/.venv/lib/python3.9/site-packages/PIL/PcdImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/PcdImagePlugin.py new file mode 100644 index 00000000..38caf5c6 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/PcdImagePlugin.py @@ -0,0 +1,63 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PCD file handling +# +# History: +# 96-05-10 fl Created +# 96-05-27 fl Added draft mode (128x192, 256x384) +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# + + +from . import Image, ImageFile + +## +# Image plugin for PhotoCD images. This plugin only reads the 768x512 +# image from the file; higher resolutions are encoded in a proprietary +# encoding. + + +class PcdImageFile(ImageFile.ImageFile): + + format = "PCD" + format_description = "Kodak PhotoCD" + + def _open(self): + + # rough + self.fp.seek(2048) + s = self.fp.read(2048) + + if s[:4] != b"PCD_": + raise SyntaxError("not a PCD file") + + orientation = s[1538] & 3 + self.tile_post_rotate = None + if orientation == 1: + self.tile_post_rotate = 90 + elif orientation == 3: + self.tile_post_rotate = -90 + + self.mode = "RGB" + self._size = 768, 512 # FIXME: not correct for rotated images! 
+ self.tile = [("pcd", (0, 0) + self.size, 96 * 2048, None)] + + def load_end(self): + if self.tile_post_rotate: + # Handle rotated PCDs + self.im = self.im.rotate(self.tile_post_rotate) + self._size = self.im.size + + +# +# registry + +Image.register_open(PcdImageFile.format, PcdImageFile) + +Image.register_extension(PcdImageFile.format, ".pcd") diff --git a/.venv/lib/python3.9/site-packages/PIL/PcfFontFile.py b/.venv/lib/python3.9/site-packages/PIL/PcfFontFile.py new file mode 100644 index 00000000..6a4eb22a --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/PcfFontFile.py @@ -0,0 +1,248 @@ +# +# THIS IS WORK IN PROGRESS +# +# The Python Imaging Library +# $Id$ +# +# portable compiled font file parser +# +# history: +# 1997-08-19 fl created +# 2003-09-13 fl fixed loading of unicode fonts +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1997-2003 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +import io + +from . import FontFile, Image +from ._binary import i8 +from ._binary import i16be as b16 +from ._binary import i16le as l16 +from ._binary import i32be as b32 +from ._binary import i32le as l32 + +# -------------------------------------------------------------------- +# declarations + +PCF_MAGIC = 0x70636601 # "\x01fcp" + +PCF_PROPERTIES = 1 << 0 +PCF_ACCELERATORS = 1 << 1 +PCF_METRICS = 1 << 2 +PCF_BITMAPS = 1 << 3 +PCF_INK_METRICS = 1 << 4 +PCF_BDF_ENCODINGS = 1 << 5 +PCF_SWIDTHS = 1 << 6 +PCF_GLYPH_NAMES = 1 << 7 +PCF_BDF_ACCELERATORS = 1 << 8 + +BYTES_PER_ROW = [ + lambda bits: ((bits + 7) >> 3), + lambda bits: ((bits + 15) >> 3) & ~1, + lambda bits: ((bits + 31) >> 3) & ~3, + lambda bits: ((bits + 63) >> 3) & ~7, +] + + +def sz(s, o): + return s[o : s.index(b"\0", o)] + + +class PcfFontFile(FontFile.FontFile): + """Font file plugin for the X11 PCF format.""" + + name = "name" + + def __init__(self, fp, charset_encoding="iso8859-1"): + + self.charset_encoding = charset_encoding + + magic 
= l32(fp.read(4)) + if magic != PCF_MAGIC: + raise SyntaxError("not a PCF file") + + super().__init__() + + count = l32(fp.read(4)) + self.toc = {} + for i in range(count): + type = l32(fp.read(4)) + self.toc[type] = l32(fp.read(4)), l32(fp.read(4)), l32(fp.read(4)) + + self.fp = fp + + self.info = self._load_properties() + + metrics = self._load_metrics() + bitmaps = self._load_bitmaps(metrics) + encoding = self._load_encoding() + + # + # create glyph structure + + for ch in range(256): + ix = encoding[ch] + if ix is not None: + x, y, l, r, w, a, d, f = metrics[ix] + glyph = (w, 0), (l, d - y, x + l, d), (0, 0, x, y), bitmaps[ix] + self.glyph[ch] = glyph + + def _getformat(self, tag): + + format, size, offset = self.toc[tag] + + fp = self.fp + fp.seek(offset) + + format = l32(fp.read(4)) + + if format & 4: + i16, i32 = b16, b32 + else: + i16, i32 = l16, l32 + + return fp, format, i16, i32 + + def _load_properties(self): + + # + # font properties + + properties = {} + + fp, format, i16, i32 = self._getformat(PCF_PROPERTIES) + + nprops = i32(fp.read(4)) + + # read property description + p = [] + for i in range(nprops): + p.append((i32(fp.read(4)), i8(fp.read(1)), i32(fp.read(4)))) + if nprops & 3: + fp.seek(4 - (nprops & 3), io.SEEK_CUR) # pad + + data = fp.read(i32(fp.read(4))) + + for k, s, v in p: + k = sz(data, k) + if s: + v = sz(data, v) + properties[k] = v + + return properties + + def _load_metrics(self): + + # + # font metrics + + metrics = [] + + fp, format, i16, i32 = self._getformat(PCF_METRICS) + + append = metrics.append + + if (format & 0xFF00) == 0x100: + + # "compressed" metrics + for i in range(i16(fp.read(2))): + left = i8(fp.read(1)) - 128 + right = i8(fp.read(1)) - 128 + width = i8(fp.read(1)) - 128 + ascent = i8(fp.read(1)) - 128 + descent = i8(fp.read(1)) - 128 + xsize = right - left + ysize = ascent + descent + append((xsize, ysize, left, right, width, ascent, descent, 0)) + + else: + + # "jumbo" metrics + for i in range(i32(fp.read(4))): + 
left = i16(fp.read(2)) + right = i16(fp.read(2)) + width = i16(fp.read(2)) + ascent = i16(fp.read(2)) + descent = i16(fp.read(2)) + attributes = i16(fp.read(2)) + xsize = right - left + ysize = ascent + descent + append((xsize, ysize, left, right, width, ascent, descent, attributes)) + + return metrics + + def _load_bitmaps(self, metrics): + + # + # bitmap data + + bitmaps = [] + + fp, format, i16, i32 = self._getformat(PCF_BITMAPS) + + nbitmaps = i32(fp.read(4)) + + if nbitmaps != len(metrics): + raise OSError("Wrong number of bitmaps") + + offsets = [] + for i in range(nbitmaps): + offsets.append(i32(fp.read(4))) + + bitmapSizes = [] + for i in range(4): + bitmapSizes.append(i32(fp.read(4))) + + # byteorder = format & 4 # non-zero => MSB + bitorder = format & 8 # non-zero => MSB + padindex = format & 3 + + bitmapsize = bitmapSizes[padindex] + offsets.append(bitmapsize) + + data = fp.read(bitmapsize) + + pad = BYTES_PER_ROW[padindex] + mode = "1;R" + if bitorder: + mode = "1" + + for i in range(nbitmaps): + x, y, l, r, w, a, d, f = metrics[i] + b, e = offsets[i], offsets[i + 1] + bitmaps.append(Image.frombytes("1", (x, y), data[b:e], "raw", mode, pad(x))) + + return bitmaps + + def _load_encoding(self): + + # map character code to bitmap index + encoding = [None] * 256 + + fp, format, i16, i32 = self._getformat(PCF_BDF_ENCODINGS) + + firstCol, lastCol = i16(fp.read(2)), i16(fp.read(2)) + firstRow, lastRow = i16(fp.read(2)), i16(fp.read(2)) + + i16(fp.read(2)) # default + + nencoding = (lastCol - firstCol + 1) * (lastRow - firstRow + 1) + + encodingOffsets = [i16(fp.read(2)) for _ in range(nencoding)] + + for i in range(firstCol, len(encoding)): + try: + encodingOffset = encodingOffsets[ + ord(bytearray([i]).decode(self.charset_encoding)) + ] + if encodingOffset != 0xFFFF: + encoding[i] = encodingOffset + except UnicodeDecodeError: + # character is not supported in selected encoding + pass + + return encoding diff --git 
a/.venv/lib/python3.9/site-packages/PIL/PcxImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/PcxImagePlugin.py new file mode 100644 index 00000000..d2e166bd --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/PcxImagePlugin.py @@ -0,0 +1,218 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PCX file handling +# +# This format was originally used by ZSoft's popular PaintBrush +# program for the IBM PC. It is also supported by many MS-DOS and +# Windows applications, including the Windows PaintBrush program in +# Windows 3. +# +# history: +# 1995-09-01 fl Created +# 1996-05-20 fl Fixed RGB support +# 1997-01-03 fl Fixed 2-bit and 4-bit support +# 1999-02-03 fl Fixed 8-bit support (broken in 1.0b1) +# 1999-02-07 fl Added write support +# 2002-06-09 fl Made 2-bit and 4-bit support a bit more robust +# 2002-07-30 fl Seek from to current position, not beginning of file +# 2003-06-03 fl Extract DPI settings (info["dpi"]) +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1995-2003 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +import io +import logging + +from . import Image, ImageFile, ImagePalette +from ._binary import i16le as i16 +from ._binary import o8 +from ._binary import o16le as o16 + +logger = logging.getLogger(__name__) + + +def _accept(prefix): + return prefix[0] == 10 and prefix[1] in [0, 2, 3, 5] + + +## +# Image plugin for Paintbrush images. 
+ + +class PcxImageFile(ImageFile.ImageFile): + + format = "PCX" + format_description = "Paintbrush" + + def _open(self): + + # header + s = self.fp.read(128) + if not _accept(s): + raise SyntaxError("not a PCX file") + + # image + bbox = i16(s, 4), i16(s, 6), i16(s, 8) + 1, i16(s, 10) + 1 + if bbox[2] <= bbox[0] or bbox[3] <= bbox[1]: + raise SyntaxError("bad PCX image size") + logger.debug("BBox: %s %s %s %s", *bbox) + + # format + version = s[1] + bits = s[3] + planes = s[65] + provided_stride = i16(s, 66) + logger.debug( + "PCX version %s, bits %s, planes %s, stride %s", + version, + bits, + planes, + provided_stride, + ) + + self.info["dpi"] = i16(s, 12), i16(s, 14) + + if bits == 1 and planes == 1: + mode = rawmode = "1" + + elif bits == 1 and planes in (2, 4): + mode = "P" + rawmode = "P;%dL" % planes + self.palette = ImagePalette.raw("RGB", s[16:64]) + + elif version == 5 and bits == 8 and planes == 1: + mode = rawmode = "L" + # FIXME: hey, this doesn't work with the incremental loader !!! + self.fp.seek(-769, io.SEEK_END) + s = self.fp.read(769) + if len(s) == 769 and s[0] == 12: + # check if the palette is linear greyscale + for i in range(256): + if s[i * 3 + 1 : i * 3 + 4] != o8(i) * 3: + mode = rawmode = "P" + break + if mode == "P": + self.palette = ImagePalette.raw("RGB", s[1:]) + self.fp.seek(128) + + elif version == 5 and bits == 8 and planes == 3: + mode = "RGB" + rawmode = "RGB;L" + + else: + raise OSError("unknown PCX mode") + + self.mode = mode + self._size = bbox[2] - bbox[0], bbox[3] - bbox[1] + + # Don't trust the passed in stride. + # Calculate the approximate position for ourselves. 
+ # CVE-2020-35653 + stride = (self._size[0] * bits + 7) // 8 + + # While the specification states that this must be even, + # not all images follow this + if provided_stride != stride: + stride += stride % 2 + + bbox = (0, 0) + self.size + logger.debug("size: %sx%s", *self.size) + + self.tile = [("pcx", bbox, self.fp.tell(), (rawmode, planes * stride))] + + +# -------------------------------------------------------------------- +# save PCX files + + +SAVE = { + # mode: (version, bits, planes, raw mode) + "1": (2, 1, 1, "1"), + "L": (5, 8, 1, "L"), + "P": (5, 8, 1, "P"), + "RGB": (5, 8, 3, "RGB;L"), +} + + +def _save(im, fp, filename): + + try: + version, bits, planes, rawmode = SAVE[im.mode] + except KeyError as e: + raise ValueError(f"Cannot save {im.mode} images as PCX") from e + + # bytes per plane + stride = (im.size[0] * bits + 7) // 8 + # stride should be even + stride += stride % 2 + # Stride needs to be kept in sync with the PcxEncode.c version. + # Ideally it should be passed in in the state, but the bytes value + # gets overwritten. + + logger.debug( + "PcxImagePlugin._save: xwidth: %d, bits: %d, stride: %d", + im.size[0], + bits, + stride, + ) + + # under windows, we could determine the current screen size with + # "Image.core.display_mode()[1]", but I think that's overkill... 
+ + screen = im.size + + dpi = 100, 100 + + # PCX header + fp.write( + o8(10) + + o8(version) + + o8(1) + + o8(bits) + + o16(0) + + o16(0) + + o16(im.size[0] - 1) + + o16(im.size[1] - 1) + + o16(dpi[0]) + + o16(dpi[1]) + + b"\0" * 24 + + b"\xFF" * 24 + + b"\0" + + o8(planes) + + o16(stride) + + o16(1) + + o16(screen[0]) + + o16(screen[1]) + + b"\0" * 54 + ) + + assert fp.tell() == 128 + + ImageFile._save(im, fp, [("pcx", (0, 0) + im.size, 0, (rawmode, bits * planes))]) + + if im.mode == "P": + # colour palette + fp.write(o8(12)) + fp.write(im.im.getpalette("RGB", "RGB")) # 768 bytes + elif im.mode == "L": + # greyscale palette + fp.write(o8(12)) + for i in range(256): + fp.write(o8(i) * 3) + + +# -------------------------------------------------------------------- +# registry + + +Image.register_open(PcxImageFile.format, PcxImageFile, _accept) +Image.register_save(PcxImageFile.format, _save) + +Image.register_extension(PcxImageFile.format, ".pcx") + +Image.register_mime(PcxImageFile.format, "image/x-pcx") diff --git a/.venv/lib/python3.9/site-packages/PIL/PdfImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/PdfImagePlugin.py new file mode 100644 index 00000000..49ba077e --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/PdfImagePlugin.py @@ -0,0 +1,240 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PDF (Acrobat) file handling +# +# History: +# 1996-07-16 fl Created +# 1997-01-18 fl Fixed header +# 2004-02-21 fl Fixes for 1/L/CMYK images, etc. +# 2004-02-24 fl Fixes for 1 and P images. +# +# Copyright (c) 1997-2004 by Secret Labs AB. All rights reserved. +# Copyright (c) 1996-1997 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +## +# Image plugin for PDF images (output only). +## + +import io +import os +import time + +from . import Image, ImageFile, ImageSequence, PdfParser, __version__ + +# +# -------------------------------------------------------------------- + +# object ids: +# 1. catalogue +# 2. 
pages +# 3. image +# 4. page +# 5. page contents + + +def _save_all(im, fp, filename): + _save(im, fp, filename, save_all=True) + + +## +# (Internal) Image save plugin for the PDF format. + + +def _save(im, fp, filename, save_all=False): + is_appending = im.encoderinfo.get("append", False) + if is_appending: + existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode="r+b") + else: + existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode="w+b") + + resolution = im.encoderinfo.get("resolution", 72.0) + + info = { + "title": None + if is_appending + else os.path.splitext(os.path.basename(filename))[0], + "author": None, + "subject": None, + "keywords": None, + "creator": None, + "producer": None, + "creationDate": None if is_appending else time.gmtime(), + "modDate": None if is_appending else time.gmtime(), + } + for k, default in info.items(): + v = im.encoderinfo.get(k) if k in im.encoderinfo else default + if v: + existing_pdf.info[k[0].upper() + k[1:]] = v + + # + # make sure image data is available + im.load() + + existing_pdf.start_writing() + existing_pdf.write_header() + existing_pdf.write_comment(f"created by Pillow {__version__} PDF driver") + + # + # pages + ims = [im] + if save_all: + append_images = im.encoderinfo.get("append_images", []) + for append_im in append_images: + append_im.encoderinfo = im.encoderinfo.copy() + ims.append(append_im) + numberOfPages = 0 + image_refs = [] + page_refs = [] + contents_refs = [] + for im in ims: + im_numberOfPages = 1 + if save_all: + try: + im_numberOfPages = im.n_frames + except AttributeError: + # Image format does not have n_frames. 
+ # It is a single frame image + pass + numberOfPages += im_numberOfPages + for i in range(im_numberOfPages): + image_refs.append(existing_pdf.next_object_id(0)) + page_refs.append(existing_pdf.next_object_id(0)) + contents_refs.append(existing_pdf.next_object_id(0)) + existing_pdf.pages.append(page_refs[-1]) + + # + # catalog and list of pages + existing_pdf.write_catalog() + + pageNumber = 0 + for imSequence in ims: + im_pages = ImageSequence.Iterator(imSequence) if save_all else [imSequence] + for im in im_pages: + # FIXME: Should replace ASCIIHexDecode with RunLengthDecode + # (packbits) or LZWDecode (tiff/lzw compression). Note that + # PDF 1.2 also supports Flatedecode (zip compression). + + bits = 8 + params = None + decode = None + + if im.mode == "1": + filter = "DCTDecode" + colorspace = PdfParser.PdfName("DeviceGray") + procset = "ImageB" # grayscale + bits = 1 + elif im.mode == "L": + filter = "DCTDecode" + # params = f"<< /Predictor 15 /Columns {width-2} >>" + colorspace = PdfParser.PdfName("DeviceGray") + procset = "ImageB" # grayscale + elif im.mode == "P": + filter = "ASCIIHexDecode" + palette = im.im.getpalette("RGB") + colorspace = [ + PdfParser.PdfName("Indexed"), + PdfParser.PdfName("DeviceRGB"), + 255, + PdfParser.PdfBinary(palette), + ] + procset = "ImageI" # indexed color + elif im.mode == "RGB": + filter = "DCTDecode" + colorspace = PdfParser.PdfName("DeviceRGB") + procset = "ImageC" # color images + elif im.mode == "CMYK": + filter = "DCTDecode" + colorspace = PdfParser.PdfName("DeviceCMYK") + procset = "ImageC" # color images + decode = [1, 0, 1, 0, 1, 0, 1, 0] + else: + raise ValueError(f"cannot save mode {im.mode}") + + # + # image + + op = io.BytesIO() + + if filter == "ASCIIHexDecode": + ImageFile._save(im, op, [("hex", (0, 0) + im.size, 0, im.mode)]) + elif filter == "DCTDecode": + Image.SAVE["JPEG"](im, op, filename) + elif filter == "FlateDecode": + ImageFile._save(im, op, [("zip", (0, 0) + im.size, 0, im.mode)]) + elif filter == 
"RunLengthDecode": + ImageFile._save(im, op, [("packbits", (0, 0) + im.size, 0, im.mode)]) + else: + raise ValueError(f"unsupported PDF filter ({filter})") + + # + # Get image characteristics + + width, height = im.size + + existing_pdf.write_obj( + image_refs[pageNumber], + stream=op.getvalue(), + Type=PdfParser.PdfName("XObject"), + Subtype=PdfParser.PdfName("Image"), + Width=width, # * 72.0 / resolution, + Height=height, # * 72.0 / resolution, + Filter=PdfParser.PdfName(filter), + BitsPerComponent=bits, + Decode=decode, + DecodeParams=params, + ColorSpace=colorspace, + ) + + # + # page + + existing_pdf.write_page( + page_refs[pageNumber], + Resources=PdfParser.PdfDict( + ProcSet=[PdfParser.PdfName("PDF"), PdfParser.PdfName(procset)], + XObject=PdfParser.PdfDict(image=image_refs[pageNumber]), + ), + MediaBox=[ + 0, + 0, + width * 72.0 / resolution, + height * 72.0 / resolution, + ], + Contents=contents_refs[pageNumber], + ) + + # + # page contents + + page_contents = b"q %f 0 0 %f 0 0 cm /image Do Q\n" % ( + width * 72.0 / resolution, + height * 72.0 / resolution, + ) + + existing_pdf.write_obj(contents_refs[pageNumber], stream=page_contents) + + pageNumber += 1 + + # + # trailer + existing_pdf.write_xref_and_trailer() + if hasattr(fp, "flush"): + fp.flush() + existing_pdf.close() + + +# +# -------------------------------------------------------------------- + + +Image.register_save("PDF", _save) +Image.register_save_all("PDF", _save_all) + +Image.register_extension("PDF", ".pdf") + +Image.register_mime("PDF", "application/pdf") diff --git a/.venv/lib/python3.9/site-packages/PIL/PdfParser.py b/.venv/lib/python3.9/site-packages/PIL/PdfParser.py new file mode 100644 index 00000000..b5279e0d --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/PdfParser.py @@ -0,0 +1,997 @@ +import calendar +import codecs +import collections +import mmap +import os +import re +import time +import zlib + + +# see 7.9.2.2 Text String Type on page 86 and D.3 PDFDocEncoding 
Character Set +# on page 656 +def encode_text(s): + return codecs.BOM_UTF16_BE + s.encode("utf_16_be") + + +PDFDocEncoding = { + 0x16: "\u0017", + 0x18: "\u02D8", + 0x19: "\u02C7", + 0x1A: "\u02C6", + 0x1B: "\u02D9", + 0x1C: "\u02DD", + 0x1D: "\u02DB", + 0x1E: "\u02DA", + 0x1F: "\u02DC", + 0x80: "\u2022", + 0x81: "\u2020", + 0x82: "\u2021", + 0x83: "\u2026", + 0x84: "\u2014", + 0x85: "\u2013", + 0x86: "\u0192", + 0x87: "\u2044", + 0x88: "\u2039", + 0x89: "\u203A", + 0x8A: "\u2212", + 0x8B: "\u2030", + 0x8C: "\u201E", + 0x8D: "\u201C", + 0x8E: "\u201D", + 0x8F: "\u2018", + 0x90: "\u2019", + 0x91: "\u201A", + 0x92: "\u2122", + 0x93: "\uFB01", + 0x94: "\uFB02", + 0x95: "\u0141", + 0x96: "\u0152", + 0x97: "\u0160", + 0x98: "\u0178", + 0x99: "\u017D", + 0x9A: "\u0131", + 0x9B: "\u0142", + 0x9C: "\u0153", + 0x9D: "\u0161", + 0x9E: "\u017E", + 0xA0: "\u20AC", +} + + +def decode_text(b): + if b[: len(codecs.BOM_UTF16_BE)] == codecs.BOM_UTF16_BE: + return b[len(codecs.BOM_UTF16_BE) :].decode("utf_16_be") + else: + return "".join(PDFDocEncoding.get(byte, chr(byte)) for byte in b) + + +class PdfFormatError(RuntimeError): + """An error that probably indicates a syntactic or semantic error in the + PDF file structure""" + + pass + + +def check_format_condition(condition, error_message): + if not condition: + raise PdfFormatError(error_message) + + +class IndirectReference( + collections.namedtuple("IndirectReferenceTuple", ["object_id", "generation"]) +): + def __str__(self): + return "%s %s R" % self + + def __bytes__(self): + return self.__str__().encode("us-ascii") + + def __eq__(self, other): + return ( + other.__class__ is self.__class__ + and other.object_id == self.object_id + and other.generation == self.generation + ) + + def __ne__(self, other): + return not (self == other) + + def __hash__(self): + return hash((self.object_id, self.generation)) + + +class IndirectObjectDef(IndirectReference): + def __str__(self): + return "%s %s obj" % self + + +class XrefTable: + 
def __init__(self): + self.existing_entries = {} # object ID => (offset, generation) + self.new_entries = {} # object ID => (offset, generation) + self.deleted_entries = {0: 65536} # object ID => generation + self.reading_finished = False + + def __setitem__(self, key, value): + if self.reading_finished: + self.new_entries[key] = value + else: + self.existing_entries[key] = value + if key in self.deleted_entries: + del self.deleted_entries[key] + + def __getitem__(self, key): + try: + return self.new_entries[key] + except KeyError: + return self.existing_entries[key] + + def __delitem__(self, key): + if key in self.new_entries: + generation = self.new_entries[key][1] + 1 + del self.new_entries[key] + self.deleted_entries[key] = generation + elif key in self.existing_entries: + generation = self.existing_entries[key][1] + 1 + self.deleted_entries[key] = generation + elif key in self.deleted_entries: + generation = self.deleted_entries[key] + else: + raise IndexError( + "object ID " + str(key) + " cannot be deleted because it doesn't exist" + ) + + def __contains__(self, key): + return key in self.existing_entries or key in self.new_entries + + def __len__(self): + return len( + set(self.existing_entries.keys()) + | set(self.new_entries.keys()) + | set(self.deleted_entries.keys()) + ) + + def keys(self): + return ( + set(self.existing_entries.keys()) - set(self.deleted_entries.keys()) + ) | set(self.new_entries.keys()) + + def write(self, f): + keys = sorted(set(self.new_entries.keys()) | set(self.deleted_entries.keys())) + deleted_keys = sorted(set(self.deleted_entries.keys())) + startxref = f.tell() + f.write(b"xref\n") + while keys: + # find a contiguous sequence of object IDs + prev = None + for index, key in enumerate(keys): + if prev is None or prev + 1 == key: + prev = key + else: + contiguous_keys = keys[:index] + keys = keys[index:] + break + else: + contiguous_keys = keys + keys = None + f.write(b"%d %d\n" % (contiguous_keys[0], len(contiguous_keys))) + for 
object_id in contiguous_keys: + if object_id in self.new_entries: + f.write(b"%010d %05d n \n" % self.new_entries[object_id]) + else: + this_deleted_object_id = deleted_keys.pop(0) + check_format_condition( + object_id == this_deleted_object_id, + f"expected the next deleted object ID to be {object_id}, " + f"instead found {this_deleted_object_id}", + ) + try: + next_in_linked_list = deleted_keys[0] + except IndexError: + next_in_linked_list = 0 + f.write( + b"%010d %05d f \n" + % (next_in_linked_list, self.deleted_entries[object_id]) + ) + return startxref + + +class PdfName: + def __init__(self, name): + if isinstance(name, PdfName): + self.name = name.name + elif isinstance(name, bytes): + self.name = name + else: + self.name = name.encode("us-ascii") + + def name_as_str(self): + return self.name.decode("us-ascii") + + def __eq__(self, other): + return ( + isinstance(other, PdfName) and other.name == self.name + ) or other == self.name + + def __hash__(self): + return hash(self.name) + + def __repr__(self): + return f"PdfName({repr(self.name)})" + + @classmethod + def from_pdf_stream(cls, data): + return cls(PdfParser.interpret_name(data)) + + allowed_chars = set(range(33, 127)) - {ord(c) for c in "#%/()<>[]{}"} + + def __bytes__(self): + result = bytearray(b"/") + for b in self.name: + if b in self.allowed_chars: + result.append(b) + else: + result.extend(b"#%02X" % b) + return bytes(result) + + +class PdfArray(list): + def __bytes__(self): + return b"[ " + b" ".join(pdf_repr(x) for x in self) + b" ]" + + +class PdfDict(collections.UserDict): + def __setattr__(self, key, value): + if key == "data": + collections.UserDict.__setattr__(self, key, value) + else: + self[key.encode("us-ascii")] = value + + def __getattr__(self, key): + try: + value = self[key.encode("us-ascii")] + except KeyError as e: + raise AttributeError(key) from e + if isinstance(value, bytes): + value = decode_text(value) + if key.endswith("Date"): + if value.startswith("D:"): + value = 
value[2:] + + relationship = "Z" + if len(value) > 17: + relationship = value[14] + offset = int(value[15:17]) * 60 + if len(value) > 20: + offset += int(value[18:20]) + + format = "%Y%m%d%H%M%S"[: len(value) - 2] + value = time.strptime(value[: len(format) + 2], format) + if relationship in ["+", "-"]: + offset *= 60 + if relationship == "+": + offset *= -1 + value = time.gmtime(calendar.timegm(value) + offset) + return value + + def __bytes__(self): + out = bytearray(b"<<") + for key, value in self.items(): + if value is None: + continue + value = pdf_repr(value) + out.extend(b"\n") + out.extend(bytes(PdfName(key))) + out.extend(b" ") + out.extend(value) + out.extend(b"\n>>") + return bytes(out) + + +class PdfBinary: + def __init__(self, data): + self.data = data + + def __bytes__(self): + return b"<%s>" % b"".join(b"%02X" % b for b in self.data) + + +class PdfStream: + def __init__(self, dictionary, buf): + self.dictionary = dictionary + self.buf = buf + + def decode(self): + try: + filter = self.dictionary.Filter + except AttributeError: + return self.buf + if filter == b"FlateDecode": + try: + expected_length = self.dictionary.DL + except AttributeError: + expected_length = self.dictionary.Length + return zlib.decompress(self.buf, bufsize=int(expected_length)) + else: + raise NotImplementedError( + f"stream filter {repr(self.dictionary.Filter)} unknown/unsupported" + ) + + +def pdf_repr(x): + if x is True: + return b"true" + elif x is False: + return b"false" + elif x is None: + return b"null" + elif isinstance(x, (PdfName, PdfDict, PdfArray, PdfBinary)): + return bytes(x) + elif isinstance(x, int): + return str(x).encode("us-ascii") + elif isinstance(x, float): + return str(x).encode("us-ascii") + elif isinstance(x, time.struct_time): + return b"(D:" + time.strftime("%Y%m%d%H%M%SZ", x).encode("us-ascii") + b")" + elif isinstance(x, dict): + return bytes(PdfDict(x)) + elif isinstance(x, list): + return bytes(PdfArray(x)) + elif isinstance(x, str): + return 
pdf_repr(encode_text(x)) + elif isinstance(x, bytes): + # XXX escape more chars? handle binary garbage + x = x.replace(b"\\", b"\\\\") + x = x.replace(b"(", b"\\(") + x = x.replace(b")", b"\\)") + return b"(" + x + b")" + else: + return bytes(x) + + +class PdfParser: + """Based on + https://www.adobe.com/content/dam/acom/en/devnet/acrobat/pdfs/PDF32000_2008.pdf + Supports PDF up to 1.4 + """ + + def __init__(self, filename=None, f=None, buf=None, start_offset=0, mode="rb"): + if buf and f: + raise RuntimeError("specify buf or f or filename, but not both buf and f") + self.filename = filename + self.buf = buf + self.f = f + self.start_offset = start_offset + self.should_close_buf = False + self.should_close_file = False + if filename is not None and f is None: + self.f = f = open(filename, mode) + self.should_close_file = True + if f is not None: + self.buf = buf = self.get_buf_from_file(f) + self.should_close_buf = True + if not filename and hasattr(f, "name"): + self.filename = f.name + self.cached_objects = {} + if buf: + self.read_pdf_info() + else: + self.file_size_total = self.file_size_this = 0 + self.root = PdfDict() + self.root_ref = None + self.info = PdfDict() + self.info_ref = None + self.page_tree_root = {} + self.pages = [] + self.orig_pages = [] + self.pages_ref = None + self.last_xref_section_offset = None + self.trailer_dict = {} + self.xref_table = XrefTable() + self.xref_table.reading_finished = True + if f: + self.seek_end() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + return False # do not suppress exceptions + + def start_writing(self): + self.close_buf() + self.seek_end() + + def close_buf(self): + try: + self.buf.close() + except AttributeError: + pass + self.buf = None + + def close(self): + if self.should_close_buf: + self.close_buf() + if self.f is not None and self.should_close_file: + self.f.close() + self.f = None + + def seek_end(self): + self.f.seek(0, os.SEEK_END) + + 
def write_header(self): + self.f.write(b"%PDF-1.4\n") + + def write_comment(self, s): + self.f.write(f"% {s}\n".encode("utf-8")) + + def write_catalog(self): + self.del_root() + self.root_ref = self.next_object_id(self.f.tell()) + self.pages_ref = self.next_object_id(0) + self.rewrite_pages() + self.write_obj(self.root_ref, Type=PdfName(b"Catalog"), Pages=self.pages_ref) + self.write_obj( + self.pages_ref, + Type=PdfName(b"Pages"), + Count=len(self.pages), + Kids=self.pages, + ) + return self.root_ref + + def rewrite_pages(self): + pages_tree_nodes_to_delete = [] + for i, page_ref in enumerate(self.orig_pages): + page_info = self.cached_objects[page_ref] + del self.xref_table[page_ref.object_id] + pages_tree_nodes_to_delete.append(page_info[PdfName(b"Parent")]) + if page_ref not in self.pages: + # the page has been deleted + continue + # make dict keys into strings for passing to write_page + stringified_page_info = {} + for key, value in page_info.items(): + # key should be a PdfName + stringified_page_info[key.name_as_str()] = value + stringified_page_info["Parent"] = self.pages_ref + new_page_ref = self.write_page(None, **stringified_page_info) + for j, cur_page_ref in enumerate(self.pages): + if cur_page_ref == page_ref: + # replace the page reference with the new one + self.pages[j] = new_page_ref + # delete redundant Pages tree nodes from xref table + for pages_tree_node_ref in pages_tree_nodes_to_delete: + while pages_tree_node_ref: + pages_tree_node = self.cached_objects[pages_tree_node_ref] + if pages_tree_node_ref.object_id in self.xref_table: + del self.xref_table[pages_tree_node_ref.object_id] + pages_tree_node_ref = pages_tree_node.get(b"Parent", None) + self.orig_pages = [] + + def write_xref_and_trailer(self, new_root_ref=None): + if new_root_ref: + self.del_root() + self.root_ref = new_root_ref + if self.info: + self.info_ref = self.write_obj(None, self.info) + start_xref = self.xref_table.write(self.f) + num_entries = len(self.xref_table) + 
trailer_dict = {b"Root": self.root_ref, b"Size": num_entries} + if self.last_xref_section_offset is not None: + trailer_dict[b"Prev"] = self.last_xref_section_offset + if self.info: + trailer_dict[b"Info"] = self.info_ref + self.last_xref_section_offset = start_xref + self.f.write( + b"trailer\n" + + bytes(PdfDict(trailer_dict)) + + b"\nstartxref\n%d\n%%%%EOF" % start_xref + ) + + def write_page(self, ref, *objs, **dict_obj): + if isinstance(ref, int): + ref = self.pages[ref] + if "Type" not in dict_obj: + dict_obj["Type"] = PdfName(b"Page") + if "Parent" not in dict_obj: + dict_obj["Parent"] = self.pages_ref + return self.write_obj(ref, *objs, **dict_obj) + + def write_obj(self, ref, *objs, **dict_obj): + f = self.f + if ref is None: + ref = self.next_object_id(f.tell()) + else: + self.xref_table[ref.object_id] = (f.tell(), ref.generation) + f.write(bytes(IndirectObjectDef(*ref))) + stream = dict_obj.pop("stream", None) + if stream is not None: + dict_obj["Length"] = len(stream) + if dict_obj: + f.write(pdf_repr(dict_obj)) + for obj in objs: + f.write(pdf_repr(obj)) + if stream is not None: + f.write(b"stream\n") + f.write(stream) + f.write(b"\nendstream\n") + f.write(b"endobj\n") + return ref + + def del_root(self): + if self.root_ref is None: + return + del self.xref_table[self.root_ref.object_id] + del self.xref_table[self.root[b"Pages"].object_id] + + @staticmethod + def get_buf_from_file(f): + if hasattr(f, "getbuffer"): + return f.getbuffer() + elif hasattr(f, "getvalue"): + return f.getvalue() + else: + try: + return mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) + except ValueError: # cannot mmap an empty file + return b"" + + def read_pdf_info(self): + self.file_size_total = len(self.buf) + self.file_size_this = self.file_size_total - self.start_offset + self.read_trailer() + self.root_ref = self.trailer_dict[b"Root"] + self.info_ref = self.trailer_dict.get(b"Info", None) + self.root = PdfDict(self.read_indirect(self.root_ref)) + if self.info_ref is 
None: + self.info = PdfDict() + else: + self.info = PdfDict(self.read_indirect(self.info_ref)) + check_format_condition(b"Type" in self.root, "/Type missing in Root") + check_format_condition( + self.root[b"Type"] == b"Catalog", "/Type in Root is not /Catalog" + ) + check_format_condition(b"Pages" in self.root, "/Pages missing in Root") + check_format_condition( + isinstance(self.root[b"Pages"], IndirectReference), + "/Pages in Root is not an indirect reference", + ) + self.pages_ref = self.root[b"Pages"] + self.page_tree_root = self.read_indirect(self.pages_ref) + self.pages = self.linearize_page_tree(self.page_tree_root) + # save the original list of page references + # in case the user modifies, adds or deletes some pages + # and we need to rewrite the pages and their list + self.orig_pages = self.pages[:] + + def next_object_id(self, offset=None): + try: + # TODO: support reuse of deleted objects + reference = IndirectReference(max(self.xref_table.keys()) + 1, 0) + except ValueError: + reference = IndirectReference(1, 0) + if offset is not None: + self.xref_table[reference.object_id] = (offset, 0) + return reference + + delimiter = br"[][()<>{}/%]" + delimiter_or_ws = br"[][()<>{}/%\000\011\012\014\015\040]" + whitespace = br"[\000\011\012\014\015\040]" + whitespace_or_hex = br"[\000\011\012\014\015\0400-9a-fA-F]" + whitespace_optional = whitespace + b"*" + whitespace_mandatory = whitespace + b"+" + whitespace_optional_no_nl = br"[\000\011\014\015\040]*" # no "\012" aka "\n" + newline_only = br"[\r\n]+" + newline = whitespace_optional_no_nl + newline_only + whitespace_optional_no_nl + re_trailer_end = re.compile( + whitespace_mandatory + + br"trailer" + + whitespace_optional + + br"\<\<(.*\>\>)" + + newline + + br"startxref" + + newline + + br"([0-9]+)" + + newline + + br"%%EOF" + + whitespace_optional + + br"$", + re.DOTALL, + ) + re_trailer_prev = re.compile( + whitespace_optional + + br"trailer" + + whitespace_optional + + br"\<\<(.*?\>\>)" + + newline + + 
br"startxref" + + newline + + br"([0-9]+)" + + newline + + br"%%EOF" + + whitespace_optional, + re.DOTALL, + ) + + def read_trailer(self): + search_start_offset = len(self.buf) - 16384 + if search_start_offset < self.start_offset: + search_start_offset = self.start_offset + m = self.re_trailer_end.search(self.buf, search_start_offset) + check_format_condition(m, "trailer end not found") + # make sure we found the LAST trailer + last_match = m + while m: + last_match = m + m = self.re_trailer_end.search(self.buf, m.start() + 16) + if not m: + m = last_match + trailer_data = m.group(1) + self.last_xref_section_offset = int(m.group(2)) + self.trailer_dict = self.interpret_trailer(trailer_data) + self.xref_table = XrefTable() + self.read_xref_table(xref_section_offset=self.last_xref_section_offset) + if b"Prev" in self.trailer_dict: + self.read_prev_trailer(self.trailer_dict[b"Prev"]) + + def read_prev_trailer(self, xref_section_offset): + trailer_offset = self.read_xref_table(xref_section_offset=xref_section_offset) + m = self.re_trailer_prev.search( + self.buf[trailer_offset : trailer_offset + 16384] + ) + check_format_condition(m, "previous trailer not found") + trailer_data = m.group(1) + check_format_condition( + int(m.group(2)) == xref_section_offset, + "xref section offset in previous trailer doesn't match what was expected", + ) + trailer_dict = self.interpret_trailer(trailer_data) + if b"Prev" in trailer_dict: + self.read_prev_trailer(trailer_dict[b"Prev"]) + + re_whitespace_optional = re.compile(whitespace_optional) + re_name = re.compile( + whitespace_optional + + br"/([!-$&'*-.0-;=?-Z\\^-z|~]+)(?=" + + delimiter_or_ws + + br")" + ) + re_dict_start = re.compile(whitespace_optional + br"\<\<") + re_dict_end = re.compile(whitespace_optional + br"\>\>" + whitespace_optional) + + @classmethod + def interpret_trailer(cls, trailer_data): + trailer = {} + offset = 0 + while True: + m = cls.re_name.match(trailer_data, offset) + if not m: + m = 
cls.re_dict_end.match(trailer_data, offset) + check_format_condition( + m and m.end() == len(trailer_data), + "name not found in trailer, remaining data: " + + repr(trailer_data[offset:]), + ) + break + key = cls.interpret_name(m.group(1)) + value, offset = cls.get_value(trailer_data, m.end()) + trailer[key] = value + check_format_condition( + b"Size" in trailer and isinstance(trailer[b"Size"], int), + "/Size not in trailer or not an integer", + ) + check_format_condition( + b"Root" in trailer and isinstance(trailer[b"Root"], IndirectReference), + "/Root not in trailer or not an indirect reference", + ) + return trailer + + re_hashes_in_name = re.compile(br"([^#]*)(#([0-9a-fA-F]{2}))?") + + @classmethod + def interpret_name(cls, raw, as_text=False): + name = b"" + for m in cls.re_hashes_in_name.finditer(raw): + if m.group(3): + name += m.group(1) + bytearray.fromhex(m.group(3).decode("us-ascii")) + else: + name += m.group(1) + if as_text: + return name.decode("utf-8") + else: + return bytes(name) + + re_null = re.compile(whitespace_optional + br"null(?=" + delimiter_or_ws + br")") + re_true = re.compile(whitespace_optional + br"true(?=" + delimiter_or_ws + br")") + re_false = re.compile(whitespace_optional + br"false(?=" + delimiter_or_ws + br")") + re_int = re.compile( + whitespace_optional + br"([-+]?[0-9]+)(?=" + delimiter_or_ws + br")" + ) + re_real = re.compile( + whitespace_optional + + br"([-+]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+))(?=" + + delimiter_or_ws + + br")" + ) + re_array_start = re.compile(whitespace_optional + br"\[") + re_array_end = re.compile(whitespace_optional + br"]") + re_string_hex = re.compile( + whitespace_optional + br"\<(" + whitespace_or_hex + br"*)\>" + ) + re_string_lit = re.compile(whitespace_optional + br"\(") + re_indirect_reference = re.compile( + whitespace_optional + + br"([-+]?[0-9]+)" + + whitespace_mandatory + + br"([-+]?[0-9]+)" + + whitespace_mandatory + + br"R(?=" + + delimiter_or_ws + + br")" + ) + re_indirect_def_start = 
re.compile( + whitespace_optional + + br"([-+]?[0-9]+)" + + whitespace_mandatory + + br"([-+]?[0-9]+)" + + whitespace_mandatory + + br"obj(?=" + + delimiter_or_ws + + br")" + ) + re_indirect_def_end = re.compile( + whitespace_optional + br"endobj(?=" + delimiter_or_ws + br")" + ) + re_comment = re.compile( + br"(" + whitespace_optional + br"%[^\r\n]*" + newline + br")*" + ) + re_stream_start = re.compile(whitespace_optional + br"stream\r?\n") + re_stream_end = re.compile( + whitespace_optional + br"endstream(?=" + delimiter_or_ws + br")" + ) + + @classmethod + def get_value(cls, data, offset, expect_indirect=None, max_nesting=-1): + if max_nesting == 0: + return None, None + m = cls.re_comment.match(data, offset) + if m: + offset = m.end() + m = cls.re_indirect_def_start.match(data, offset) + if m: + check_format_condition( + int(m.group(1)) > 0, + "indirect object definition: object ID must be greater than 0", + ) + check_format_condition( + int(m.group(2)) >= 0, + "indirect object definition: generation must be non-negative", + ) + check_format_condition( + expect_indirect is None + or expect_indirect + == IndirectReference(int(m.group(1)), int(m.group(2))), + "indirect object definition different than expected", + ) + object, offset = cls.get_value(data, m.end(), max_nesting=max_nesting - 1) + if offset is None: + return object, None + m = cls.re_indirect_def_end.match(data, offset) + check_format_condition(m, "indirect object definition end not found") + return object, m.end() + check_format_condition( + not expect_indirect, "indirect object definition not found" + ) + m = cls.re_indirect_reference.match(data, offset) + if m: + check_format_condition( + int(m.group(1)) > 0, + "indirect object reference: object ID must be greater than 0", + ) + check_format_condition( + int(m.group(2)) >= 0, + "indirect object reference: generation must be non-negative", + ) + return IndirectReference(int(m.group(1)), int(m.group(2))), m.end() + m = cls.re_dict_start.match(data, 
offset) + if m: + offset = m.end() + result = {} + m = cls.re_dict_end.match(data, offset) + while not m: + key, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1) + if offset is None: + return result, None + value, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1) + result[key] = value + if offset is None: + return result, None + m = cls.re_dict_end.match(data, offset) + offset = m.end() + m = cls.re_stream_start.match(data, offset) + if m: + try: + stream_len = int(result[b"Length"]) + except (TypeError, KeyError, ValueError) as e: + raise PdfFormatError( + "bad or missing Length in stream dict (%r)" + % result.get(b"Length", None) + ) from e + stream_data = data[m.end() : m.end() + stream_len] + m = cls.re_stream_end.match(data, m.end() + stream_len) + check_format_condition(m, "stream end not found") + offset = m.end() + result = PdfStream(PdfDict(result), stream_data) + else: + result = PdfDict(result) + return result, offset + m = cls.re_array_start.match(data, offset) + if m: + offset = m.end() + result = [] + m = cls.re_array_end.match(data, offset) + while not m: + value, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1) + result.append(value) + if offset is None: + return result, None + m = cls.re_array_end.match(data, offset) + return result, m.end() + m = cls.re_null.match(data, offset) + if m: + return None, m.end() + m = cls.re_true.match(data, offset) + if m: + return True, m.end() + m = cls.re_false.match(data, offset) + if m: + return False, m.end() + m = cls.re_name.match(data, offset) + if m: + return PdfName(cls.interpret_name(m.group(1))), m.end() + m = cls.re_int.match(data, offset) + if m: + return int(m.group(1)), m.end() + m = cls.re_real.match(data, offset) + if m: + # XXX Decimal instead of float??? 
+ return float(m.group(1)), m.end() + m = cls.re_string_hex.match(data, offset) + if m: + # filter out whitespace + hex_string = bytearray( + [b for b in m.group(1) if b in b"0123456789abcdefABCDEF"] + ) + if len(hex_string) % 2 == 1: + # append a 0 if the length is not even - yes, at the end + hex_string.append(ord(b"0")) + return bytearray.fromhex(hex_string.decode("us-ascii")), m.end() + m = cls.re_string_lit.match(data, offset) + if m: + return cls.get_literal_string(data, m.end()) + # return None, offset # fallback (only for debugging) + raise PdfFormatError("unrecognized object: " + repr(data[offset : offset + 32])) + + re_lit_str_token = re.compile( + br"(\\[nrtbf()\\])|(\\[0-9]{1,3})|(\\(\r\n|\r|\n))|(\r\n|\r|\n)|(\()|(\))" + ) + escaped_chars = { + b"n": b"\n", + b"r": b"\r", + b"t": b"\t", + b"b": b"\b", + b"f": b"\f", + b"(": b"(", + b")": b")", + b"\\": b"\\", + ord(b"n"): b"\n", + ord(b"r"): b"\r", + ord(b"t"): b"\t", + ord(b"b"): b"\b", + ord(b"f"): b"\f", + ord(b"("): b"(", + ord(b")"): b")", + ord(b"\\"): b"\\", + } + + @classmethod + def get_literal_string(cls, data, offset): + nesting_depth = 0 + result = bytearray() + for m in cls.re_lit_str_token.finditer(data, offset): + result.extend(data[offset : m.start()]) + if m.group(1): + result.extend(cls.escaped_chars[m.group(1)[1]]) + elif m.group(2): + result.append(int(m.group(2)[1:], 8)) + elif m.group(3): + pass + elif m.group(5): + result.extend(b"\n") + elif m.group(6): + result.extend(b"(") + nesting_depth += 1 + elif m.group(7): + if nesting_depth == 0: + return bytes(result), m.end() + result.extend(b")") + nesting_depth -= 1 + offset = m.end() + raise PdfFormatError("unfinished literal string") + + re_xref_section_start = re.compile(whitespace_optional + br"xref" + newline) + re_xref_subsection_start = re.compile( + whitespace_optional + + br"([0-9]+)" + + whitespace_mandatory + + br"([0-9]+)" + + whitespace_optional + + newline_only + ) + re_xref_entry = re.compile(br"([0-9]{10}) ([0-9]{5}) 
([fn])( \r| \n|\r\n)") + + def read_xref_table(self, xref_section_offset): + subsection_found = False + m = self.re_xref_section_start.match( + self.buf, xref_section_offset + self.start_offset + ) + check_format_condition(m, "xref section start not found") + offset = m.end() + while True: + m = self.re_xref_subsection_start.match(self.buf, offset) + if not m: + check_format_condition( + subsection_found, "xref subsection start not found" + ) + break + subsection_found = True + offset = m.end() + first_object = int(m.group(1)) + num_objects = int(m.group(2)) + for i in range(first_object, first_object + num_objects): + m = self.re_xref_entry.match(self.buf, offset) + check_format_condition(m, "xref entry not found") + offset = m.end() + is_free = m.group(3) == b"f" + generation = int(m.group(2)) + if not is_free: + new_entry = (int(m.group(1)), generation) + check_format_condition( + i not in self.xref_table or self.xref_table[i] == new_entry, + "xref entry duplicated (and not identical)", + ) + self.xref_table[i] = new_entry + return offset + + def read_indirect(self, ref, max_nesting=-1): + offset, generation = self.xref_table[ref[0]] + check_format_condition( + generation == ref[1], + f"expected to find generation {ref[1]} for object ID {ref[0]} in xref " + f"table, instead found generation {generation} at offset {offset}", + ) + value = self.get_value( + self.buf, + offset + self.start_offset, + expect_indirect=IndirectReference(*ref), + max_nesting=max_nesting, + )[0] + self.cached_objects[ref] = value + return value + + def linearize_page_tree(self, node=None): + if node is None: + node = self.page_tree_root + check_format_condition( + node[b"Type"] == b"Pages", "/Type of page tree node is not /Pages" + ) + pages = [] + for kid in node[b"Kids"]: + kid_object = self.read_indirect(kid) + if kid_object[b"Type"] == b"Page": + pages.append(kid) + else: + pages.extend(self.linearize_page_tree(node=kid_object)) + return pages diff --git 
a/.venv/lib/python3.9/site-packages/PIL/PixarImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/PixarImagePlugin.py new file mode 100644 index 00000000..c4860b6c --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/PixarImagePlugin.py @@ -0,0 +1,70 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PIXAR raster support for PIL +# +# history: +# 97-01-29 fl Created +# +# notes: +# This is incomplete; it is based on a few samples created with +# Photoshop 2.5 and 3.0, and a summary description provided by +# Greg Coats . Hopefully, "L" and +# "RGBA" support will be added in future versions. +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile +from ._binary import i16le as i16 + +# +# helpers + + +def _accept(prefix): + return prefix[:4] == b"\200\350\000\000" + + +## +# Image plugin for PIXAR raster images. + + +class PixarImageFile(ImageFile.ImageFile): + + format = "PIXAR" + format_description = "PIXAR raster image" + + def _open(self): + + # assuming a 4-byte magic label + s = self.fp.read(4) + if not _accept(s): + raise SyntaxError("not a PIXAR file") + + # read rest of header + s = s + self.fp.read(508) + + self._size = i16(s, 418), i16(s, 416) + + # get channel/depth descriptions + mode = i16(s, 424), i16(s, 426) + + if mode == (14, 2): + self.mode = "RGB" + # FIXME: to be continued... 
+ + # create tile descriptor (assuming "dumped") + self.tile = [("raw", (0, 0) + self.size, 1024, (self.mode, 0, 1))] + + +# +# -------------------------------------------------------------------- + +Image.register_open(PixarImageFile.format, PixarImageFile, _accept) + +Image.register_extension(PixarImageFile.format, ".pxr") diff --git a/.venv/lib/python3.9/site-packages/PIL/PngImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/PngImagePlugin.py new file mode 100644 index 00000000..0f596f1f --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/PngImagePlugin.py @@ -0,0 +1,1406 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PNG support code +# +# See "PNG (Portable Network Graphics) Specification, version 1.0; +# W3C Recommendation", 1996-10-01, Thomas Boutell (ed.). +# +# history: +# 1996-05-06 fl Created (couldn't resist it) +# 1996-12-14 fl Upgraded, added read and verify support (0.2) +# 1996-12-15 fl Separate PNG stream parser +# 1996-12-29 fl Added write support, added getchunks +# 1996-12-30 fl Eliminated circular references in decoder (0.3) +# 1998-07-12 fl Read/write 16-bit images as mode I (0.4) +# 2001-02-08 fl Added transparency support (from Zircon) (0.5) +# 2001-04-16 fl Don't close data source in "open" method (0.6) +# 2004-02-24 fl Don't even pretend to support interlaced files (0.7) +# 2004-08-31 fl Do basic sanity check on chunk identifiers (0.8) +# 2004-09-20 fl Added PngInfo chunk container +# 2004-12-18 fl Added DPI read support (based on code by Niki Spahiev) +# 2008-08-13 fl Added tRNS support for RGB images +# 2009-03-06 fl Support for preserving ICC profiles (by Florian Hoech) +# 2009-03-08 fl Added zTXT support (from Lowell Alleman) +# 2009-03-29 fl Read interlaced PNG files (from Conrado Porto Lopes Gouvua) +# +# Copyright (c) 1997-2009 by Secret Labs AB +# Copyright (c) 1996 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. 
+# + +import itertools +import logging +import re +import struct +import warnings +import zlib + +from . import Image, ImageChops, ImageFile, ImagePalette, ImageSequence +from ._binary import i16be as i16 +from ._binary import i32be as i32 +from ._binary import o8 +from ._binary import o16be as o16 +from ._binary import o32be as o32 + +logger = logging.getLogger(__name__) + +is_cid = re.compile(br"\w\w\w\w").match + + +_MAGIC = b"\211PNG\r\n\032\n" + + +_MODES = { + # supported bits/color combinations, and corresponding modes/rawmodes + # Greyscale + (1, 0): ("1", "1"), + (2, 0): ("L", "L;2"), + (4, 0): ("L", "L;4"), + (8, 0): ("L", "L"), + (16, 0): ("I", "I;16B"), + # Truecolour + (8, 2): ("RGB", "RGB"), + (16, 2): ("RGB", "RGB;16B"), + # Indexed-colour + (1, 3): ("P", "P;1"), + (2, 3): ("P", "P;2"), + (4, 3): ("P", "P;4"), + (8, 3): ("P", "P"), + # Greyscale with alpha + (8, 4): ("LA", "LA"), + (16, 4): ("RGBA", "LA;16B"), # LA;16B->LA not yet available + # Truecolour with alpha + (8, 6): ("RGBA", "RGBA"), + (16, 6): ("RGBA", "RGBA;16B"), +} + + +_simple_palette = re.compile(b"^\xff*\x00\xff*$") + +MAX_TEXT_CHUNK = ImageFile.SAFEBLOCK +""" +Maximum decompressed size for a iTXt or zTXt chunk. +Eliminates decompression bombs where compressed chunks can expand 1000x. +See :ref:`Text in PNG File Format`. +""" +MAX_TEXT_MEMORY = 64 * MAX_TEXT_CHUNK +""" +Set the maximum total text chunk size. +See :ref:`Text in PNG File Format`. +""" + + +# APNG frame disposal modes +APNG_DISPOSE_OP_NONE = 0 +""" +No disposal is done on this frame before rendering the next frame. +See :ref:`Saving APNG sequences`. +""" +APNG_DISPOSE_OP_BACKGROUND = 1 +""" +This frame’s modified region is cleared to fully transparent black before rendering +the next frame. +See :ref:`Saving APNG sequences`. +""" +APNG_DISPOSE_OP_PREVIOUS = 2 +""" +This frame’s modified region is reverted to the previous frame’s contents before +rendering the next frame. +See :ref:`Saving APNG sequences`. 
+""" + +# APNG frame blend modes +APNG_BLEND_OP_SOURCE = 0 +""" +All color components of this frame, including alpha, overwrite the previous output +image contents. +See :ref:`Saving APNG sequences`. +""" +APNG_BLEND_OP_OVER = 1 +""" +This frame should be alpha composited with the previous output image contents. +See :ref:`Saving APNG sequences`. +""" + + +def _safe_zlib_decompress(s): + dobj = zlib.decompressobj() + plaintext = dobj.decompress(s, MAX_TEXT_CHUNK) + if dobj.unconsumed_tail: + raise ValueError("Decompressed Data Too Large") + return plaintext + + +def _crc32(data, seed=0): + return zlib.crc32(data, seed) & 0xFFFFFFFF + + +# -------------------------------------------------------------------- +# Support classes. Suitable for PNG and related formats like MNG etc. + + +class ChunkStream: + def __init__(self, fp): + + self.fp = fp + self.queue = [] + + def read(self): + """Fetch a new chunk. Returns header information.""" + cid = None + + if self.queue: + cid, pos, length = self.queue.pop() + self.fp.seek(pos) + else: + s = self.fp.read(8) + cid = s[4:] + pos = self.fp.tell() + length = i32(s) + + if not is_cid(cid): + if not ImageFile.LOAD_TRUNCATED_IMAGES: + raise SyntaxError(f"broken PNG file (chunk {repr(cid)})") + + return cid, pos, length + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def close(self): + self.queue = self.crc = self.fp = None + + def push(self, cid, pos, length): + + self.queue.append((cid, pos, length)) + + def call(self, cid, pos, length): + """Call the appropriate chunk handler""" + + logger.debug("STREAM %r %s %s", cid, pos, length) + return getattr(self, "chunk_" + cid.decode("ascii"))(pos, length) + + def crc(self, cid, data): + """Read and verify checksum""" + + # Skip CRC checks for ancillary chunks if allowed to load truncated + # images + # 5th byte of first char is 1 [specs, section 5.4] + if ImageFile.LOAD_TRUNCATED_IMAGES and (cid[0] >> 5 & 1): + self.crc_skip(cid, data) + 
return + + try: + crc1 = _crc32(data, _crc32(cid)) + crc2 = i32(self.fp.read(4)) + if crc1 != crc2: + raise SyntaxError( + f"broken PNG file (bad header checksum in {repr(cid)})" + ) + except struct.error as e: + raise SyntaxError( + f"broken PNG file (incomplete checksum in {repr(cid)})" + ) from e + + def crc_skip(self, cid, data): + """Read checksum. Used if the C module is not present""" + + self.fp.read(4) + + def verify(self, endchunk=b"IEND"): + + # Simple approach; just calculate checksum for all remaining + # blocks. Must be called directly after open. + + cids = [] + + while True: + try: + cid, pos, length = self.read() + except struct.error as e: + raise OSError("truncated PNG file") from e + + if cid == endchunk: + break + self.crc(cid, ImageFile._safe_read(self.fp, length)) + cids.append(cid) + + return cids + + +class iTXt(str): + """ + Subclass of string to allow iTXt chunks to look like strings while + keeping their extra information + + """ + + @staticmethod + def __new__(cls, text, lang=None, tkey=None): + """ + :param cls: the class to use when creating the instance + :param text: value for this key + :param lang: language code + :param tkey: UTF-8 version of the key name + """ + + self = str.__new__(cls, text) + self.lang = lang + self.tkey = tkey + return self + + +class PngInfo: + """ + PNG chunk container (for use with save(pnginfo=)) + + """ + + def __init__(self): + self.chunks = [] + + def add(self, cid, data, after_idat=False): + """Appends an arbitrary chunk. Use with caution. + + :param cid: a byte string, 4 bytes long. + :param data: a byte string of the encoded data + :param after_idat: for use with private chunks. Whether the chunk + should be written after IDAT + + """ + + chunk = [cid, data] + if after_idat: + chunk.append(True) + self.chunks.append(tuple(chunk)) + + def add_itxt(self, key, value, lang="", tkey="", zip=False): + """Appends an iTXt chunk. 
+ + :param key: latin-1 encodable text key name + :param value: value for this key + :param lang: language code + :param tkey: UTF-8 version of the key name + :param zip: compression flag + + """ + + if not isinstance(key, bytes): + key = key.encode("latin-1", "strict") + if not isinstance(value, bytes): + value = value.encode("utf-8", "strict") + if not isinstance(lang, bytes): + lang = lang.encode("utf-8", "strict") + if not isinstance(tkey, bytes): + tkey = tkey.encode("utf-8", "strict") + + if zip: + self.add( + b"iTXt", + key + b"\0\x01\0" + lang + b"\0" + tkey + b"\0" + zlib.compress(value), + ) + else: + self.add(b"iTXt", key + b"\0\0\0" + lang + b"\0" + tkey + b"\0" + value) + + def add_text(self, key, value, zip=False): + """Appends a text chunk. + + :param key: latin-1 encodable text key name + :param value: value for this key, text or an + :py:class:`PIL.PngImagePlugin.iTXt` instance + :param zip: compression flag + + """ + if isinstance(value, iTXt): + return self.add_itxt(key, value, value.lang, value.tkey, zip=zip) + + # The tEXt chunk stores latin-1 text + if not isinstance(value, bytes): + try: + value = value.encode("latin-1", "strict") + except UnicodeError: + return self.add_itxt(key, value, zip=zip) + + if not isinstance(key, bytes): + key = key.encode("latin-1", "strict") + + if zip: + self.add(b"zTXt", key + b"\0\0" + zlib.compress(value)) + else: + self.add(b"tEXt", key + b"\0" + value) + + +# -------------------------------------------------------------------- +# PNG image stream (IHDR/IEND) + + +class PngStream(ChunkStream): + def __init__(self, fp): + super().__init__(fp) + + # local copies of Image attributes + self.im_info = {} + self.im_text = {} + self.im_size = (0, 0) + self.im_mode = None + self.im_tile = None + self.im_palette = None + self.im_custom_mimetype = None + self.im_n_frames = None + self._seq_num = None + self.rewind_state = None + + self.text_memory = 0 + + def check_text_memory(self, chunklen): + self.text_memory += 
chunklen + if self.text_memory > MAX_TEXT_MEMORY: + raise ValueError( + "Too much memory used in text chunks: " + f"{self.text_memory}>MAX_TEXT_MEMORY" + ) + + def save_rewind(self): + self.rewind_state = { + "info": self.im_info.copy(), + "tile": self.im_tile, + "seq_num": self._seq_num, + } + + def rewind(self): + self.im_info = self.rewind_state["info"] + self.im_tile = self.rewind_state["tile"] + self._seq_num = self.rewind_state["seq_num"] + + def chunk_iCCP(self, pos, length): + + # ICC profile + s = ImageFile._safe_read(self.fp, length) + # according to PNG spec, the iCCP chunk contains: + # Profile name 1-79 bytes (character string) + # Null separator 1 byte (null character) + # Compression method 1 byte (0) + # Compressed profile n bytes (zlib with deflate compression) + i = s.find(b"\0") + logger.debug("iCCP profile name %r", s[:i]) + logger.debug("Compression method %s", s[i]) + comp_method = s[i] + if comp_method != 0: + raise SyntaxError(f"Unknown compression method {comp_method} in iCCP chunk") + try: + icc_profile = _safe_zlib_decompress(s[i + 2 :]) + except ValueError: + if ImageFile.LOAD_TRUNCATED_IMAGES: + icc_profile = None + else: + raise + except zlib.error: + icc_profile = None # FIXME + self.im_info["icc_profile"] = icc_profile + return s + + def chunk_IHDR(self, pos, length): + + # image header + s = ImageFile._safe_read(self.fp, length) + self.im_size = i32(s, 0), i32(s, 4) + try: + self.im_mode, self.im_rawmode = _MODES[(s[8], s[9])] + except Exception: + pass + if s[12]: + self.im_info["interlace"] = 1 + if s[11]: + raise SyntaxError("unknown filter category") + return s + + def chunk_IDAT(self, pos, length): + + # image data + if "bbox" in self.im_info: + tile = [("zip", self.im_info["bbox"], pos, self.im_rawmode)] + else: + if self.im_n_frames is not None: + self.im_info["default_image"] = True + tile = [("zip", (0, 0) + self.im_size, pos, self.im_rawmode)] + self.im_tile = tile + self.im_idat = length + raise EOFError + + def 
chunk_IEND(self, pos, length): + + # end of PNG image + raise EOFError + + def chunk_PLTE(self, pos, length): + + # palette + s = ImageFile._safe_read(self.fp, length) + if self.im_mode == "P": + self.im_palette = "RGB", s + return s + + def chunk_tRNS(self, pos, length): + + # transparency + s = ImageFile._safe_read(self.fp, length) + if self.im_mode == "P": + if _simple_palette.match(s): + # tRNS contains only one full-transparent entry, + # other entries are full opaque + i = s.find(b"\0") + if i >= 0: + self.im_info["transparency"] = i + else: + # otherwise, we have a byte string with one alpha value + # for each palette entry + self.im_info["transparency"] = s + elif self.im_mode in ("1", "L", "I"): + self.im_info["transparency"] = i16(s) + elif self.im_mode == "RGB": + self.im_info["transparency"] = i16(s), i16(s, 2), i16(s, 4) + return s + + def chunk_gAMA(self, pos, length): + # gamma setting + s = ImageFile._safe_read(self.fp, length) + self.im_info["gamma"] = i32(s) / 100000.0 + return s + + def chunk_cHRM(self, pos, length): + # chromaticity, 8 unsigned ints, actual value is scaled by 100,000 + # WP x,y, Red x,y, Green x,y Blue x,y + + s = ImageFile._safe_read(self.fp, length) + raw_vals = struct.unpack(">%dI" % (len(s) // 4), s) + self.im_info["chromaticity"] = tuple(elt / 100000.0 for elt in raw_vals) + return s + + def chunk_sRGB(self, pos, length): + # srgb rendering intent, 1 byte + # 0 perceptual + # 1 relative colorimetric + # 2 saturation + # 3 absolute colorimetric + + s = ImageFile._safe_read(self.fp, length) + self.im_info["srgb"] = s[0] + return s + + def chunk_pHYs(self, pos, length): + + # pixels per unit + s = ImageFile._safe_read(self.fp, length) + px, py = i32(s, 0), i32(s, 4) + unit = s[8] + if unit == 1: # meter + dpi = px * 0.0254, py * 0.0254 + self.im_info["dpi"] = dpi + elif unit == 0: + self.im_info["aspect"] = px, py + return s + + def chunk_tEXt(self, pos, length): + + # text + s = ImageFile._safe_read(self.fp, length) + try: + 
k, v = s.split(b"\0", 1) + except ValueError: + # fallback for broken tEXt tags + k = s + v = b"" + if k: + k = k.decode("latin-1", "strict") + v_str = v.decode("latin-1", "replace") + + self.im_info[k] = v if k == "exif" else v_str + self.im_text[k] = v_str + self.check_text_memory(len(v_str)) + + return s + + def chunk_zTXt(self, pos, length): + + # compressed text + s = ImageFile._safe_read(self.fp, length) + try: + k, v = s.split(b"\0", 1) + except ValueError: + k = s + v = b"" + if v: + comp_method = v[0] + else: + comp_method = 0 + if comp_method != 0: + raise SyntaxError(f"Unknown compression method {comp_method} in zTXt chunk") + try: + v = _safe_zlib_decompress(v[1:]) + except ValueError: + if ImageFile.LOAD_TRUNCATED_IMAGES: + v = b"" + else: + raise + except zlib.error: + v = b"" + + if k: + k = k.decode("latin-1", "strict") + v = v.decode("latin-1", "replace") + + self.im_info[k] = self.im_text[k] = v + self.check_text_memory(len(v)) + + return s + + def chunk_iTXt(self, pos, length): + + # international text + r = s = ImageFile._safe_read(self.fp, length) + try: + k, r = r.split(b"\0", 1) + except ValueError: + return s + if len(r) < 2: + return s + cf, cm, r = r[0], r[1], r[2:] + try: + lang, tk, v = r.split(b"\0", 2) + except ValueError: + return s + if cf != 0: + if cm == 0: + try: + v = _safe_zlib_decompress(v) + except ValueError: + if ImageFile.LOAD_TRUNCATED_IMAGES: + return s + else: + raise + except zlib.error: + return s + else: + return s + try: + k = k.decode("latin-1", "strict") + lang = lang.decode("utf-8", "strict") + tk = tk.decode("utf-8", "strict") + v = v.decode("utf-8", "strict") + except UnicodeError: + return s + + self.im_info[k] = self.im_text[k] = iTXt(v, lang, tk) + self.check_text_memory(len(v)) + + return s + + def chunk_eXIf(self, pos, length): + s = ImageFile._safe_read(self.fp, length) + self.im_info["exif"] = b"Exif\x00\x00" + s + return s + + # APNG chunks + def chunk_acTL(self, pos, length): + s = 
ImageFile._safe_read(self.fp, length) + if self.im_n_frames is not None: + self.im_n_frames = None + warnings.warn("Invalid APNG, will use default PNG image if possible") + return s + n_frames = i32(s) + if n_frames == 0 or n_frames > 0x80000000: + warnings.warn("Invalid APNG, will use default PNG image if possible") + return s + self.im_n_frames = n_frames + self.im_info["loop"] = i32(s, 4) + self.im_custom_mimetype = "image/apng" + return s + + def chunk_fcTL(self, pos, length): + s = ImageFile._safe_read(self.fp, length) + seq = i32(s) + if (self._seq_num is None and seq != 0) or ( + self._seq_num is not None and self._seq_num != seq - 1 + ): + raise SyntaxError("APNG contains frame sequence errors") + self._seq_num = seq + width, height = i32(s, 4), i32(s, 8) + px, py = i32(s, 12), i32(s, 16) + im_w, im_h = self.im_size + if px + width > im_w or py + height > im_h: + raise SyntaxError("APNG contains invalid frames") + self.im_info["bbox"] = (px, py, px + width, py + height) + delay_num, delay_den = i16(s, 20), i16(s, 22) + if delay_den == 0: + delay_den = 100 + self.im_info["duration"] = float(delay_num) / float(delay_den) * 1000 + self.im_info["disposal"] = s[24] + self.im_info["blend"] = s[25] + return s + + def chunk_fdAT(self, pos, length): + s = ImageFile._safe_read(self.fp, 4) + seq = i32(s) + if self._seq_num != seq - 1: + raise SyntaxError("APNG contains frame sequence errors") + self._seq_num = seq + return self.chunk_IDAT(pos + 4, length - 4) + + +# -------------------------------------------------------------------- +# PNG reader + + +def _accept(prefix): + return prefix[:8] == _MAGIC + + +## +# Image plugin for PNG images. 
+ + +class PngImageFile(ImageFile.ImageFile): + + format = "PNG" + format_description = "Portable network graphics" + + def _open(self): + + if not _accept(self.fp.read(8)): + raise SyntaxError("not a PNG file") + self.__fp = self.fp + self.__frame = 0 + + # + # Parse headers up to the first IDAT or fDAT chunk + + self.private_chunks = [] + self.png = PngStream(self.fp) + + while True: + + # + # get next chunk + + cid, pos, length = self.png.read() + + try: + s = self.png.call(cid, pos, length) + except EOFError: + break + except AttributeError: + logger.debug("%r %s %s (unknown)", cid, pos, length) + s = ImageFile._safe_read(self.fp, length) + if cid[1:2].islower(): + self.private_chunks.append((cid, s)) + + self.png.crc(cid, s) + + # + # Copy relevant attributes from the PngStream. An alternative + # would be to let the PngStream class modify these attributes + # directly, but that introduces circular references which are + # difficult to break if things go wrong in the decoder... + # (believe me, I've tried ;-) + + self.mode = self.png.im_mode + self._size = self.png.im_size + self.info = self.png.im_info + self._text = None + self.tile = self.png.im_tile + self.custom_mimetype = self.png.im_custom_mimetype + self.n_frames = self.png.im_n_frames or 1 + self.default_image = self.info.get("default_image", False) + + if self.png.im_palette: + rawmode, data = self.png.im_palette + self.palette = ImagePalette.raw(rawmode, data) + + if cid == b"fdAT": + self.__prepare_idat = length - 4 + else: + self.__prepare_idat = length # used by load_prepare() + + if self.png.im_n_frames is not None: + self._close_exclusive_fp_after_loading = False + self.png.save_rewind() + self.__rewind_idat = self.__prepare_idat + self.__rewind = self.__fp.tell() + if self.default_image: + # IDAT chunk contains default image and not first animation frame + self.n_frames += 1 + self._seek(0) + self.is_animated = self.n_frames > 1 + + @property + def text(self): + # experimental + if self._text 
is None: + # iTxt, tEXt and zTXt chunks may appear at the end of the file + # So load the file to ensure that they are read + if self.is_animated: + frame = self.__frame + # for APNG, seek to the final frame before loading + self.seek(self.n_frames - 1) + self.load() + if self.is_animated: + self.seek(frame) + return self._text + + def verify(self): + """Verify PNG file""" + + if self.fp is None: + raise RuntimeError("verify must be called directly after open") + + # back up to beginning of IDAT block + self.fp.seek(self.tile[0][2] - 8) + + self.png.verify() + self.png.close() + + if self._exclusive_fp: + self.fp.close() + self.fp = None + + def seek(self, frame): + if not self._seek_check(frame): + return + if frame < self.__frame: + self._seek(0, True) + + last_frame = self.__frame + for f in range(self.__frame + 1, frame + 1): + try: + self._seek(f) + except EOFError as e: + self.seek(last_frame) + raise EOFError("no more images in APNG file") from e + + def _seek(self, frame, rewind=False): + if frame == 0: + if rewind: + self.__fp.seek(self.__rewind) + self.png.rewind() + self.__prepare_idat = self.__rewind_idat + self.im = None + if self.pyaccess: + self.pyaccess = None + self.info = self.png.im_info + self.tile = self.png.im_tile + self.fp = self.__fp + self._prev_im = None + self.dispose = None + self.default_image = self.info.get("default_image", False) + self.dispose_op = self.info.get("disposal") + self.blend_op = self.info.get("blend") + self.dispose_extent = self.info.get("bbox") + self.__frame = 0 + else: + if frame != self.__frame + 1: + raise ValueError(f"cannot seek to frame {frame}") + + # ensure previous frame was loaded + self.load() + + if self.dispose: + self.im.paste(self.dispose, self.dispose_extent) + self._prev_im = self.im.copy() + + self.fp = self.__fp + + # advance to the next frame + if self.__prepare_idat: + ImageFile._safe_read(self.fp, self.__prepare_idat) + self.__prepare_idat = 0 + frame_start = False + while True: + 
self.fp.read(4) # CRC + + try: + cid, pos, length = self.png.read() + except (struct.error, SyntaxError): + break + + if cid == b"IEND": + raise EOFError("No more images in APNG file") + if cid == b"fcTL": + if frame_start: + # there must be at least one fdAT chunk between fcTL chunks + raise SyntaxError("APNG missing frame data") + frame_start = True + + try: + self.png.call(cid, pos, length) + except UnicodeDecodeError: + break + except EOFError: + if cid == b"fdAT": + length -= 4 + if frame_start: + self.__prepare_idat = length + break + ImageFile._safe_read(self.fp, length) + except AttributeError: + logger.debug("%r %s %s (unknown)", cid, pos, length) + ImageFile._safe_read(self.fp, length) + + self.__frame = frame + self.tile = self.png.im_tile + self.dispose_op = self.info.get("disposal") + self.blend_op = self.info.get("blend") + self.dispose_extent = self.info.get("bbox") + + if not self.tile: + raise EOFError + + # setup frame disposal (actual disposal done when needed in the next _seek()) + if self._prev_im is None and self.dispose_op == APNG_DISPOSE_OP_PREVIOUS: + self.dispose_op = APNG_DISPOSE_OP_BACKGROUND + + if self.dispose_op == APNG_DISPOSE_OP_PREVIOUS: + self.dispose = self._prev_im.copy() + self.dispose = self._crop(self.dispose, self.dispose_extent) + elif self.dispose_op == APNG_DISPOSE_OP_BACKGROUND: + self.dispose = Image.core.fill(self.mode, self.size) + self.dispose = self._crop(self.dispose, self.dispose_extent) + else: + self.dispose = None + + def tell(self): + return self.__frame + + def load_prepare(self): + """internal: prepare to read PNG file""" + + if self.info.get("interlace"): + self.decoderconfig = self.decoderconfig + (1,) + + self.__idat = self.__prepare_idat # used by load_read() + ImageFile.ImageFile.load_prepare(self) + + def load_read(self, read_bytes): + """internal: read more image data""" + + while self.__idat == 0: + # end of chunk, skip forward to next one + + self.fp.read(4) # CRC + + cid, pos, length = 
self.png.read() + + if cid not in [b"IDAT", b"DDAT", b"fdAT"]: + self.png.push(cid, pos, length) + return b"" + + if cid == b"fdAT": + try: + self.png.call(cid, pos, length) + except EOFError: + pass + self.__idat = length - 4 # sequence_num has already been read + else: + self.__idat = length # empty chunks are allowed + + # read more data from this chunk + if read_bytes <= 0: + read_bytes = self.__idat + else: + read_bytes = min(read_bytes, self.__idat) + + self.__idat = self.__idat - read_bytes + + return self.fp.read(read_bytes) + + def load_end(self): + """internal: finished reading image data""" + if self.__idat != 0: + self.fp.read(self.__idat) + while True: + self.fp.read(4) # CRC + + try: + cid, pos, length = self.png.read() + except (struct.error, SyntaxError): + break + + if cid == b"IEND": + break + elif cid == b"fcTL" and self.is_animated: + # start of the next frame, stop reading + self.__prepare_idat = 0 + self.png.push(cid, pos, length) + break + + try: + self.png.call(cid, pos, length) + except UnicodeDecodeError: + break + except EOFError: + if cid == b"fdAT": + length -= 4 + ImageFile._safe_read(self.fp, length) + except AttributeError: + logger.debug("%r %s %s (unknown)", cid, pos, length) + s = ImageFile._safe_read(self.fp, length) + if cid[1:2].islower(): + self.private_chunks.append((cid, s, True)) + self._text = self.png.im_text + if not self.is_animated: + self.png.close() + self.png = None + else: + if self._prev_im and self.blend_op == APNG_BLEND_OP_OVER: + updated = self._crop(self.im, self.dispose_extent) + self._prev_im.paste( + updated, self.dispose_extent, updated.convert("RGBA") + ) + self.im = self._prev_im + if self.pyaccess: + self.pyaccess = None + + def _getexif(self): + if "exif" not in self.info: + self.load() + if "exif" not in self.info and "Raw profile type exif" not in self.info: + return None + return self.getexif()._get_merged_dict() + + def getexif(self): + if "exif" not in self.info: + self.load() + + return 
super().getexif() + + def getxmp(self): + """ + Returns a dictionary containing the XMP tags. + Requires defusedxml to be installed. + :returns: XMP tags in a dictionary. + """ + return ( + self._getxmp(self.info["XML:com.adobe.xmp"]) + if "XML:com.adobe.xmp" in self.info + else {} + ) + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +# -------------------------------------------------------------------- +# PNG writer + +_OUTMODES = { + # supported PIL modes, and corresponding rawmodes/bits/color combinations + "1": ("1", b"\x01\x00"), + "L;1": ("L;1", b"\x01\x00"), + "L;2": ("L;2", b"\x02\x00"), + "L;4": ("L;4", b"\x04\x00"), + "L": ("L", b"\x08\x00"), + "LA": ("LA", b"\x08\x04"), + "I": ("I;16B", b"\x10\x00"), + "I;16": ("I;16B", b"\x10\x00"), + "P;1": ("P;1", b"\x01\x03"), + "P;2": ("P;2", b"\x02\x03"), + "P;4": ("P;4", b"\x04\x03"), + "P": ("P", b"\x08\x03"), + "RGB": ("RGB", b"\x08\x02"), + "RGBA": ("RGBA", b"\x08\x06"), +} + + +def putchunk(fp, cid, *data): + """Write a PNG chunk (including CRC field)""" + + data = b"".join(data) + + fp.write(o32(len(data)) + cid) + fp.write(data) + crc = _crc32(data, _crc32(cid)) + fp.write(o32(crc)) + + +class _idat: + # wrap output from the encoder in IDAT chunks + + def __init__(self, fp, chunk): + self.fp = fp + self.chunk = chunk + + def write(self, data): + self.chunk(self.fp, b"IDAT", data) + + +class _fdat: + # wrap encoder output in fdAT chunks + + def __init__(self, fp, chunk, seq_num): + self.fp = fp + self.chunk = chunk + self.seq_num = seq_num + + def write(self, data): + self.chunk(self.fp, b"fdAT", o32(self.seq_num), data) + self.seq_num += 1 + + +def _write_multiple_frames(im, fp, chunk, rawmode): + default_image = im.encoderinfo.get("default_image", im.info.get("default_image")) + duration = im.encoderinfo.get("duration", im.info.get("duration", 0)) + loop = im.encoderinfo.get("loop", im.info.get("loop", 0)) + 
disposal = im.encoderinfo.get( + "disposal", im.info.get("disposal", APNG_DISPOSE_OP_NONE) + ) + blend = im.encoderinfo.get("blend", im.info.get("blend", APNG_BLEND_OP_SOURCE)) + + if default_image: + chain = itertools.chain(im.encoderinfo.get("append_images", [])) + else: + chain = itertools.chain([im], im.encoderinfo.get("append_images", [])) + + im_frames = [] + frame_count = 0 + for im_seq in chain: + for im_frame in ImageSequence.Iterator(im_seq): + im_frame = im_frame.copy() + if im_frame.mode != im.mode: + if im.mode == "P": + im_frame = im_frame.convert(im.mode, palette=im.palette) + else: + im_frame = im_frame.convert(im.mode) + encoderinfo = im.encoderinfo.copy() + if isinstance(duration, (list, tuple)): + encoderinfo["duration"] = duration[frame_count] + if isinstance(disposal, (list, tuple)): + encoderinfo["disposal"] = disposal[frame_count] + if isinstance(blend, (list, tuple)): + encoderinfo["blend"] = blend[frame_count] + frame_count += 1 + + if im_frames: + previous = im_frames[-1] + prev_disposal = previous["encoderinfo"].get("disposal") + prev_blend = previous["encoderinfo"].get("blend") + if prev_disposal == APNG_DISPOSE_OP_PREVIOUS and len(im_frames) < 2: + prev_disposal = APNG_DISPOSE_OP_BACKGROUND + + if prev_disposal == APNG_DISPOSE_OP_BACKGROUND: + base_im = previous["im"] + dispose = Image.core.fill("RGBA", im.size, (0, 0, 0, 0)) + bbox = previous["bbox"] + if bbox: + dispose = dispose.crop(bbox) + else: + bbox = (0, 0) + im.size + base_im.paste(dispose, bbox) + elif prev_disposal == APNG_DISPOSE_OP_PREVIOUS: + base_im = im_frames[-2]["im"] + else: + base_im = previous["im"] + delta = ImageChops.subtract_modulo( + im_frame.convert("RGB"), base_im.convert("RGB") + ) + bbox = delta.getbbox() + if ( + not bbox + and prev_disposal == encoderinfo.get("disposal") + and prev_blend == encoderinfo.get("blend") + ): + if isinstance(duration, (list, tuple)): + previous["encoderinfo"]["duration"] += encoderinfo["duration"] + continue + else: + bbox = 
None + im_frames.append({"im": im_frame, "bbox": bbox, "encoderinfo": encoderinfo}) + + # animation control + chunk( + fp, + b"acTL", + o32(len(im_frames)), # 0: num_frames + o32(loop), # 4: num_plays + ) + + # default image IDAT (if it exists) + if default_image: + ImageFile._save(im, _idat(fp, chunk), [("zip", (0, 0) + im.size, 0, rawmode)]) + + seq_num = 0 + for frame, frame_data in enumerate(im_frames): + im_frame = frame_data["im"] + if not frame_data["bbox"]: + bbox = (0, 0) + im_frame.size + else: + bbox = frame_data["bbox"] + im_frame = im_frame.crop(bbox) + size = im_frame.size + encoderinfo = frame_data["encoderinfo"] + frame_duration = int(round(encoderinfo.get("duration", duration))) + frame_disposal = encoderinfo.get("disposal", disposal) + frame_blend = encoderinfo.get("blend", blend) + # frame control + chunk( + fp, + b"fcTL", + o32(seq_num), # sequence_number + o32(size[0]), # width + o32(size[1]), # height + o32(bbox[0]), # x_offset + o32(bbox[1]), # y_offset + o16(frame_duration), # delay_numerator + o16(1000), # delay_denominator + o8(frame_disposal), # dispose_op + o8(frame_blend), # blend_op + ) + seq_num += 1 + # frame data + if frame == 0 and not default_image: + # first frame must be in IDAT chunks for backwards compatibility + ImageFile._save( + im_frame, + _idat(fp, chunk), + [("zip", (0, 0) + im_frame.size, 0, rawmode)], + ) + else: + fdat_chunks = _fdat(fp, chunk, seq_num) + ImageFile._save( + im_frame, + fdat_chunks, + [("zip", (0, 0) + im_frame.size, 0, rawmode)], + ) + seq_num = fdat_chunks.seq_num + + +def _save_all(im, fp, filename): + _save(im, fp, filename, save_all=True) + + +def _save(im, fp, filename, chunk=putchunk, save_all=False): + # save an image to disk (called by the save method) + + mode = im.mode + + if mode == "P": + + # + # attempt to minimize storage requirements for palette images + if "bits" in im.encoderinfo: + # number of bits specified by user + colors = min(1 << im.encoderinfo["bits"], 256) + else: + # check 
palette contents + if im.palette: + colors = max(min(len(im.palette.getdata()[1]) // 3, 256), 1) + else: + colors = 256 + + if colors <= 16: + if colors <= 2: + bits = 1 + elif colors <= 4: + bits = 2 + else: + bits = 4 + mode = f"{mode};{bits}" + + # encoder options + im.encoderconfig = ( + im.encoderinfo.get("optimize", False), + im.encoderinfo.get("compress_level", -1), + im.encoderinfo.get("compress_type", -1), + im.encoderinfo.get("dictionary", b""), + ) + + # get the corresponding PNG mode + try: + rawmode, mode = _OUTMODES[mode] + except KeyError as e: + raise OSError(f"cannot write mode {mode} as PNG") from e + + # + # write minimal PNG file + + fp.write(_MAGIC) + + chunk( + fp, + b"IHDR", + o32(im.size[0]), # 0: size + o32(im.size[1]), + mode, # 8: depth/type + b"\0", # 10: compression + b"\0", # 11: filter category + b"\0", # 12: interlace flag + ) + + chunks = [b"cHRM", b"gAMA", b"sBIT", b"sRGB", b"tIME"] + + icc = im.encoderinfo.get("icc_profile", im.info.get("icc_profile")) + if icc: + # ICC profile + # according to PNG spec, the iCCP chunk contains: + # Profile name 1-79 bytes (character string) + # Null separator 1 byte (null character) + # Compression method 1 byte (0) + # Compressed profile n bytes (zlib with deflate compression) + name = b"ICC Profile" + data = name + b"\0\0" + zlib.compress(icc) + chunk(fp, b"iCCP", data) + + # You must either have sRGB or iCCP. + # Disallow sRGB chunks when an iCCP-chunk has been emitted. 
+ chunks.remove(b"sRGB") + + info = im.encoderinfo.get("pnginfo") + if info: + chunks_multiple_allowed = [b"sPLT", b"iTXt", b"tEXt", b"zTXt"] + for info_chunk in info.chunks: + cid, data = info_chunk[:2] + if cid in chunks: + chunks.remove(cid) + chunk(fp, cid, data) + elif cid in chunks_multiple_allowed: + chunk(fp, cid, data) + elif cid[1:2].islower(): + # Private chunk + after_idat = info_chunk[2:3] + if not after_idat: + chunk(fp, cid, data) + + if im.mode == "P": + palette_byte_number = colors * 3 + palette_bytes = im.im.getpalette("RGB")[:palette_byte_number] + while len(palette_bytes) < palette_byte_number: + palette_bytes += b"\0" + chunk(fp, b"PLTE", palette_bytes) + + transparency = im.encoderinfo.get("transparency", im.info.get("transparency", None)) + + if transparency or transparency == 0: + if im.mode == "P": + # limit to actual palette size + alpha_bytes = colors + if isinstance(transparency, bytes): + chunk(fp, b"tRNS", transparency[:alpha_bytes]) + else: + transparency = max(0, min(255, transparency)) + alpha = b"\xFF" * transparency + b"\0" + chunk(fp, b"tRNS", alpha[:alpha_bytes]) + elif im.mode in ("1", "L", "I"): + transparency = max(0, min(65535, transparency)) + chunk(fp, b"tRNS", o16(transparency)) + elif im.mode == "RGB": + red, green, blue = transparency + chunk(fp, b"tRNS", o16(red) + o16(green) + o16(blue)) + else: + if "transparency" in im.encoderinfo: + # don't bother with transparency if it's an RGBA + # and it's in the info dict. It's probably just stale. 
+ raise OSError("cannot use transparency for this mode") + else: + if im.mode == "P" and im.im.getpalettemode() == "RGBA": + alpha = im.im.getpalette("RGBA", "A") + alpha_bytes = colors + chunk(fp, b"tRNS", alpha[:alpha_bytes]) + + dpi = im.encoderinfo.get("dpi") + if dpi: + chunk( + fp, + b"pHYs", + o32(int(dpi[0] / 0.0254 + 0.5)), + o32(int(dpi[1] / 0.0254 + 0.5)), + b"\x01", + ) + + if info: + chunks = [b"bKGD", b"hIST"] + for info_chunk in info.chunks: + cid, data = info_chunk[:2] + if cid in chunks: + chunks.remove(cid) + chunk(fp, cid, data) + + exif = im.encoderinfo.get("exif", im.info.get("exif")) + if exif: + if isinstance(exif, Image.Exif): + exif = exif.tobytes(8) + if exif.startswith(b"Exif\x00\x00"): + exif = exif[6:] + chunk(fp, b"eXIf", exif) + + if save_all: + _write_multiple_frames(im, fp, chunk, rawmode) + else: + ImageFile._save(im, _idat(fp, chunk), [("zip", (0, 0) + im.size, 0, rawmode)]) + + if info: + for info_chunk in info.chunks: + cid, data = info_chunk[:2] + if cid[1:2].islower(): + # Private chunk + after_idat = info_chunk[2:3] + if after_idat: + chunk(fp, cid, data) + + chunk(fp, b"IEND", b"") + + if hasattr(fp, "flush"): + fp.flush() + + +# -------------------------------------------------------------------- +# PNG chunk converter + + +def getchunks(im, **params): + """Return a list of PNG chunks representing this image.""" + + class collector: + data = [] + + def write(self, data): + pass + + def append(self, chunk): + self.data.append(chunk) + + def append(fp, cid, *data): + data = b"".join(data) + crc = o32(_crc32(data, _crc32(cid))) + fp.append((cid, data, crc)) + + fp = collector() + + try: + im.encoderinfo = params + _save(im, fp, None, append) + finally: + del im.encoderinfo + + return fp.data + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(PngImageFile.format, PngImageFile, _accept) +Image.register_save(PngImageFile.format, _save) 
+Image.register_save_all(PngImageFile.format, _save_all) + +Image.register_extensions(PngImageFile.format, [".png", ".apng"]) + +Image.register_mime(PngImageFile.format, "image/png") diff --git a/.venv/lib/python3.9/site-packages/PIL/PpmImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/PpmImagePlugin.py new file mode 100644 index 00000000..abf4d651 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/PpmImagePlugin.py @@ -0,0 +1,164 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PPM support for PIL +# +# History: +# 96-03-24 fl Created +# 98-03-06 fl Write RGBA images (as RGB, that is) +# +# Copyright (c) Secret Labs AB 1997-98. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# + + +from . import Image, ImageFile + +# +# -------------------------------------------------------------------- + +b_whitespace = b"\x20\x09\x0a\x0b\x0c\x0d" + +MODES = { + # standard + b"P4": "1", + b"P5": "L", + b"P6": "RGB", + # extensions + b"P0CMYK": "CMYK", + # PIL extensions (for test purposes only) + b"PyP": "P", + b"PyRGBA": "RGBA", + b"PyCMYK": "CMYK", +} + + +def _accept(prefix): + return prefix[0:1] == b"P" and prefix[1] in b"0456y" + + +## +# Image plugin for PBM, PGM, and PPM images. 
+ + +class PpmImageFile(ImageFile.ImageFile): + + format = "PPM" + format_description = "Pbmplus image" + + def _token(self, s=b""): + while True: # read until next whitespace + c = self.fp.read(1) + if not c or c in b_whitespace: + break + if c > b"\x79": + raise ValueError("Expected ASCII value, found binary") + s = s + c + if len(s) > 9: + raise ValueError("Expected int, got > 9 digits") + return s + + def _open(self): + + # check magic + s = self.fp.read(1) + if s != b"P": + raise SyntaxError("not a PPM file") + magic_number = self._token(s) + mode = MODES[magic_number] + + self.custom_mimetype = { + b"P4": "image/x-portable-bitmap", + b"P5": "image/x-portable-graymap", + b"P6": "image/x-portable-pixmap", + }.get(magic_number) + + if mode == "1": + self.mode = "1" + rawmode = "1;I" + else: + self.mode = rawmode = mode + + for ix in range(3): + while True: + while True: + s = self.fp.read(1) + if s not in b_whitespace: + break + if s == b"": + raise ValueError("File does not extend beyond magic number") + if s != b"#": + break + s = self.fp.readline() + s = int(self._token(s)) + if ix == 0: + xsize = s + elif ix == 1: + ysize = s + if mode == "1": + break + elif ix == 2: + # maxgrey + if s > 255: + if not mode == "L": + raise ValueError(f"Too many colors for band: {s}") + if s < 2 ** 16: + self.mode = "I" + rawmode = "I;16B" + else: + self.mode = "I" + rawmode = "I;32B" + + self._size = xsize, ysize + self.tile = [("raw", (0, 0, xsize, ysize), self.fp.tell(), (rawmode, 0, 1))] + + +# +# -------------------------------------------------------------------- + + +def _save(im, fp, filename): + if im.mode == "1": + rawmode, head = "1;I", b"P4" + elif im.mode == "L": + rawmode, head = "L", b"P5" + elif im.mode == "I": + if im.getextrema()[1] < 2 ** 16: + rawmode, head = "I;16B", b"P5" + else: + rawmode, head = "I;32B", b"P5" + elif im.mode == "RGB": + rawmode, head = "RGB", b"P6" + elif im.mode == "RGBA": + rawmode, head = "RGB", b"P6" + else: + raise OSError(f"cannot 
write mode {im.mode} as PPM") + fp.write(head + ("\n%d %d\n" % im.size).encode("ascii")) + if head == b"P6": + fp.write(b"255\n") + if head == b"P5": + if rawmode == "L": + fp.write(b"255\n") + elif rawmode == "I;16B": + fp.write(b"65535\n") + elif rawmode == "I;32B": + fp.write(b"2147483648\n") + ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))]) + + # ALTERNATIVE: save via builtin debug function + # im._dump(filename) + + +# +# -------------------------------------------------------------------- + + +Image.register_open(PpmImageFile.format, PpmImageFile, _accept) +Image.register_save(PpmImageFile.format, _save) + +Image.register_extensions(PpmImageFile.format, [".pbm", ".pgm", ".ppm", ".pnm"]) + +Image.register_mime(PpmImageFile.format, "image/x-portable-anymap") diff --git a/.venv/lib/python3.9/site-packages/PIL/PsdImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/PsdImagePlugin.py new file mode 100644 index 00000000..04b21e3d --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/PsdImagePlugin.py @@ -0,0 +1,325 @@ +# +# The Python Imaging Library +# $Id$ +# +# Adobe PSD 2.5/3.0 file handling +# +# History: +# 1995-09-01 fl Created +# 1997-01-03 fl Read most PSD images +# 1997-01-18 fl Fixed P and CMYK support +# 2001-10-21 fl Added seek/tell support (for layers) +# +# Copyright (c) 1997-2001 by Secret Labs AB. +# Copyright (c) 1995-2001 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import io + +from . 
import Image, ImageFile, ImagePalette +from ._binary import i8 +from ._binary import i16be as i16 +from ._binary import i32be as i32 +from ._binary import si16be as si16 + +MODES = { + # (photoshop mode, bits) -> (pil mode, required channels) + (0, 1): ("1", 1), + (0, 8): ("L", 1), + (1, 8): ("L", 1), + (2, 8): ("P", 1), + (3, 8): ("RGB", 3), + (4, 8): ("CMYK", 4), + (7, 8): ("L", 1), # FIXME: multilayer + (8, 8): ("L", 1), # duotone + (9, 8): ("LAB", 3), +} + + +# --------------------------------------------------------------------. +# read PSD images + + +def _accept(prefix): + return prefix[:4] == b"8BPS" + + +## +# Image plugin for Photoshop images. + + +class PsdImageFile(ImageFile.ImageFile): + + format = "PSD" + format_description = "Adobe Photoshop" + _close_exclusive_fp_after_loading = False + + def _open(self): + + read = self.fp.read + + # + # header + + s = read(26) + if not _accept(s) or i16(s, 4) != 1: + raise SyntaxError("not a PSD file") + + psd_bits = i16(s, 22) + psd_channels = i16(s, 12) + psd_mode = i16(s, 24) + + mode, channels = MODES[(psd_mode, psd_bits)] + + if channels > psd_channels: + raise OSError("not enough channels") + + self.mode = mode + self._size = i32(s, 18), i32(s, 14) + + # + # color mode data + + size = i32(read(4)) + if size: + data = read(size) + if mode == "P" and size == 768: + self.palette = ImagePalette.raw("RGB;L", data) + + # + # image resources + + self.resources = [] + + size = i32(read(4)) + if size: + # load resources + end = self.fp.tell() + size + while self.fp.tell() < end: + read(4) # signature + id = i16(read(2)) + name = read(i8(read(1))) + if not (len(name) & 1): + read(1) # padding + data = read(i32(read(4))) + if len(data) & 1: + read(1) # padding + self.resources.append((id, name, data)) + if id == 1039: # ICC profile + self.info["icc_profile"] = data + + # + # layer and mask information + + self.layers = [] + + size = i32(read(4)) + if size: + end = self.fp.tell() + size + size = i32(read(4)) + if size: 
+ _layer_data = io.BytesIO(ImageFile._safe_read(self.fp, size)) + self.layers = _layerinfo(_layer_data, size) + self.fp.seek(end) + self.n_frames = len(self.layers) + self.is_animated = self.n_frames > 1 + + # + # image descriptor + + self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels) + + # keep the file open + self.__fp = self.fp + self.frame = 1 + self._min_frame = 1 + + def seek(self, layer): + if not self._seek_check(layer): + return + + # seek to given layer (1..max) + try: + name, mode, bbox, tile = self.layers[layer - 1] + self.mode = mode + self.tile = tile + self.frame = layer + self.fp = self.__fp + return name, bbox + except IndexError as e: + raise EOFError("no such layer") from e + + def tell(self): + # return layer number (0=image, 1..max=layers) + return self.frame + + def load_prepare(self): + # create image memory if necessary + if not self.im or self.im.mode != self.mode or self.im.size != self.size: + self.im = Image.core.fill(self.mode, self.size, 0) + # create palette (optional) + if self.mode == "P": + Image.Image.load(self) + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +def _layerinfo(fp, ct_bytes): + # read layerinfo block + layers = [] + + def read(size): + return ImageFile._safe_read(fp, size) + + ct = si16(read(2)) + + # sanity check + if ct_bytes < (abs(ct) * 20): + raise SyntaxError("Layer block too short for number of layers requested") + + for i in range(abs(ct)): + + # bounding box + y0 = i32(read(4)) + x0 = i32(read(4)) + y1 = i32(read(4)) + x1 = i32(read(4)) + + # image info + info = [] + mode = [] + ct_types = i16(read(2)) + types = list(range(ct_types)) + if len(types) > 4: + continue + + for i in types: + type = i16(read(2)) + + if type == 65535: + m = "A" + else: + m = "RGBA"[type] + + mode.append(m) + size = i32(read(4)) + info.append((m, size)) + + # figure out the image mode + mode.sort() + if mode == ["R"]: + 
mode = "L" + elif mode == ["B", "G", "R"]: + mode = "RGB" + elif mode == ["A", "B", "G", "R"]: + mode = "RGBA" + else: + mode = None # unknown + + # skip over blend flags and extra information + read(12) # filler + name = "" + size = i32(read(4)) # length of the extra data field + combined = 0 + if size: + data_end = fp.tell() + size + + length = i32(read(4)) + if length: + fp.seek(length - 16, io.SEEK_CUR) + combined += length + 4 + + length = i32(read(4)) + if length: + fp.seek(length, io.SEEK_CUR) + combined += length + 4 + + length = i8(read(1)) + if length: + # Don't know the proper encoding, + # Latin-1 should be a good guess + name = read(length).decode("latin-1", "replace") + combined += length + 1 + + fp.seek(data_end) + layers.append((name, mode, (x0, y0, x1, y1))) + + # get tiles + i = 0 + for name, mode, bbox in layers: + tile = [] + for m in mode: + t = _maketile(fp, m, bbox, 1) + if t: + tile.extend(t) + layers[i] = name, mode, bbox, tile + i += 1 + + return layers + + +def _maketile(file, mode, bbox, channels): + + tile = None + read = file.read + + compression = i16(read(2)) + + xsize = bbox[2] - bbox[0] + ysize = bbox[3] - bbox[1] + + offset = file.tell() + + if compression == 0: + # + # raw compression + tile = [] + for channel in range(channels): + layer = mode[channel] + if mode == "CMYK": + layer += ";I" + tile.append(("raw", bbox, offset, layer)) + offset = offset + xsize * ysize + + elif compression == 1: + # + # packbits compression + i = 0 + tile = [] + bytecount = read(channels * ysize * 2) + offset = file.tell() + for channel in range(channels): + layer = mode[channel] + if mode == "CMYK": + layer += ";I" + tile.append(("packbits", bbox, offset, layer)) + for y in range(ysize): + offset = offset + i16(bytecount, i) + i += 2 + + file.seek(offset) + + if offset & 1: + read(1) # padding + + return tile + + +# -------------------------------------------------------------------- +# registry + + +Image.register_open(PsdImageFile.format, 
PsdImageFile, _accept) + +Image.register_extension(PsdImageFile.format, ".psd") + +Image.register_mime(PsdImageFile.format, "image/vnd.adobe.photoshop") diff --git a/.venv/lib/python3.9/site-packages/PIL/PyAccess.py b/.venv/lib/python3.9/site-packages/PIL/PyAccess.py new file mode 100644 index 00000000..eeaa0ccc --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/PyAccess.py @@ -0,0 +1,353 @@ +# +# The Python Imaging Library +# Pillow fork +# +# Python implementation of the PixelAccess Object +# +# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved. +# Copyright (c) 1995-2009 by Fredrik Lundh. +# Copyright (c) 2013 Eric Soroos +# +# See the README file for information on usage and redistribution +# + +# Notes: +# +# * Implements the pixel access object following Access. +# * Does not implement the line functions, as they don't appear to be used +# * Taking only the tuple form, which is used from python. +# * Fill.c uses the integer form, but it's still going to use the old +# Access.c implementation. +# + +import logging +import sys + +try: + from cffi import FFI + + defs = """ + struct Pixel_RGBA { + unsigned char r,g,b,a; + }; + struct Pixel_I16 { + unsigned char l,r; + }; + """ + ffi = FFI() + ffi.cdef(defs) +except ImportError as ex: + # Allow error import for doc purposes, but error out when accessing + # anything in core. + from ._util import deferred_error + + FFI = ffi = deferred_error(ex) + +logger = logging.getLogger(__name__) + + +class PyAccess: + def __init__(self, img, readonly=False): + vals = dict(img.im.unsafe_ptrs) + self.readonly = readonly + self.image8 = ffi.cast("unsigned char **", vals["image8"]) + self.image32 = ffi.cast("int **", vals["image32"]) + self.image = ffi.cast("unsigned char **", vals["image"]) + self.xsize, self.ysize = img.im.size + self._img = img + + # Keep pointer to im object to prevent dereferencing. 
+ self._im = img.im + if self._im.mode == "P": + self._palette = img.palette + + # Debugging is polluting test traces, only useful here + # when hacking on PyAccess + # logger.debug("%s", vals) + self._post_init() + + def _post_init(self): + pass + + def __setitem__(self, xy, color): + """ + Modifies the pixel at x,y. The color is given as a single + numerical value for single band images, and a tuple for + multi-band images + + :param xy: The pixel coordinate, given as (x, y). See + :ref:`coordinate-system`. + :param color: The pixel value. + """ + if self.readonly: + raise ValueError("Attempt to putpixel a read only image") + (x, y) = xy + if x < 0: + x = self.xsize + x + if y < 0: + y = self.ysize + y + (x, y) = self.check_xy((x, y)) + + if ( + self._im.mode == "P" + and isinstance(color, (list, tuple)) + and len(color) in [3, 4] + ): + # RGB or RGBA value for a P image + color = self._palette.getcolor(color, self._img) + + return self.set_pixel(x, y, color) + + def __getitem__(self, xy): + """ + Returns the pixel at x,y. The pixel is returned as a single + value for single band images or a tuple for multiple band + images + + :param xy: The pixel coordinate, given as (x, y). See + :ref:`coordinate-system`. + :returns: a pixel value for single band images, a tuple of + pixel values for multiband images. 
+ """ + (x, y) = xy + if x < 0: + x = self.xsize + x + if y < 0: + y = self.ysize + y + (x, y) = self.check_xy((x, y)) + return self.get_pixel(x, y) + + putpixel = __setitem__ + getpixel = __getitem__ + + def check_xy(self, xy): + (x, y) = xy + if not (0 <= x < self.xsize and 0 <= y < self.ysize): + raise ValueError("pixel location out of range") + return xy + + +class _PyAccess32_2(PyAccess): + """PA, LA, stored in first and last bytes of a 32 bit word""" + + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32) + + def get_pixel(self, x, y): + pixel = self.pixels[y][x] + return (pixel.r, pixel.a) + + def set_pixel(self, x, y, color): + pixel = self.pixels[y][x] + # tuple + pixel.r = min(color[0], 255) + pixel.a = min(color[1], 255) + + +class _PyAccess32_3(PyAccess): + """RGB and friends, stored in the first three bytes of a 32 bit word""" + + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32) + + def get_pixel(self, x, y): + pixel = self.pixels[y][x] + return (pixel.r, pixel.g, pixel.b) + + def set_pixel(self, x, y, color): + pixel = self.pixels[y][x] + # tuple + pixel.r = min(color[0], 255) + pixel.g = min(color[1], 255) + pixel.b = min(color[2], 255) + pixel.a = 255 + + +class _PyAccess32_4(PyAccess): + """RGBA etc, all 4 bytes of a 32 bit word""" + + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32) + + def get_pixel(self, x, y): + pixel = self.pixels[y][x] + return (pixel.r, pixel.g, pixel.b, pixel.a) + + def set_pixel(self, x, y, color): + pixel = self.pixels[y][x] + # tuple + pixel.r = min(color[0], 255) + pixel.g = min(color[1], 255) + pixel.b = min(color[2], 255) + pixel.a = min(color[3], 255) + + +class _PyAccess8(PyAccess): + """1, L, P, 8 bit images stored as uint8""" + + def _post_init(self, *args, **kwargs): + self.pixels = self.image8 + + def get_pixel(self, x, y): + return self.pixels[y][x] + + def 
set_pixel(self, x, y, color): + try: + # integer + self.pixels[y][x] = min(color, 255) + except TypeError: + # tuple + self.pixels[y][x] = min(color[0], 255) + + +class _PyAccessI16_N(PyAccess): + """I;16 access, native bitendian without conversion""" + + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast("unsigned short **", self.image) + + def get_pixel(self, x, y): + return self.pixels[y][x] + + def set_pixel(self, x, y, color): + try: + # integer + self.pixels[y][x] = min(color, 65535) + except TypeError: + # tuple + self.pixels[y][x] = min(color[0], 65535) + + +class _PyAccessI16_L(PyAccess): + """I;16L access, with conversion""" + + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast("struct Pixel_I16 **", self.image) + + def get_pixel(self, x, y): + pixel = self.pixels[y][x] + return pixel.l + pixel.r * 256 + + def set_pixel(self, x, y, color): + pixel = self.pixels[y][x] + try: + color = min(color, 65535) + except TypeError: + color = min(color[0], 65535) + + pixel.l = color & 0xFF # noqa: E741 + pixel.r = color >> 8 + + +class _PyAccessI16_B(PyAccess): + """I;16B access, with conversion""" + + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast("struct Pixel_I16 **", self.image) + + def get_pixel(self, x, y): + pixel = self.pixels[y][x] + return pixel.l * 256 + pixel.r + + def set_pixel(self, x, y, color): + pixel = self.pixels[y][x] + try: + color = min(color, 65535) + except Exception: + color = min(color[0], 65535) + + pixel.l = color >> 8 # noqa: E741 + pixel.r = color & 0xFF + + +class _PyAccessI32_N(PyAccess): + """Signed Int32 access, native endian""" + + def _post_init(self, *args, **kwargs): + self.pixels = self.image32 + + def get_pixel(self, x, y): + return self.pixels[y][x] + + def set_pixel(self, x, y, color): + self.pixels[y][x] = color + + +class _PyAccessI32_Swap(PyAccess): + """I;32L/B access, with byteswapping conversion""" + + def _post_init(self, *args, **kwargs): + self.pixels = self.image32 + + def 
reverse(self, i): + orig = ffi.new("int *", i) + chars = ffi.cast("unsigned char *", orig) + chars[0], chars[1], chars[2], chars[3] = chars[3], chars[2], chars[1], chars[0] + return ffi.cast("int *", chars)[0] + + def get_pixel(self, x, y): + return self.reverse(self.pixels[y][x]) + + def set_pixel(self, x, y, color): + self.pixels[y][x] = self.reverse(color) + + +class _PyAccessF(PyAccess): + """32 bit float access""" + + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast("float **", self.image32) + + def get_pixel(self, x, y): + return self.pixels[y][x] + + def set_pixel(self, x, y, color): + try: + # not a tuple + self.pixels[y][x] = color + except TypeError: + # tuple + self.pixels[y][x] = color[0] + + +mode_map = { + "1": _PyAccess8, + "L": _PyAccess8, + "P": _PyAccess8, + "LA": _PyAccess32_2, + "La": _PyAccess32_2, + "PA": _PyAccess32_2, + "RGB": _PyAccess32_3, + "LAB": _PyAccess32_3, + "HSV": _PyAccess32_3, + "YCbCr": _PyAccess32_3, + "RGBA": _PyAccess32_4, + "RGBa": _PyAccess32_4, + "RGBX": _PyAccess32_4, + "CMYK": _PyAccess32_4, + "F": _PyAccessF, + "I": _PyAccessI32_N, +} + +if sys.byteorder == "little": + mode_map["I;16"] = _PyAccessI16_N + mode_map["I;16L"] = _PyAccessI16_N + mode_map["I;16B"] = _PyAccessI16_B + + mode_map["I;32L"] = _PyAccessI32_N + mode_map["I;32B"] = _PyAccessI32_Swap +else: + mode_map["I;16"] = _PyAccessI16_L + mode_map["I;16L"] = _PyAccessI16_L + mode_map["I;16B"] = _PyAccessI16_N + + mode_map["I;32L"] = _PyAccessI32_Swap + mode_map["I;32B"] = _PyAccessI32_N + + +def new(img, readonly=False): + access_type = mode_map.get(img.mode, None) + if not access_type: + logger.debug("PyAccess Not Implemented: %s", img.mode) + return None + return access_type(img, readonly) diff --git a/.venv/lib/python3.9/site-packages/PIL/SgiImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/SgiImagePlugin.py new file mode 100644 index 00000000..5f1ef6ed --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/SgiImagePlugin.py @@ -0,0 
+1,230 @@ +# +# The Python Imaging Library. +# $Id$ +# +# SGI image file handling +# +# See "The SGI Image File Format (Draft version 0.97)", Paul Haeberli. +# +# +# +# History: +# 2017-22-07 mb Add RLE decompression +# 2016-16-10 mb Add save method without compression +# 1995-09-10 fl Created +# +# Copyright (c) 2016 by Mickael Bonfill. +# Copyright (c) 2008 by Karsten Hiddemann. +# Copyright (c) 1997 by Secret Labs AB. +# Copyright (c) 1995 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + + +import os +import struct + +from . import Image, ImageFile +from ._binary import i16be as i16 +from ._binary import o8 + + +def _accept(prefix): + return len(prefix) >= 2 and i16(prefix) == 474 + + +MODES = { + (1, 1, 1): "L", + (1, 2, 1): "L", + (2, 1, 1): "L;16B", + (2, 2, 1): "L;16B", + (1, 3, 3): "RGB", + (2, 3, 3): "RGB;16B", + (1, 3, 4): "RGBA", + (2, 3, 4): "RGBA;16B", +} + + +## +# Image plugin for SGI images. +class SgiImageFile(ImageFile.ImageFile): + + format = "SGI" + format_description = "SGI Image File Format" + + def _open(self): + + # HEAD + headlen = 512 + s = self.fp.read(headlen) + + if not _accept(s): + raise ValueError("Not an SGI image file") + + # compression : verbatim or RLE + compression = s[2] + + # bpc : 1 or 2 bytes (8bits or 16bits) + bpc = s[3] + + # dimension : 1, 2 or 3 (depending on xsize, ysize and zsize) + dimension = i16(s, 4) + + # xsize : width + xsize = i16(s, 6) + + # ysize : height + ysize = i16(s, 8) + + # zsize : channels count + zsize = i16(s, 10) + + # layout + layout = bpc, dimension, zsize + + # determine mode from bits/zsize + rawmode = "" + try: + rawmode = MODES[layout] + except KeyError: + pass + + if rawmode == "": + raise ValueError("Unsupported SGI image mode") + + self._size = xsize, ysize + self.mode = rawmode.split(";")[0] + if self.mode == "RGB": + self.custom_mimetype = "image/rgb" + + # orientation -1 : scanlines begins at the bottom-left corner + orientation = -1 + + # 
decoder info + if compression == 0: + pagesize = xsize * ysize * bpc + if bpc == 2: + self.tile = [ + ("SGI16", (0, 0) + self.size, headlen, (self.mode, 0, orientation)) + ] + else: + self.tile = [] + offset = headlen + for layer in self.mode: + self.tile.append( + ("raw", (0, 0) + self.size, offset, (layer, 0, orientation)) + ) + offset += pagesize + elif compression == 1: + self.tile = [ + ("sgi_rle", (0, 0) + self.size, headlen, (rawmode, orientation, bpc)) + ] + + +def _save(im, fp, filename): + if im.mode != "RGB" and im.mode != "RGBA" and im.mode != "L": + raise ValueError("Unsupported SGI image mode") + + # Get the keyword arguments + info = im.encoderinfo + + # Byte-per-pixel precision, 1 = 8bits per pixel + bpc = info.get("bpc", 1) + + if bpc not in (1, 2): + raise ValueError("Unsupported number of bytes per pixel") + + # Flip the image, since the origin of SGI file is the bottom-left corner + orientation = -1 + # Define the file as SGI File Format + magicNumber = 474 + # Run-Length Encoding Compression - Unsupported at this time + rle = 0 + + # Number of dimensions (x,y,z) + dim = 3 + # X Dimension = width / Y Dimension = height + x, y = im.size + if im.mode == "L" and y == 1: + dim = 1 + elif im.mode == "L": + dim = 2 + # Z Dimension: Number of channels + z = len(im.mode) + + if dim == 1 or dim == 2: + z = 1 + + # assert we've got the right number of bands. 
+ if len(im.getbands()) != z: + raise ValueError( + f"incorrect number of bands in SGI write: {z} vs {len(im.getbands())}" + ) + + # Minimum Byte value + pinmin = 0 + # Maximum Byte value (255 = 8bits per pixel) + pinmax = 255 + # Image name (79 characters max, truncated below in write) + imgName = os.path.splitext(os.path.basename(filename))[0] + imgName = imgName.encode("ascii", "ignore") + # Standard representation of pixel in the file + colormap = 0 + fp.write(struct.pack(">h", magicNumber)) + fp.write(o8(rle)) + fp.write(o8(bpc)) + fp.write(struct.pack(">H", dim)) + fp.write(struct.pack(">H", x)) + fp.write(struct.pack(">H", y)) + fp.write(struct.pack(">H", z)) + fp.write(struct.pack(">l", pinmin)) + fp.write(struct.pack(">l", pinmax)) + fp.write(struct.pack("4s", b"")) # dummy + fp.write(struct.pack("79s", imgName)) # truncates to 79 chars + fp.write(struct.pack("s", b"")) # force null byte after imgname + fp.write(struct.pack(">l", colormap)) + fp.write(struct.pack("404s", b"")) # dummy + + rawmode = "L" + if bpc == 2: + rawmode = "L;16B" + + for channel in im.split(): + fp.write(channel.tobytes("raw", rawmode, 0, orientation)) + + if hasattr(fp, "flush"): + fp.flush() + + +class SGI16Decoder(ImageFile.PyDecoder): + _pulls_fd = True + + def decode(self, buffer): + rawmode, stride, orientation = self.args + pagesize = self.state.xsize * self.state.ysize + zsize = len(self.mode) + self.fd.seek(512) + + for band in range(zsize): + channel = Image.new("L", (self.state.xsize, self.state.ysize)) + channel.frombytes( + self.fd.read(2 * pagesize), "raw", "L;16B", stride, orientation + ) + self.im.putband(channel.im, band) + + return -1, 0 + + +# +# registry + + +Image.register_decoder("SGI16", SGI16Decoder) +Image.register_open(SgiImageFile.format, SgiImageFile, _accept) +Image.register_save(SgiImageFile.format, _save) +Image.register_mime(SgiImageFile.format, "image/sgi") + +Image.register_extensions(SgiImageFile.format, [".bw", ".rgb", ".rgba", ".sgi"]) + +# End 
of file diff --git a/.venv/lib/python3.9/site-packages/PIL/SpiderImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/SpiderImagePlugin.py new file mode 100644 index 00000000..062af9f9 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/SpiderImagePlugin.py @@ -0,0 +1,324 @@ +# +# The Python Imaging Library. +# +# SPIDER image file handling +# +# History: +# 2004-08-02 Created BB +# 2006-03-02 added save method +# 2006-03-13 added support for stack images +# +# Copyright (c) 2004 by Health Research Inc. (HRI) RENSSELAER, NY 12144. +# Copyright (c) 2004 by William Baxter. +# Copyright (c) 2004 by Secret Labs AB. +# Copyright (c) 2004 by Fredrik Lundh. +# + +## +# Image plugin for the Spider image format. This format is is used +# by the SPIDER software, in processing image data from electron +# microscopy and tomography. +## + +# +# SpiderImagePlugin.py +# +# The Spider image format is used by SPIDER software, in processing +# image data from electron microscopy and tomography. +# +# Spider home page: +# https://spider.wadsworth.org/spider_doc/spider/docs/spider.html +# +# Details about the Spider image format: +# https://spider.wadsworth.org/spider_doc/spider/docs/image_doc.html +# +import os +import struct +import sys + +from PIL import Image, ImageFile + + +def isInt(f): + try: + i = int(f) + if f - i == 0: + return 1 + else: + return 0 + except (ValueError, OverflowError): + return 0 + + +iforms = [1, 3, -11, -12, -21, -22] + + +# There is no magic number to identify Spider files, so just check a +# series of header locations to see if they have reasonable values. +# Returns no. 
of bytes in the header, if it is a valid Spider header, +# otherwise returns 0 + + +def isSpiderHeader(t): + h = (99,) + t # add 1 value so can use spider header index start=1 + # header values 1,2,5,12,13,22,23 should be integers + for i in [1, 2, 5, 12, 13, 22, 23]: + if not isInt(h[i]): + return 0 + # check iform + iform = int(h[5]) + if iform not in iforms: + return 0 + # check other header values + labrec = int(h[13]) # no. records in file header + labbyt = int(h[22]) # total no. of bytes in header + lenbyt = int(h[23]) # record length in bytes + if labbyt != (labrec * lenbyt): + return 0 + # looks like a valid header + return labbyt + + +def isSpiderImage(filename): + with open(filename, "rb") as fp: + f = fp.read(92) # read 23 * 4 bytes + t = struct.unpack(">23f", f) # try big-endian first + hdrlen = isSpiderHeader(t) + if hdrlen == 0: + t = struct.unpack("<23f", f) # little-endian + hdrlen = isSpiderHeader(t) + return hdrlen + + +class SpiderImageFile(ImageFile.ImageFile): + + format = "SPIDER" + format_description = "Spider 2D image" + _close_exclusive_fp_after_loading = False + + def _open(self): + # check header + n = 27 * 4 # read 27 float values + f = self.fp.read(n) + + try: + self.bigendian = 1 + t = struct.unpack(">27f", f) # try big-endian first + hdrlen = isSpiderHeader(t) + if hdrlen == 0: + self.bigendian = 0 + t = struct.unpack("<27f", f) # little-endian + hdrlen = isSpiderHeader(t) + if hdrlen == 0: + raise SyntaxError("not a valid Spider file") + except struct.error as e: + raise SyntaxError("not a valid Spider file") from e + + h = (99,) + t # add 1 value : spider header index starts at 1 + iform = int(h[5]) + if iform != 1: + raise SyntaxError("not a Spider 2D image") + + self._size = int(h[12]), int(h[2]) # size in pixels (width, height) + self.istack = int(h[24]) + self.imgnumber = int(h[27]) + + if self.istack == 0 and self.imgnumber == 0: + # stk=0, img=0: a regular 2D image + offset = hdrlen + self._nimages = 1 + elif self.istack > 0 
and self.imgnumber == 0: + # stk>0, img=0: Opening the stack for the first time + self.imgbytes = int(h[12]) * int(h[2]) * 4 + self.hdrlen = hdrlen + self._nimages = int(h[26]) + # Point to the first image in the stack + offset = hdrlen * 2 + self.imgnumber = 1 + elif self.istack == 0 and self.imgnumber > 0: + # stk=0, img>0: an image within the stack + offset = hdrlen + self.stkoffset + self.istack = 2 # So Image knows it's still a stack + else: + raise SyntaxError("inconsistent stack header values") + + if self.bigendian: + self.rawmode = "F;32BF" + else: + self.rawmode = "F;32F" + self.mode = "F" + + self.tile = [("raw", (0, 0) + self.size, offset, (self.rawmode, 0, 1))] + self.__fp = self.fp # FIXME: hack + + @property + def n_frames(self): + return self._nimages + + @property + def is_animated(self): + return self._nimages > 1 + + # 1st image index is zero (although SPIDER imgnumber starts at 1) + def tell(self): + if self.imgnumber < 1: + return 0 + else: + return self.imgnumber - 1 + + def seek(self, frame): + if self.istack == 0: + raise EOFError("attempt to seek in a non-stack file") + if not self._seek_check(frame): + return + self.stkoffset = self.hdrlen + frame * (self.hdrlen + self.imgbytes) + self.fp = self.__fp + self.fp.seek(self.stkoffset) + self._open() + + # returns a byte image after rescaling to 0..255 + def convert2byte(self, depth=255): + (minimum, maximum) = self.getextrema() + m = 1 + if maximum != minimum: + m = depth / (maximum - minimum) + b = -m * minimum + return self.point(lambda i, m=m, b=b: i * m + b).convert("L") + + # returns a ImageTk.PhotoImage object, after rescaling to 0..255 + def tkPhotoImage(self): + from PIL import ImageTk + + return ImageTk.PhotoImage(self.convert2byte(), palette=256) + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +# -------------------------------------------------------------------- +# Image series + +# 
given a list of filenames, return a list of images
def loadImageSeries(filelist=None):
    """create a list of :py:class:`~PIL.Image.Image` objects for use in a montage"""
    if filelist is None or len(filelist) < 1:
        return

    imglist = []
    for img in filelist:
        if not os.path.exists(img):
            print(f"unable to find {img}")
            continue
        try:
            with Image.open(img) as im:
                im = im.convert2byte()
        except Exception:
            # best-effort: report non-Spider files, skip anything unreadable
            if not isSpiderImage(img):
                print(img + " is not a Spider image file")
            continue
        im.info["filename"] = img
        imglist.append(im)
    return imglist


# --------------------------------------------------------------------
# For saving images in Spider format


def makeSpiderHeader(im):
    """Build a SPIDER header for *im* as a list of packed 4-byte floats.

    :param im: image whose (width, height) sizes the header.
    :returns: list of ``struct.pack("f", ...)`` byte strings, or an empty
        list if the computed header is too small to hold the required
        fields (fewer than 23 values).
    """
    nsam, nrow = im.size
    lenbyt = nsam * 4  # There are labrec records in the header
    labrec = int(1024 / lenbyt)
    if 1024 % lenbyt != 0:
        labrec += 1
    labbyt = labrec * lenbyt
    hdr = []
    nvalues = int(labbyt / 4)
    for i in range(nvalues):
        hdr.append(0.0)

    if len(hdr) < 23:
        return []

    # NB these are Fortran indices
    hdr[1] = 1.0  # nslice (=1 for an image)
    hdr[2] = float(nrow)  # number of rows per slice
    hdr[5] = 1.0  # iform for 2D image
    hdr[12] = float(nsam)  # number of pixels per line
    hdr[13] = float(labrec)  # number of records in file header
    hdr[22] = float(labbyt)  # total number of bytes in header
    hdr[23] = float(lenbyt)  # record length in bytes

    # adjust for Fortran indexing
    hdr = hdr[1:]
    hdr.append(0.0)
    # pack binary data into a string
    hdrstr = []
    for v in hdr:
        hdrstr.append(struct.pack("f", v))
    return hdrstr


def _save(im, fp, filename):
    # Save *im* to *fp* as 32-bit native-endian floating point SPIDER data.
    if im.mode[0] != "F":
        im = im.convert("F")

    hdr = makeSpiderHeader(im)
    if len(hdr) < 256:
        raise OSError("Error creating Spider header")

    # write the SPIDER header
    fp.writelines(hdr)

    rawmode = "F;32NF"  # 32-bit native floating point
    ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))])


def _save_spider(im, fp, filename):
    # get the filename extension and register it with Image
    ext = os.path.splitext(filename)[1]
    Image.register_extension(SpiderImageFile.format, ext)
    _save(im, fp, filename)


# --------------------------------------------------------------------


Image.register_open(SpiderImageFile.format, SpiderImageFile)
Image.register_save(SpiderImageFile.format, _save_spider)

if __name__ == "__main__":

    if len(sys.argv) < 2:
        print("Syntax: python3 SpiderImagePlugin.py [infile] [outfile]")
        sys.exit()

    filename = sys.argv[1]
    if not isSpiderImage(filename):
        print("input image must be in Spider format")
        sys.exit()

    with Image.open(filename) as im:
        print("image: " + str(im))
        print("format: " + str(im.format))
        print("size: " + str(im.size))
        print("mode: " + str(im.mode))
        print("max, min: ", end=" ")
        print(im.getextrema())

        if len(sys.argv) > 2:
            outfile = sys.argv[2]

            # perform some image operation
            im = im.transpose(Image.FLIP_LEFT_RIGHT)
            print(
                f"saving a flipped version of {os.path.basename(filename)} "
                f"as {outfile} "
            )
            im.save(outfile, SpiderImageFile.format)
diff --git a/.venv/lib/python3.9/site-packages/PIL/SunImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/SunImagePlugin.py
new file mode 100644
index 00000000..c03759a0
--- /dev/null
+++ b/.venv/lib/python3.9/site-packages/PIL/SunImagePlugin.py
@@ -0,0 +1,136 @@
#
# The Python Imaging Library.
# $Id$
#
# Sun image file handling
#
# History:
# 1995-09-10 fl   Created
# 1996-05-28 fl   Fixed 32-bit alignment
# 1998-12-29 fl   Import ImagePalette module
# 2001-12-18 fl   Fixed palette loading (from Jean-Claude Rimbault)
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1995-1996 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#


from . 
import Image, ImageFile, ImagePalette
from ._binary import i32be as i32


def _accept(prefix):
    # Sun raster files start with the big-endian magic number 0x59A66A95.
    return len(prefix) >= 4 and i32(prefix) == 0x59A66A95


##
# Image plugin for Sun raster files.


class SunImageFile(ImageFile.ImageFile):

    format = "SUN"
    format_description = "Sun Raster File"

    def _open(self):
        # Parse the fixed 32-byte header, optional palette, and set up
        # a single raw or RLE tile for the pixel data.

        # The Sun Raster file header is 32 bytes in length
        # and has the following format:

        # typedef struct _SunRaster
        # {
        #     DWORD MagicNumber;      /* Magic (identification) number */
        #     DWORD Width;            /* Width of image in pixels */
        #     DWORD Height;           /* Height of image in pixels */
        #     DWORD Depth;            /* Number of bits per pixel */
        #     DWORD Length;           /* Size of image data in bytes */
        #     DWORD Type;             /* Type of raster file */
        #     DWORD ColorMapType;     /* Type of color map */
        #     DWORD ColorMapLength;   /* Size of the color map in bytes */
        # } SUNRASTER;

        # HEAD
        s = self.fp.read(32)
        if not _accept(s):
            raise SyntaxError("not an SUN raster file")

        offset = 32

        self._size = i32(s, 4), i32(s, 8)

        depth = i32(s, 12)
        # data_length = i32(s, 16)   # unreliable, ignore.
        file_type = i32(s, 20)
        palette_type = i32(s, 24)  # 0: None, 1: RGB, 2: Raw/arbitrary
        palette_length = i32(s, 28)

        # map bit depth (and file type for 24/32-bit) to mode/rawmode
        if depth == 1:
            self.mode, rawmode = "1", "1;I"
        elif depth == 4:
            self.mode, rawmode = "L", "L;4"
        elif depth == 8:
            self.mode = rawmode = "L"
        elif depth == 24:
            if file_type == 3:
                self.mode, rawmode = "RGB", "RGB"
            else:
                self.mode, rawmode = "RGB", "BGR"
        elif depth == 32:
            if file_type == 3:
                self.mode, rawmode = "RGB", "RGBX"
            else:
                self.mode, rawmode = "RGB", "BGRX"
        else:
            raise SyntaxError("Unsupported Mode/Bit Depth")

        if palette_length:
            if palette_length > 1024:
                raise SyntaxError("Unsupported Color Palette Length")

            if palette_type != 1:
                raise SyntaxError("Unsupported Palette Type")

            # palette sits between the header and the pixel data
            offset = offset + palette_length
            self.palette = ImagePalette.raw("RGB;L", self.fp.read(palette_length))
            if self.mode == "L":
                self.mode = "P"
                rawmode = rawmode.replace("L", "P")

        # 16 bit boundaries on stride
        stride = ((self.size[0] * depth + 15) // 16) * 2

        # file type: Type is the version (or flavor) of the bitmap
        # file. The following values are typically found in the Type
        # field:
        # 0000h Old
        # 0001h Standard
        # 0002h Byte-encoded
        # 0003h RGB format
        # 0004h TIFF format
        # 0005h IFF format
        # FFFFh Experimental

        # Old and standard are the same, except for the length tag.
        # byte-encoded is run-length-encoded
        # RGB looks similar to standard, but RGB byte order
        # TIFF and IFF mean that they were converted from T/IFF
        # Experimental means that it's something else.
        # (https://www.fileformat.info/format/sunraster/egff.htm)

        if file_type in (0, 1, 3, 4, 5):
            self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride))]
        elif file_type == 2:
            self.tile = [("sun_rle", (0, 0) + self.size, offset, rawmode)]
        else:
            raise SyntaxError("Unsupported Sun Raster file type")


#
# registry


Image.register_open(SunImageFile.format, SunImageFile, _accept)

Image.register_extension(SunImageFile.format, ".ras")
diff --git a/.venv/lib/python3.9/site-packages/PIL/TarIO.py b/.venv/lib/python3.9/site-packages/PIL/TarIO.py
new file mode 100644
index 00000000..d108362f
--- /dev/null
+++ b/.venv/lib/python3.9/site-packages/PIL/TarIO.py
@@ -0,0 +1,65 @@
#
# The Python Imaging Library.
# $Id$
#
# read files from within a tar file
#
# History:
# 95-06-18 fl   Created
# 96-05-28 fl   Open files in binary mode
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1995-96.
#
# See the README file for information on usage and redistribution.
#

import io

from . import ContainerIO


class TarIO(ContainerIO.ContainerIO):
    """A file object that provides read access to a given member of a TAR file."""

    def __init__(self, tarfile, file):
        """
        Create file object.

        :param tarfile: Name of TAR file.
        :param file: Name of member file. 
+ """ + self.fh = open(tarfile, "rb") + + while True: + + s = self.fh.read(512) + if len(s) != 512: + raise OSError("unexpected end of tar file") + + name = s[:100].decode("utf-8") + i = name.find("\0") + if i == 0: + raise OSError("cannot find subfile") + if i > 0: + name = name[:i] + + size = int(s[124:135], 8) + + if file == name: + break + + self.fh.seek((size + 511) & (~511), io.SEEK_CUR) + + # Open region + super().__init__(self.fh, self.fh.tell(), size) + + # Context manager support + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def close(self): + self.fh.close() diff --git a/.venv/lib/python3.9/site-packages/PIL/TgaImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/TgaImagePlugin.py new file mode 100644 index 00000000..5e5d52d1 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/TgaImagePlugin.py @@ -0,0 +1,248 @@ +# +# The Python Imaging Library. +# $Id$ +# +# TGA file handling +# +# History: +# 95-09-01 fl created (reads 24-bit files only) +# 97-01-04 fl support more TGA versions, including compressed images +# 98-07-04 fl fixed orientation and alpha layer bugs +# 98-09-11 fl fixed orientation for runlength decoder +# +# Copyright (c) Secret Labs AB 1997-98. +# Copyright (c) Fredrik Lundh 1995-97. +# +# See the README file for information on usage and redistribution. +# + + +import warnings + +from . import Image, ImageFile, ImagePalette +from ._binary import i16le as i16 +from ._binary import o8 +from ._binary import o16le as o16 + +# +# -------------------------------------------------------------------- +# Read RGA file + + +MODES = { + # map imagetype/depth to rawmode + (1, 8): "P", + (3, 1): "1", + (3, 8): "L", + (3, 16): "LA", + (2, 16): "BGR;5", + (2, 24): "BGR", + (2, 32): "BGRA", +} + + +## +# Image plugin for Targa files. 


class TgaImageFile(ImageFile.ImageFile):
    # Image plugin for Targa (TGA) files.

    format = "TGA"
    format_description = "Targa"

    def _open(self):
        # Parse the 18-byte TGA header, optional id section and color map,
        # then set up a raw or RLE tile descriptor.

        # process header
        s = self.fp.read(18)

        id_len = s[0]

        colormaptype = s[1]
        imagetype = s[2]

        depth = s[16]

        flags = s[17]

        self._size = i16(s, 12), i16(s, 14)

        # validate header fields
        if (
            colormaptype not in (0, 1)
            or self.size[0] <= 0
            or self.size[1] <= 0
            or depth not in (1, 8, 16, 24, 32)
        ):
            raise SyntaxError("not a TGA file")

        # image mode (types 9/10/11 are the RLE variants of 1/2/3)
        if imagetype in (3, 11):
            self.mode = "L"
            if depth == 1:
                self.mode = "1"  # ???
            elif depth == 16:
                self.mode = "LA"
        elif imagetype in (1, 9):
            self.mode = "P"
        elif imagetype in (2, 10):
            self.mode = "RGB"
            if depth == 32:
                self.mode = "RGBA"
        else:
            raise SyntaxError("unknown TGA mode")

        # orientation (bits 4-5 of the flags byte)
        orientation = flags & 0x30
        if orientation == 0x20:
            orientation = 1
        elif not orientation:
            orientation = -1
        else:
            raise SyntaxError("unknown TGA orientation")

        self.info["orientation"] = orientation

        if imagetype & 8:
            self.info["compression"] = "tga_rle"

        if id_len:
            self.info["id_section"] = self.fp.read(id_len)

        if colormaptype:
            # read palette; *start* leading entries are zero-padded
            start, size, mapdepth = i16(s, 3), i16(s, 5), s[7]
            if mapdepth == 16:
                self.palette = ImagePalette.raw(
                    "BGR;15", b"\0" * 2 * start + self.fp.read(2 * size)
                )
            elif mapdepth == 24:
                self.palette = ImagePalette.raw(
                    "BGR", b"\0" * 3 * start + self.fp.read(3 * size)
                )
            elif mapdepth == 32:
                self.palette = ImagePalette.raw(
                    "BGRA", b"\0" * 4 * start + self.fp.read(4 * size)
                )

        # setup tile descriptor
        try:
            rawmode = MODES[(imagetype & 7, depth)]
            if imagetype & 8:
                # compressed
                self.tile = [
                    (
                        "tga_rle",
                        (0, 0) + self.size,
                        self.fp.tell(),
                        (rawmode, orientation, depth),
                    )
                ]
            else:
                self.tile = [
                    (
                        "raw",
                        (0, 0) + self.size,
                        self.fp.tell(),
                        (rawmode, 0, orientation),
                    )
                ]
        except KeyError:
            pass  # cannot decode


#
# 
--------------------------------------------------------------------
# Write TGA file


SAVE = {
    # mode => (rawmode, bits-per-pixel, colormaptype, imagetype)
    "1": ("1", 1, 0, 3),
    "L": ("L", 8, 0, 3),
    "LA": ("LA", 16, 0, 3),
    "P": ("P", 8, 1, 1),
    "RGB": ("BGR", 24, 0, 2),
    "RGBA": ("BGRA", 32, 0, 2),
}


def _save(im, fp, filename):
    # Write *im* to *fp* as a Targa file with a version-2 footer.

    try:
        rawmode, bits, colormaptype, imagetype = SAVE[im.mode]
    except KeyError as e:
        raise OSError(f"cannot write mode {im.mode} as TGA") from e

    # explicit "rle" option wins; otherwise honour compression=tga_rle
    if "rle" in im.encoderinfo:
        rle = im.encoderinfo["rle"]
    else:
        compression = im.encoderinfo.get("compression", im.info.get("compression"))
        rle = compression == "tga_rle"
    if rle:
        imagetype += 8  # RLE variants are base type + 8

    id_section = im.encoderinfo.get("id_section", im.info.get("id_section", ""))
    id_len = len(id_section)
    if id_len > 255:
        # the id length field is a single byte
        id_len = 255
        id_section = id_section[:255]
        warnings.warn("id_section has been trimmed to 255 characters")

    if colormaptype:
        colormapfirst, colormaplength, colormapentry = 0, 256, 24
    else:
        colormapfirst, colormaplength, colormapentry = 0, 0, 0

    # flags low nibble: number of alpha bits
    if im.mode in ("LA", "RGBA"):
        flags = 8
    else:
        flags = 0

    orientation = im.encoderinfo.get("orientation", im.info.get("orientation", -1))
    if orientation > 0:
        flags = flags | 0x20  # top-to-bottom bit

    # 18-byte header
    fp.write(
        o8(id_len)
        + o8(colormaptype)
        + o8(imagetype)
        + o16(colormapfirst)
        + o16(colormaplength)
        + o8(colormapentry)
        + o16(0)
        + o16(0)
        + o16(im.size[0])
        + o16(im.size[1])
        + o8(bits)
        + o8(flags)
    )

    if id_section:
        fp.write(id_section)

    if colormaptype:
        fp.write(im.im.getpalette("RGB", "BGR"))

    if rle:
        ImageFile._save(
            im, fp, [("tga_rle", (0, 0) + im.size, 0, (rawmode, orientation))]
        )
    else:
        ImageFile._save(
            im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, orientation))]
        )

    # write targa version 2 footer
    fp.write(b"\000" * 8 + b"TRUEVISION-XFILE." + b"\000")


#
# --------------------------------------------------------------------
# Registry


Image.register_open(TgaImageFile.format, TgaImageFile)
Image.register_save(TgaImageFile.format, _save)

Image.register_extensions(TgaImageFile.format, [".tga", ".icb", ".vda", ".vst"])

Image.register_mime(TgaImageFile.format, "image/x-tga")
diff --git a/.venv/lib/python3.9/site-packages/PIL/TiffImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/TiffImagePlugin.py
new file mode 100644
index 00000000..6b311bbf
--- /dev/null
+++ b/.venv/lib/python3.9/site-packages/PIL/TiffImagePlugin.py
@@ -0,0 +1,2043 @@
#
# The Python Imaging Library.
# $Id$
#
# TIFF file handling
#
# TIFF is a flexible, if somewhat aged, image file format originally
# defined by Aldus. Although TIFF supports a wide variety of pixel
# layouts and compression methods, the name doesn't really stand for
# "thousands of incompatible file formats," it just feels that way.
#
# To read TIFF data from a stream, the stream must be seekable. For
# progressive decoding, make sure to use TIFF files where the tag
# directory is placed first in the file.
#
# History:
# 1995-09-01 fl   Created
# 1996-05-04 fl   Handle JPEGTABLES tag
# 1996-05-18 fl   Fixed COLORMAP support
# 1997-01-05 fl   Fixed PREDICTOR support
# 1997-08-27 fl   Added support for rational tags (from Perry Stoll)
# 1998-01-10 fl   Fixed seek/tell (from Jan Blom)
# 1998-07-15 fl   Use private names for internal variables
# 1999-06-13 fl   Rewritten for PIL 1.0 (1.0)
# 2000-10-11 fl   Additional fixes for Python 2.0 (1.1)
# 2001-04-17 fl   Fixed rewind support (seek to frame 0) (1.2)
# 2001-05-12 fl   Added write support for more tags (from Greg Couch) (1.3)
# 2001-12-18 fl   Added workaround for broken Matrox library
# 2002-01-18 fl   Don't mess up if photometric tag is missing (D. 
Alan Stewart) +# 2003-05-19 fl Check FILLORDER tag +# 2003-09-26 fl Added RGBa support +# 2004-02-24 fl Added DPI support; fixed rational write support +# 2005-02-07 fl Added workaround for broken Corel Draw 10 files +# 2006-01-09 fl Added support for float/double tags (from Russell Nelson) +# +# Copyright (c) 1997-2006 by Secret Labs AB. All rights reserved. +# Copyright (c) 1995-1997 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +import io +import itertools +import logging +import os +import struct +import warnings +from collections.abc import MutableMapping +from fractions import Fraction +from numbers import Number, Rational + +from . import Image, ImageFile, ImageOps, ImagePalette, TiffTags +from ._binary import o8 +from .TiffTags import TYPES + +logger = logging.getLogger(__name__) + +# Set these to true to force use of libtiff for reading or writing. +READ_LIBTIFF = False +WRITE_LIBTIFF = False +IFD_LEGACY_API = True +STRIP_SIZE = 65536 + +II = b"II" # little-endian (Intel style) +MM = b"MM" # big-endian (Motorola style) + +# +# -------------------------------------------------------------------- +# Read TIFF files + +# a few tag names, just to make the code below a bit more readable +IMAGEWIDTH = 256 +IMAGELENGTH = 257 +BITSPERSAMPLE = 258 +COMPRESSION = 259 +PHOTOMETRIC_INTERPRETATION = 262 +FILLORDER = 266 +IMAGEDESCRIPTION = 270 +STRIPOFFSETS = 273 +SAMPLESPERPIXEL = 277 +ROWSPERSTRIP = 278 +STRIPBYTECOUNTS = 279 +X_RESOLUTION = 282 +Y_RESOLUTION = 283 +PLANAR_CONFIGURATION = 284 +RESOLUTION_UNIT = 296 +TRANSFERFUNCTION = 301 +SOFTWARE = 305 +DATE_TIME = 306 +ARTIST = 315 +PREDICTOR = 317 +COLORMAP = 320 +TILEOFFSETS = 324 +SUBIFD = 330 +EXTRASAMPLES = 338 +SAMPLEFORMAT = 339 +JPEGTABLES = 347 +YCBCRSUBSAMPLING = 530 +REFERENCEBLACKWHITE = 532 +COPYRIGHT = 33432 +IPTC_NAA_CHUNK = 33723 # newsphoto properties +PHOTOSHOP_CHUNK = 34377 # photoshop properties +ICCPROFILE = 34675 +EXIFIFD = 34665 +XMP = 700 
+JPEGQUALITY = 65537 # pseudo-tag by libtiff + +# https://github.com/imagej/ImageJA/blob/master/src/main/java/ij/io/TiffDecoder.java +IMAGEJ_META_DATA_BYTE_COUNTS = 50838 +IMAGEJ_META_DATA = 50839 + +COMPRESSION_INFO = { + # Compression => pil compression name + 1: "raw", + 2: "tiff_ccitt", + 3: "group3", + 4: "group4", + 5: "tiff_lzw", + 6: "tiff_jpeg", # obsolete + 7: "jpeg", + 8: "tiff_adobe_deflate", + 32771: "tiff_raw_16", # 16-bit padding + 32773: "packbits", + 32809: "tiff_thunderscan", + 32946: "tiff_deflate", + 34676: "tiff_sgilog", + 34677: "tiff_sgilog24", + 34925: "lzma", + 50000: "zstd", + 50001: "webp", +} + +COMPRESSION_INFO_REV = {v: k for k, v in COMPRESSION_INFO.items()} + +OPEN_INFO = { + # (ByteOrder, PhotoInterpretation, SampleFormat, FillOrder, BitsPerSample, + # ExtraSamples) => mode, rawmode + (II, 0, (1,), 1, (1,), ()): ("1", "1;I"), + (MM, 0, (1,), 1, (1,), ()): ("1", "1;I"), + (II, 0, (1,), 2, (1,), ()): ("1", "1;IR"), + (MM, 0, (1,), 2, (1,), ()): ("1", "1;IR"), + (II, 1, (1,), 1, (1,), ()): ("1", "1"), + (MM, 1, (1,), 1, (1,), ()): ("1", "1"), + (II, 1, (1,), 2, (1,), ()): ("1", "1;R"), + (MM, 1, (1,), 2, (1,), ()): ("1", "1;R"), + (II, 0, (1,), 1, (2,), ()): ("L", "L;2I"), + (MM, 0, (1,), 1, (2,), ()): ("L", "L;2I"), + (II, 0, (1,), 2, (2,), ()): ("L", "L;2IR"), + (MM, 0, (1,), 2, (2,), ()): ("L", "L;2IR"), + (II, 1, (1,), 1, (2,), ()): ("L", "L;2"), + (MM, 1, (1,), 1, (2,), ()): ("L", "L;2"), + (II, 1, (1,), 2, (2,), ()): ("L", "L;2R"), + (MM, 1, (1,), 2, (2,), ()): ("L", "L;2R"), + (II, 0, (1,), 1, (4,), ()): ("L", "L;4I"), + (MM, 0, (1,), 1, (4,), ()): ("L", "L;4I"), + (II, 0, (1,), 2, (4,), ()): ("L", "L;4IR"), + (MM, 0, (1,), 2, (4,), ()): ("L", "L;4IR"), + (II, 1, (1,), 1, (4,), ()): ("L", "L;4"), + (MM, 1, (1,), 1, (4,), ()): ("L", "L;4"), + (II, 1, (1,), 2, (4,), ()): ("L", "L;4R"), + (MM, 1, (1,), 2, (4,), ()): ("L", "L;4R"), + (II, 0, (1,), 1, (8,), ()): ("L", "L;I"), + (MM, 0, (1,), 1, (8,), ()): ("L", "L;I"), + (II, 0, 
(1,), 2, (8,), ()): ("L", "L;IR"), + (MM, 0, (1,), 2, (8,), ()): ("L", "L;IR"), + (II, 1, (1,), 1, (8,), ()): ("L", "L"), + (MM, 1, (1,), 1, (8,), ()): ("L", "L"), + (II, 1, (1,), 2, (8,), ()): ("L", "L;R"), + (MM, 1, (1,), 2, (8,), ()): ("L", "L;R"), + (II, 1, (1,), 1, (12,), ()): ("I;16", "I;12"), + (II, 1, (1,), 1, (16,), ()): ("I;16", "I;16"), + (MM, 1, (1,), 1, (16,), ()): ("I;16B", "I;16B"), + (II, 1, (2,), 1, (16,), ()): ("I", "I;16S"), + (MM, 1, (2,), 1, (16,), ()): ("I", "I;16BS"), + (II, 0, (3,), 1, (32,), ()): ("F", "F;32F"), + (MM, 0, (3,), 1, (32,), ()): ("F", "F;32BF"), + (II, 1, (1,), 1, (32,), ()): ("I", "I;32N"), + (II, 1, (2,), 1, (32,), ()): ("I", "I;32S"), + (MM, 1, (2,), 1, (32,), ()): ("I", "I;32BS"), + (II, 1, (3,), 1, (32,), ()): ("F", "F;32F"), + (MM, 1, (3,), 1, (32,), ()): ("F", "F;32BF"), + (II, 1, (1,), 1, (8, 8), (2,)): ("LA", "LA"), + (MM, 1, (1,), 1, (8, 8), (2,)): ("LA", "LA"), + (II, 2, (1,), 1, (8, 8, 8), ()): ("RGB", "RGB"), + (MM, 2, (1,), 1, (8, 8, 8), ()): ("RGB", "RGB"), + (II, 2, (1,), 2, (8, 8, 8), ()): ("RGB", "RGB;R"), + (MM, 2, (1,), 2, (8, 8, 8), ()): ("RGB", "RGB;R"), + (II, 2, (1,), 1, (8, 8, 8, 8), ()): ("RGBA", "RGBA"), # missing ExtraSamples + (MM, 2, (1,), 1, (8, 8, 8, 8), ()): ("RGBA", "RGBA"), # missing ExtraSamples + (II, 2, (1,), 1, (8, 8, 8, 8), (0,)): ("RGBX", "RGBX"), + (MM, 2, (1,), 1, (8, 8, 8, 8), (0,)): ("RGBX", "RGBX"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8), (0, 0)): ("RGBX", "RGBXX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8), (0, 0)): ("RGBX", "RGBXX"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0, 0)): ("RGBX", "RGBXXX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0, 0)): ("RGBX", "RGBXXX"), + (II, 2, (1,), 1, (8, 8, 8, 8), (1,)): ("RGBA", "RGBa"), + (MM, 2, (1,), 1, (8, 8, 8, 8), (1,)): ("RGBA", "RGBa"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8), (1, 0)): ("RGBA", "RGBaX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8), (1, 0)): ("RGBA", "RGBaX"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (1, 0, 0)): ("RGBA", "RGBaXX"), + (MM, 
2, (1,), 1, (8, 8, 8, 8, 8, 8), (1, 0, 0)): ("RGBA", "RGBaXX"), + (II, 2, (1,), 1, (8, 8, 8, 8), (2,)): ("RGBA", "RGBA"), + (MM, 2, (1,), 1, (8, 8, 8, 8), (2,)): ("RGBA", "RGBA"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8), (2, 0)): ("RGBA", "RGBAX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8), (2, 0)): ("RGBA", "RGBAX"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (2, 0, 0)): ("RGBA", "RGBAXX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (2, 0, 0)): ("RGBA", "RGBAXX"), + (II, 2, (1,), 1, (8, 8, 8, 8), (999,)): ("RGBA", "RGBA"), # Corel Draw 10 + (MM, 2, (1,), 1, (8, 8, 8, 8), (999,)): ("RGBA", "RGBA"), # Corel Draw 10 + (II, 2, (1,), 1, (16, 16, 16), ()): ("RGB", "RGB;16L"), + (MM, 2, (1,), 1, (16, 16, 16), ()): ("RGB", "RGB;16B"), + (II, 2, (1,), 1, (16, 16, 16, 16), ()): ("RGBA", "RGBA;16L"), + (MM, 2, (1,), 1, (16, 16, 16, 16), ()): ("RGBA", "RGBA;16B"), + (II, 2, (1,), 1, (16, 16, 16, 16), (0,)): ("RGBX", "RGBX;16L"), + (MM, 2, (1,), 1, (16, 16, 16, 16), (0,)): ("RGBX", "RGBX;16B"), + (II, 2, (1,), 1, (16, 16, 16, 16), (1,)): ("RGBA", "RGBa;16L"), + (MM, 2, (1,), 1, (16, 16, 16, 16), (1,)): ("RGBA", "RGBa;16B"), + (II, 2, (1,), 1, (16, 16, 16, 16), (2,)): ("RGBA", "RGBA;16L"), + (MM, 2, (1,), 1, (16, 16, 16, 16), (2,)): ("RGBA", "RGBA;16B"), + (II, 3, (1,), 1, (1,), ()): ("P", "P;1"), + (MM, 3, (1,), 1, (1,), ()): ("P", "P;1"), + (II, 3, (1,), 2, (1,), ()): ("P", "P;1R"), + (MM, 3, (1,), 2, (1,), ()): ("P", "P;1R"), + (II, 3, (1,), 1, (2,), ()): ("P", "P;2"), + (MM, 3, (1,), 1, (2,), ()): ("P", "P;2"), + (II, 3, (1,), 2, (2,), ()): ("P", "P;2R"), + (MM, 3, (1,), 2, (2,), ()): ("P", "P;2R"), + (II, 3, (1,), 1, (4,), ()): ("P", "P;4"), + (MM, 3, (1,), 1, (4,), ()): ("P", "P;4"), + (II, 3, (1,), 2, (4,), ()): ("P", "P;4R"), + (MM, 3, (1,), 2, (4,), ()): ("P", "P;4R"), + (II, 3, (1,), 1, (8,), ()): ("P", "P"), + (MM, 3, (1,), 1, (8,), ()): ("P", "P"), + (II, 3, (1,), 1, (8, 8), (2,)): ("PA", "PA"), + (MM, 3, (1,), 1, (8, 8), (2,)): ("PA", "PA"), + (II, 3, (1,), 2, (8,), ()): ("P", "P;R"), 
    (MM, 3, (1,), 2, (8,), ()): ("P", "P;R"),
    (II, 5, (1,), 1, (8, 8, 8, 8), ()): ("CMYK", "CMYK"),
    (MM, 5, (1,), 1, (8, 8, 8, 8), ()): ("CMYK", "CMYK"),
    (II, 5, (1,), 1, (8, 8, 8, 8, 8), (0,)): ("CMYK", "CMYKX"),
    (MM, 5, (1,), 1, (8, 8, 8, 8, 8), (0,)): ("CMYK", "CMYKX"),
    (II, 5, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0)): ("CMYK", "CMYKXX"),
    (MM, 5, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0)): ("CMYK", "CMYKXX"),
    (II, 5, (1,), 1, (16, 16, 16, 16), ()): ("CMYK", "CMYK;16L"),
    # JPEG compressed images handled by LibTiff and auto-converted to RGBX
    # Minimal Baseline TIFF requires YCbCr images to have 3 SamplesPerPixel
    (II, 6, (1,), 1, (8, 8, 8), ()): ("RGB", "RGBX"),
    (MM, 6, (1,), 1, (8, 8, 8), ()): ("RGB", "RGBX"),
    (II, 8, (1,), 1, (8, 8, 8), ()): ("LAB", "LAB"),
    (MM, 8, (1,), 1, (8, 8, 8), ()): ("LAB", "LAB"),
}

PREFIXES = [
    b"MM\x00\x2A",  # Valid TIFF header with big-endian byte order
    b"II\x2A\x00",  # Valid TIFF header with little-endian byte order
    b"MM\x2A\x00",  # Invalid TIFF header, assume big-endian
    b"II\x00\x2A",  # Invalid TIFF header, assume little-endian
]


def _accept(prefix):
    # accept any of the four known 4-byte TIFF magic prefixes
    return prefix[:4] in PREFIXES


def _limit_rational(val, max_val):
    """Approximate *val* as (numerator, denominator) with both terms <= *max_val*.

    Values with magnitude > 1 are inverted first so the limited term is the
    denominator, then the pair is flipped back.
    """
    inv = abs(val) > 1
    n_d = IFDRational(1 / val if inv else val).limit_rational(max_val)
    return n_d[::-1] if inv else n_d


def _limit_signed_rational(val, max_val, min_val):
    """Like _limit_rational, but additionally keeps both terms >= *min_val*
    (for signed rational tags)."""
    frac = Fraction(val)
    n_d = frac.numerator, frac.denominator

    if min(n_d) < min_val:
        n_d = _limit_rational(val, abs(min_val))

    if max(n_d) > max_val:
        val = Fraction(*n_d)
        n_d = _limit_rational(val, max_val)

    return n_d


##
# Wrapper for TIFF IFDs.

_load_dispatch = {}
_write_dispatch = {}


class IFDRational(Rational):
    """Implements a rational class where 0/0 is a legal value to match
    the in the wild use of exif rationals. 
    e.g., DigitalZoomRatio - 0.00/0.00  indicates that no digital zoom was used
    """

    """ If the denominator is 0, store this as a float('nan'), otherwise store
    as a fractions.Fraction(). Delegate as appropriate

    """

    __slots__ = ("_numerator", "_denominator", "_val")

    def __init__(self, value, denominator=1):
        """
        :param value: either an integer numerator, a
        float/rational/other number, or an IFDRational
        :param denominator: Optional integer denominator
        """
        if isinstance(value, IFDRational):
            # copy constructor
            self._numerator = value.numerator
            self._denominator = value.denominator
            self._val = value._val
            return

        if isinstance(value, Fraction):
            self._numerator = value.numerator
            self._denominator = value.denominator
        else:
            self._numerator = value
            self._denominator = denominator

        if denominator == 0:
            # 0/0 and n/0 are legal in EXIF; represented as NaN
            self._val = float("nan")
        elif denominator == 1:
            self._val = Fraction(value)
        else:
            self._val = Fraction(value, denominator)

    @property
    def numerator(a):
        return a._numerator

    @property
    def denominator(a):
        return a._denominator

    def limit_rational(self, max_denominator):
        """

        :param max_denominator: Integer, the maximum denominator value
        :returns: Tuple of (numerator, denominator)
        """

        if self.denominator == 0:
            return (self.numerator, self.denominator)

        f = self._val.limit_denominator(max_denominator)
        return (f.numerator, f.denominator)

    def __repr__(self):
        return str(float(self._val))

    def __hash__(self):
        return self._val.__hash__()

    def __eq__(self, other):
        val = self._val
        if isinstance(other, IFDRational):
            other = other._val
        if isinstance(other, float):
            val = float(val)
        return val == other

    def __getstate__(self):
        return [self._val, self._numerator, self._denominator]

    def __setstate__(self, state):
        IFDRational.__init__(self, 0)
        _val, _numerator, _denominator = state
        self._val = _val
        self._numerator = _numerator
        self._denominator = _denominator

    def 
_delegate(op): + def delegate(self, *args): + return getattr(self._val, op)(*args) + + return delegate + + """ a = ['add','radd', 'sub', 'rsub', 'mul', 'rmul', + 'truediv', 'rtruediv', 'floordiv', 'rfloordiv', + 'mod','rmod', 'pow','rpow', 'pos', 'neg', + 'abs', 'trunc', 'lt', 'gt', 'le', 'ge', 'bool', + 'ceil', 'floor', 'round'] + print("\n".join("__%s__ = _delegate('__%s__')" % (s,s) for s in a)) + """ + + __add__ = _delegate("__add__") + __radd__ = _delegate("__radd__") + __sub__ = _delegate("__sub__") + __rsub__ = _delegate("__rsub__") + __mul__ = _delegate("__mul__") + __rmul__ = _delegate("__rmul__") + __truediv__ = _delegate("__truediv__") + __rtruediv__ = _delegate("__rtruediv__") + __floordiv__ = _delegate("__floordiv__") + __rfloordiv__ = _delegate("__rfloordiv__") + __mod__ = _delegate("__mod__") + __rmod__ = _delegate("__rmod__") + __pow__ = _delegate("__pow__") + __rpow__ = _delegate("__rpow__") + __pos__ = _delegate("__pos__") + __neg__ = _delegate("__neg__") + __abs__ = _delegate("__abs__") + __trunc__ = _delegate("__trunc__") + __lt__ = _delegate("__lt__") + __gt__ = _delegate("__gt__") + __le__ = _delegate("__le__") + __ge__ = _delegate("__ge__") + __bool__ = _delegate("__bool__") + __ceil__ = _delegate("__ceil__") + __floor__ = _delegate("__floor__") + __round__ = _delegate("__round__") + + +class ImageFileDirectory_v2(MutableMapping): + """This class represents a TIFF tag directory. To speed things up, we + don't decode tags unless they're asked for. + + Exposes a dictionary interface of the tags in the directory:: + + ifd = ImageFileDirectory_v2() + ifd[key] = 'Some Data' + ifd.tagtype[key] = TiffTags.ASCII + print(ifd[key]) + 'Some Data' + + Individual values are returned as the strings or numbers, sequences are + returned as tuples of the values. + + The tiff metadata type of each item is stored in a dictionary of + tag types in + :attr:`~PIL.TiffImagePlugin.ImageFileDirectory_v2.tagtype`. 
The types + are read from a tiff file, guessed from the type added, or added + manually. + + Data Structures: + + * ``self.tagtype = {}`` + + * Key: numerical TIFF tag number + * Value: integer corresponding to the data type from + :py:data:`.TiffTags.TYPES` + + .. versionadded:: 3.0.0 + + 'Internal' data structures: + + * ``self._tags_v2 = {}`` + + * Key: numerical TIFF tag number + * Value: decoded data, as tuple for multiple values + + * ``self._tagdata = {}`` + + * Key: numerical TIFF tag number + * Value: undecoded byte string from file + + * ``self._tags_v1 = {}`` + + * Key: numerical TIFF tag number + * Value: decoded data in the v1 format + + Tags will be found in the private attributes ``self._tagdata``, and in + ``self._tags_v2`` once decoded. + + ``self.legacy_api`` is a value for internal use, and shouldn't be changed + from outside code. In cooperation with + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1`, if ``legacy_api`` + is true, then decoded tags will be populated into both ``_tags_v1`` and + ``_tags_v2``. ``_tags_v2`` will be used if this IFD is used in the TIFF + save routine. Tags should be read from ``_tags_v1`` if + ``legacy_api == true``. + + """ + + def __init__(self, ifh=b"II\052\0\0\0\0\0", prefix=None, group=None): + """Initialize an ImageFileDirectory. + + To construct an ImageFileDirectory from a real file, pass the 8-byte + magic header to the constructor. To only set the endianness, pass it + as the 'prefix' keyword argument. + + :param ifh: One of the accepted magic headers (cf. PREFIXES); also sets + endianness. + :param prefix: Override the endianness of the file. 
+ """ + if ifh[:4] not in PREFIXES: + raise SyntaxError(f"not a TIFF file (header {repr(ifh)} not valid)") + self._prefix = prefix if prefix is not None else ifh[:2] + if self._prefix == MM: + self._endian = ">" + elif self._prefix == II: + self._endian = "<" + else: + raise SyntaxError("not a TIFF IFD") + self.group = group + self.tagtype = {} + """ Dictionary of tag types """ + self.reset() + (self.next,) = self._unpack("L", ifh[4:]) + self._legacy_api = False + + prefix = property(lambda self: self._prefix) + offset = property(lambda self: self._offset) + legacy_api = property(lambda self: self._legacy_api) + + @legacy_api.setter + def legacy_api(self, value): + raise Exception("Not allowing setting of legacy api") + + def reset(self): + self._tags_v1 = {} # will remain empty if legacy_api is false + self._tags_v2 = {} # main tag storage + self._tagdata = {} + self.tagtype = {} # added 2008-06-05 by Florian Hoech + self._next = None + self._offset = None + + def __str__(self): + return str(dict(self)) + + def named(self): + """ + :returns: dict of name|key: value + + Returns the complete tag dictionary, with named tags where possible. 
+ """ + return { + TiffTags.lookup(code, self.group).name: value + for code, value in self.items() + } + + def __len__(self): + return len(set(self._tagdata) | set(self._tags_v2)) + + def __getitem__(self, tag): + if tag not in self._tags_v2: # unpack on the fly + data = self._tagdata[tag] + typ = self.tagtype[tag] + size, handler = self._load_dispatch[typ] + self[tag] = handler(self, data, self.legacy_api) # check type + val = self._tags_v2[tag] + if self.legacy_api and not isinstance(val, (tuple, bytes)): + val = (val,) + return val + + def __contains__(self, tag): + return tag in self._tags_v2 or tag in self._tagdata + + def __setitem__(self, tag, value): + self._setitem(tag, value, self.legacy_api) + + def _setitem(self, tag, value, legacy_api): + basetypes = (Number, bytes, str) + + info = TiffTags.lookup(tag, self.group) + values = [value] if isinstance(value, basetypes) else value + + if tag not in self.tagtype: + if info.type: + self.tagtype[tag] = info.type + else: + self.tagtype[tag] = TiffTags.UNDEFINED + if all(isinstance(v, IFDRational) for v in values): + self.tagtype[tag] = ( + TiffTags.RATIONAL + if all(v >= 0 for v in values) + else TiffTags.SIGNED_RATIONAL + ) + elif all(isinstance(v, int) for v in values): + if all(0 <= v < 2 ** 16 for v in values): + self.tagtype[tag] = TiffTags.SHORT + elif all(-(2 ** 15) < v < 2 ** 15 for v in values): + self.tagtype[tag] = TiffTags.SIGNED_SHORT + else: + self.tagtype[tag] = ( + TiffTags.LONG + if all(v >= 0 for v in values) + else TiffTags.SIGNED_LONG + ) + elif all(isinstance(v, float) for v in values): + self.tagtype[tag] = TiffTags.DOUBLE + elif all(isinstance(v, str) for v in values): + self.tagtype[tag] = TiffTags.ASCII + elif all(isinstance(v, bytes) for v in values): + self.tagtype[tag] = TiffTags.BYTE + + if self.tagtype[tag] == TiffTags.UNDEFINED: + values = [ + v.encode("ascii", "replace") if isinstance(v, str) else v + for v in values + ] + elif self.tagtype[tag] == TiffTags.RATIONAL: + values = 
[float(v) if isinstance(v, int) else v for v in values] + + is_ifd = self.tagtype[tag] == TiffTags.LONG and isinstance(values, dict) + if not is_ifd: + values = tuple(info.cvt_enum(value) for value in values) + + dest = self._tags_v1 if legacy_api else self._tags_v2 + + # Three branches: + # Spec'd length == 1, Actual length 1, store as element + # Spec'd length == 1, Actual > 1, Warn and truncate. Formerly barfed. + # No Spec, Actual length 1, Formerly (<4.2) returned a 1 element tuple. + # Don't mess with the legacy api, since it's frozen. + if not is_ifd and ( + (info.length == 1) + or self.tagtype[tag] == TiffTags.BYTE + or (info.length is None and len(values) == 1 and not legacy_api) + ): + # Don't mess with the legacy api, since it's frozen. + if legacy_api and self.tagtype[tag] in [ + TiffTags.RATIONAL, + TiffTags.SIGNED_RATIONAL, + ]: # rationals + values = (values,) + try: + (dest[tag],) = values + except ValueError: + # We've got a builtin tag with 1 expected entry + warnings.warn( + f"Metadata Warning, tag {tag} had too many entries: " + f"{len(values)}, expected 1" + ) + dest[tag] = values[0] + + else: + # Spec'd length > 1 or undefined + # Unspec'd, and length > 1 + dest[tag] = values + + def __delitem__(self, tag): + self._tags_v2.pop(tag, None) + self._tags_v1.pop(tag, None) + self._tagdata.pop(tag, None) + + def __iter__(self): + return iter(set(self._tagdata) | set(self._tags_v2)) + + def _unpack(self, fmt, data): + return struct.unpack(self._endian + fmt, data) + + def _pack(self, fmt, *values): + return struct.pack(self._endian + fmt, *values) + + def _register_loader(idx, size): + def decorator(func): + from .TiffTags import TYPES + + if func.__name__.startswith("load_"): + TYPES[idx] = func.__name__[5:].replace("_", " ") + _load_dispatch[idx] = size, func # noqa: F821 + return func + + return decorator + + def _register_writer(idx): + def decorator(func): + _write_dispatch[idx] = func # noqa: F821 + return func + + return decorator + + def 
_register_basic(idx_fmt_name): + from .TiffTags import TYPES + + idx, fmt, name = idx_fmt_name + TYPES[idx] = name + size = struct.calcsize("=" + fmt) + _load_dispatch[idx] = ( # noqa: F821 + size, + lambda self, data, legacy_api=True: ( + self._unpack("{}{}".format(len(data) // size, fmt), data) + ), + ) + _write_dispatch[idx] = lambda self, *values: ( # noqa: F821 + b"".join(self._pack(fmt, value) for value in values) + ) + + list( + map( + _register_basic, + [ + (TiffTags.SHORT, "H", "short"), + (TiffTags.LONG, "L", "long"), + (TiffTags.SIGNED_BYTE, "b", "signed byte"), + (TiffTags.SIGNED_SHORT, "h", "signed short"), + (TiffTags.SIGNED_LONG, "l", "signed long"), + (TiffTags.FLOAT, "f", "float"), + (TiffTags.DOUBLE, "d", "double"), + (TiffTags.IFD, "L", "long"), + ], + ) + ) + + @_register_loader(1, 1) # Basic type, except for the legacy API. + def load_byte(self, data, legacy_api=True): + return data + + @_register_writer(1) # Basic type, except for the legacy API. + def write_byte(self, data): + return data + + @_register_loader(2, 1) + def load_string(self, data, legacy_api=True): + if data.endswith(b"\0"): + data = data[:-1] + return data.decode("latin-1", "replace") + + @_register_writer(2) + def write_string(self, value): + # remerge of https://github.com/python-pillow/Pillow/pull/1416 + return b"" + value.encode("ascii", "replace") + b"\0" + + @_register_loader(5, 8) + def load_rational(self, data, legacy_api=True): + vals = self._unpack("{}L".format(len(data) // 4), data) + + def combine(a, b): + return (a, b) if legacy_api else IFDRational(a, b) + + return tuple(combine(num, denom) for num, denom in zip(vals[::2], vals[1::2])) + + @_register_writer(5) + def write_rational(self, *values): + return b"".join( + self._pack("2L", *_limit_rational(frac, 2 ** 32 - 1)) for frac in values + ) + + @_register_loader(7, 1) + def load_undefined(self, data, legacy_api=True): + return data + + @_register_writer(7) + def write_undefined(self, value): + return value + + 
@_register_loader(10, 8) + def load_signed_rational(self, data, legacy_api=True): + vals = self._unpack("{}l".format(len(data) // 4), data) + + def combine(a, b): + return (a, b) if legacy_api else IFDRational(a, b) + + return tuple(combine(num, denom) for num, denom in zip(vals[::2], vals[1::2])) + + @_register_writer(10) + def write_signed_rational(self, *values): + return b"".join( + self._pack("2l", *_limit_signed_rational(frac, 2 ** 31 - 1, -(2 ** 31))) + for frac in values + ) + + def _ensure_read(self, fp, size): + ret = fp.read(size) + if len(ret) != size: + raise OSError( + "Corrupt EXIF data. " + f"Expecting to read {size} bytes but only got {len(ret)}. " + ) + return ret + + def load(self, fp): + + self.reset() + self._offset = fp.tell() + + try: + for i in range(self._unpack("H", self._ensure_read(fp, 2))[0]): + tag, typ, count, data = self._unpack("HHL4s", self._ensure_read(fp, 12)) + + tagname = TiffTags.lookup(tag, self.group).name + typname = TYPES.get(typ, "unknown") + msg = f"tag: {tagname} ({tag}) - type: {typname} ({typ})" + + try: + unit_size, handler = self._load_dispatch[typ] + except KeyError: + logger.debug(msg + f" - unsupported type {typ}") + continue # ignore unsupported type + size = count * unit_size + if size > 4: + here = fp.tell() + (offset,) = self._unpack("L", data) + msg += f" Tag Location: {here} - Data Location: {offset}" + fp.seek(offset) + data = ImageFile._safe_read(fp, size) + fp.seek(here) + else: + data = data[:size] + + if len(data) != size: + warnings.warn( + "Possibly corrupt EXIF data. " + f"Expecting to read {size} bytes but only got {len(data)}." 
+ f" Skipping tag {tag}" + ) + logger.debug(msg) + continue + + if not data: + logger.debug(msg) + continue + + self._tagdata[tag] = data + self.tagtype[tag] = typ + + msg += " - value: " + ( + "" % size if size > 32 else repr(data) + ) + logger.debug(msg) + + (self.next,) = self._unpack("L", self._ensure_read(fp, 4)) + except OSError as msg: + warnings.warn(str(msg)) + return + + def tobytes(self, offset=0): + # FIXME What about tagdata? + result = self._pack("H", len(self._tags_v2)) + + entries = [] + offset = offset + len(result) + len(self._tags_v2) * 12 + 4 + stripoffsets = None + + # pass 1: convert tags to binary format + # always write tags in ascending order + for tag, value in sorted(self._tags_v2.items()): + if tag == STRIPOFFSETS: + stripoffsets = len(entries) + typ = self.tagtype.get(tag) + logger.debug(f"Tag {tag}, Type: {typ}, Value: {repr(value)}") + is_ifd = typ == TiffTags.LONG and isinstance(value, dict) + if is_ifd: + if self._endian == "<": + ifh = b"II\x2A\x00\x08\x00\x00\x00" + else: + ifh = b"MM\x00\x2A\x00\x00\x00\x08" + ifd = ImageFileDirectory_v2(ifh, group=tag) + values = self._tags_v2[tag] + for ifd_tag, ifd_value in values.items(): + ifd[ifd_tag] = ifd_value + data = ifd.tobytes(offset) + else: + values = value if isinstance(value, tuple) else (value,) + data = self._write_dispatch[typ](self, *values) + + tagname = TiffTags.lookup(tag, self.group).name + typname = "ifd" if is_ifd else TYPES.get(typ, "unknown") + msg = f"save: {tagname} ({tag}) - type: {typname} ({typ})" + msg += " - value: " + ( + "" % len(data) if len(data) >= 16 else str(values) + ) + logger.debug(msg) + + # count is sum of lengths for string and arbitrary data + if is_ifd: + count = 1 + elif typ in [TiffTags.BYTE, TiffTags.ASCII, TiffTags.UNDEFINED]: + count = len(data) + else: + count = len(values) + # figure out if data fits into the entry + if len(data) <= 4: + entries.append((tag, typ, count, data.ljust(4, b"\0"), b"")) + else: + entries.append((tag, typ, count, 
self._pack("L", offset), data)) + offset += (len(data) + 1) // 2 * 2 # pad to word + + # update strip offset data to point beyond auxiliary data + if stripoffsets is not None: + tag, typ, count, value, data = entries[stripoffsets] + if data: + raise NotImplementedError("multistrip support not yet implemented") + value = self._pack("L", self._unpack("L", value)[0] + offset) + entries[stripoffsets] = tag, typ, count, value, data + + # pass 2: write entries to file + for tag, typ, count, value, data in entries: + logger.debug(f"{tag} {typ} {count} {repr(value)} {repr(data)}") + result += self._pack("HHL4s", tag, typ, count, value) + + # -- overwrite here for multi-page -- + result += b"\0\0\0\0" # end of entries + + # pass 3: write auxiliary data to file + for tag, typ, count, value, data in entries: + result += data + if len(data) & 1: + result += b"\0" + + return result + + def save(self, fp): + + if fp.tell() == 0: # skip TIFF header on subsequent pages + # tiff header -- PIL always starts the first IFD at offset 8 + fp.write(self._prefix + self._pack("HL", 42, 8)) + + offset = fp.tell() + result = self.tobytes(offset) + fp.write(result) + return offset + len(result) + + +ImageFileDirectory_v2._load_dispatch = _load_dispatch +ImageFileDirectory_v2._write_dispatch = _write_dispatch +for idx, name in TYPES.items(): + name = name.replace(" ", "_") + setattr(ImageFileDirectory_v2, "load_" + name, _load_dispatch[idx][1]) + setattr(ImageFileDirectory_v2, "write_" + name, _write_dispatch[idx]) +del _load_dispatch, _write_dispatch, idx, name + + +# Legacy ImageFileDirectory support. +class ImageFileDirectory_v1(ImageFileDirectory_v2): + """This class represents the **legacy** interface to a TIFF tag directory. 
+ + Exposes a dictionary interface of the tags in the directory:: + + ifd = ImageFileDirectory_v1() + ifd[key] = 'Some Data' + ifd.tagtype[key] = TiffTags.ASCII + print(ifd[key]) + ('Some Data',) + + Also contains a dictionary of tag types as read from the tiff image file, + :attr:`~PIL.TiffImagePlugin.ImageFileDirectory_v1.tagtype`. + + Values are returned as a tuple. + + .. deprecated:: 3.0.0 + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._legacy_api = True + + tags = property(lambda self: self._tags_v1) + tagdata = property(lambda self: self._tagdata) + + # defined in ImageFileDirectory_v2 + tagtype: dict + """Dictionary of tag types""" + + @classmethod + def from_v2(cls, original): + """Returns an + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` + instance with the same data as is contained in the original + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` + instance. + + :returns: :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` + + """ + + ifd = cls(prefix=original.prefix) + ifd._tagdata = original._tagdata + ifd.tagtype = original.tagtype + ifd.next = original.next # an indicator for multipage tiffs + return ifd + + def to_v2(self): + """Returns an + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` + instance with the same data as is contained in the original + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` + instance. 
+ + :returns: :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` + + """ + + ifd = ImageFileDirectory_v2(prefix=self.prefix) + ifd._tagdata = dict(self._tagdata) + ifd.tagtype = dict(self.tagtype) + ifd._tags_v2 = dict(self._tags_v2) + return ifd + + def __contains__(self, tag): + return tag in self._tags_v1 or tag in self._tagdata + + def __len__(self): + return len(set(self._tagdata) | set(self._tags_v1)) + + def __iter__(self): + return iter(set(self._tagdata) | set(self._tags_v1)) + + def __setitem__(self, tag, value): + for legacy_api in (False, True): + self._setitem(tag, value, legacy_api) + + def __getitem__(self, tag): + if tag not in self._tags_v1: # unpack on the fly + data = self._tagdata[tag] + typ = self.tagtype[tag] + size, handler = self._load_dispatch[typ] + for legacy in (False, True): + self._setitem(tag, handler(self, data, legacy), legacy) + val = self._tags_v1[tag] + if not isinstance(val, (tuple, bytes)): + val = (val,) + return val + + +# undone -- switch this pointer when IFD_LEGACY_API == False +ImageFileDirectory = ImageFileDirectory_v1 + + +## +# Image plugin for TIFF files. 
+ + +class TiffImageFile(ImageFile.ImageFile): + + format = "TIFF" + format_description = "Adobe TIFF" + _close_exclusive_fp_after_loading = False + + def __init__(self, fp=None, filename=None): + self.tag_v2 = None + """ Image file directory (tag dictionary) """ + + self.tag = None + """ Legacy tag entries """ + + super().__init__(fp, filename) + + def _open(self): + """Open the first image in a TIFF file""" + + # Header + ifh = self.fp.read(8) + + self.tag_v2 = ImageFileDirectory_v2(ifh) + + # legacy IFD entries will be filled in later + self.ifd = None + + # setup frame pointers + self.__first = self.__next = self.tag_v2.next + self.__frame = -1 + self.__fp = self.fp + self._frame_pos = [] + self._n_frames = None + + logger.debug("*** TiffImageFile._open ***") + logger.debug(f"- __first: {self.__first}") + logger.debug(f"- ifh: {repr(ifh)}") # Use repr to avoid str(bytes) + + # and load the first frame + self._seek(0) + + @property + def n_frames(self): + if self._n_frames is None: + current = self.tell() + self._seek(len(self._frame_pos)) + while self._n_frames is None: + self._seek(self.tell() + 1) + self.seek(current) + return self._n_frames + + def seek(self, frame): + """Select a given frame as current image""" + if not self._seek_check(frame): + return + self._seek(frame) + # Create a new core image object on second and + # subsequent frames in the image. Image may be + # different size/mode. 
+ Image._decompression_bomb_check(self.size) + self.im = Image.core.new(self.mode, self.size) + + def _seek(self, frame): + self.fp = self.__fp + + # reset buffered io handle in case fp + # was passed to libtiff, invalidating the buffer + self.fp.tell() + + while len(self._frame_pos) <= frame: + if not self.__next: + raise EOFError("no more images in TIFF file") + logger.debug( + f"Seeking to frame {frame}, on frame {self.__frame}, " + f"__next {self.__next}, location: {self.fp.tell()}" + ) + self.fp.seek(self.__next) + self._frame_pos.append(self.__next) + logger.debug("Loading tags, location: %s" % self.fp.tell()) + self.tag_v2.load(self.fp) + if self.tag_v2.next in self._frame_pos: + # This IFD has already been processed + # Declare this to be the end of the image + self.__next = 0 + else: + self.__next = self.tag_v2.next + if self.__next == 0: + self._n_frames = frame + 1 + if len(self._frame_pos) == 1: + self.is_animated = self.__next != 0 + self.__frame += 1 + self.fp.seek(self._frame_pos[frame]) + self.tag_v2.load(self.fp) + # fill the legacy tag/ifd entries + self.tag = self.ifd = ImageFileDirectory_v1.from_v2(self.tag_v2) + self.__frame = frame + self._setup() + + def tell(self): + """Return the current frame number""" + return self.__frame + + def getxmp(self): + """ + Returns a dictionary containing the XMP tags. + Requires defusedxml to be installed. + :returns: XMP tags in a dictionary. 
+ """ + return self._getxmp(self.tag_v2[700]) if 700 in self.tag_v2 else {} + + def load(self): + if self.tile and self.use_load_libtiff: + return self._load_libtiff() + return super().load() + + def load_end(self): + if self._tile_orientation: + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90, + }.get(self._tile_orientation) + if method is not None: + self.im = self.im.transpose(method) + self._size = self.im.size + + # allow closing if we're on the first frame, there's no next + # This is the ImageFile.load path only, libtiff specific below. + if not self.is_animated: + self._close_exclusive_fp_after_loading = True + + # reset buffered io handle in case fp + # was passed to libtiff, invalidating the buffer + self.fp.tell() + + # load IFD data from fp before it is closed + exif = self.getexif() + for key in TiffTags.TAGS_V2_GROUPS.keys(): + if key not in exif: + continue + exif.get_ifd(key) + + def _load_libtiff(self): + """Overload method triggered when we detect a compressed tiff + Calls out to libtiff""" + + Image.Image.load(self) + + self.load_prepare() + + if not len(self.tile) == 1: + raise OSError("Not exactly one tile") + + # (self._compression, (extents tuple), + # 0, (rawmode, self._compression, fp)) + extents = self.tile[0][1] + args = list(self.tile[0][3]) + + # To be nice on memory footprint, if there's a + # file descriptor, use that instead of reading + # into a string in python. + # libtiff closes the file descriptor, so pass in a dup. + try: + fp = hasattr(self.fp, "fileno") and os.dup(self.fp.fileno()) + # flush the file descriptor, prevents error on pypy 2.4+ + # should also eliminate the need for fp.tell + # in _seek + if hasattr(self.fp, "flush"): + self.fp.flush() + except OSError: + # io.BytesIO have a fileno, but returns an OSError if + # it doesn't use a file descriptor. 
+ fp = False + + if fp: + args[2] = fp + + decoder = Image._getdecoder( + self.mode, "libtiff", tuple(args), self.decoderconfig + ) + try: + decoder.setimage(self.im, extents) + except ValueError as e: + raise OSError("Couldn't set the image") from e + + close_self_fp = self._exclusive_fp and not self.is_animated + if hasattr(self.fp, "getvalue"): + # We've got a stringio like thing passed in. Yay for all in memory. + # The decoder needs the entire file in one shot, so there's not + # a lot we can do here other than give it the entire file. + # unless we could do something like get the address of the + # underlying string for stringio. + # + # Rearranging for supporting byteio items, since they have a fileno + # that returns an OSError if there's no underlying fp. Easier to + # deal with here by reordering. + logger.debug("have getvalue. just sending in a string from getvalue") + n, err = decoder.decode(self.fp.getvalue()) + elif fp: + # we've got a actual file on disk, pass in the fp. + logger.debug("have fileno, calling fileno version of the decoder.") + if not close_self_fp: + self.fp.seek(0) + # 4 bytes, otherwise the trace might error out + n, err = decoder.decode(b"fpfp") + else: + # we have something else. + logger.debug("don't have fileno or getvalue. just reading") + self.fp.seek(0) + # UNDONE -- so much for that buffer size thing. 
+ n, err = decoder.decode(self.fp.read()) + + self.tile = [] + self.readonly = 0 + + self.load_end() + + # libtiff closed the fp in a, we need to close self.fp, if possible + if close_self_fp: + self.fp.close() + self.fp = None # might be shared + + if err < 0: + raise OSError(err) + + return Image.Image.load(self) + + def _setup(self): + """Setup this image object based on current tags""" + + if 0xBC01 in self.tag_v2: + raise OSError("Windows Media Photo files not yet supported") + + # extract relevant tags + self._compression = COMPRESSION_INFO[self.tag_v2.get(COMPRESSION, 1)] + self._planar_configuration = self.tag_v2.get(PLANAR_CONFIGURATION, 1) + + # photometric is a required tag, but not everyone is reading + # the specification + photo = self.tag_v2.get(PHOTOMETRIC_INTERPRETATION, 0) + + # old style jpeg compression images most certainly are YCbCr + if self._compression == "tiff_jpeg": + photo = 6 + + fillorder = self.tag_v2.get(FILLORDER, 1) + + logger.debug("*** Summary ***") + logger.debug(f"- compression: {self._compression}") + logger.debug(f"- photometric_interpretation: {photo}") + logger.debug(f"- planar_configuration: {self._planar_configuration}") + logger.debug(f"- fill_order: {fillorder}") + logger.debug(f"- YCbCr subsampling: {self.tag.get(530)}") + + # size + xsize = int(self.tag_v2.get(IMAGEWIDTH)) + ysize = int(self.tag_v2.get(IMAGELENGTH)) + self._size = xsize, ysize + + logger.debug(f"- size: {self.size}") + + sampleFormat = self.tag_v2.get(SAMPLEFORMAT, (1,)) + if len(sampleFormat) > 1 and max(sampleFormat) == min(sampleFormat) == 1: + # SAMPLEFORMAT is properly per band, so an RGB image will + # be (1,1,1). But, we don't support per band pixel types, + # and anything more than one band is a uint8. So, just + # take the first element. Revisit this if adding support + # for more exotic images. 
+ sampleFormat = (1,) + + bps_tuple = self.tag_v2.get(BITSPERSAMPLE, (1,)) + extra_tuple = self.tag_v2.get(EXTRASAMPLES, ()) + if photo in (2, 6, 8): # RGB, YCbCr, LAB + bps_count = 3 + elif photo == 5: # CMYK + bps_count = 4 + else: + bps_count = 1 + bps_count += len(extra_tuple) + # Some files have only one value in bps_tuple, + # while should have more. Fix it + if bps_count > len(bps_tuple) and len(bps_tuple) == 1: + bps_tuple = bps_tuple * bps_count + + samplesPerPixel = self.tag_v2.get( + SAMPLESPERPIXEL, + 3 if self._compression == "tiff_jpeg" and photo in (2, 6) else 1, + ) + if len(bps_tuple) != samplesPerPixel: + raise SyntaxError("unknown data organization") + + # mode: check photometric interpretation and bits per pixel + key = ( + self.tag_v2.prefix, + photo, + sampleFormat, + fillorder, + bps_tuple, + extra_tuple, + ) + logger.debug(f"format key: {key}") + try: + self.mode, rawmode = OPEN_INFO[key] + except KeyError as e: + logger.debug("- unsupported format") + raise SyntaxError("unknown pixel mode") from e + + logger.debug(f"- raw mode: {rawmode}") + logger.debug(f"- pil mode: {self.mode}") + + self.info["compression"] = self._compression + + xres = self.tag_v2.get(X_RESOLUTION, 1) + yres = self.tag_v2.get(Y_RESOLUTION, 1) + + if xres and yres: + resunit = self.tag_v2.get(RESOLUTION_UNIT) + if resunit == 2: # dots per inch + self.info["dpi"] = (xres, yres) + elif resunit == 3: # dots per centimeter. convert to dpi + self.info["dpi"] = (xres * 2.54, yres * 2.54) + elif resunit is None: # used to default to 1, but now 2) + self.info["dpi"] = (xres, yres) + # For backward compatibility, + # we also preserve the old behavior + self.info["resolution"] = xres, yres + else: # No absolute unit of measurement + self.info["resolution"] = xres, yres + + # build tile descriptors + x = y = layer = 0 + self.tile = [] + self.use_load_libtiff = READ_LIBTIFF or self._compression != "raw" + if self.use_load_libtiff: + # Decoder expects entire file as one tile. 
+ # There's a buffer size limit in load (64k) + # so large g4 images will fail if we use that + # function. + # + # Setup the one tile for the whole image, then + # use the _load_libtiff function. + + # libtiff handles the fillmode for us, so 1;IR should + # actually be 1;I. Including the R double reverses the + # bits, so stripes of the image are reversed. See + # https://github.com/python-pillow/Pillow/issues/279 + if fillorder == 2: + # Replace fillorder with fillorder=1 + key = key[:3] + (1,) + key[4:] + logger.debug(f"format key: {key}") + # this should always work, since all the + # fillorder==2 modes have a corresponding + # fillorder=1 mode + self.mode, rawmode = OPEN_INFO[key] + # libtiff always returns the bytes in native order. + # we're expecting image byte order. So, if the rawmode + # contains I;16, we need to convert from native to image + # byte order. + if rawmode == "I;16": + rawmode = "I;16N" + if ";16B" in rawmode: + rawmode = rawmode.replace(";16B", ";16N") + if ";16L" in rawmode: + rawmode = rawmode.replace(";16L", ";16N") + + # YCbCr images with new jpeg compression with pixels in one plane + # unpacked straight into RGB values + if ( + photo == 6 + and self._compression == "jpeg" + and self._planar_configuration == 1 + ): + rawmode = "RGB" + + # Offset in the tile tuple is 0, we go from 0,0 to + # w,h, and we only do this once -- eds + a = (rawmode, self._compression, False, self.tag_v2.offset) + self.tile.append(("libtiff", (0, 0, xsize, ysize), 0, a)) + + elif STRIPOFFSETS in self.tag_v2 or TILEOFFSETS in self.tag_v2: + # striped image + if STRIPOFFSETS in self.tag_v2: + offsets = self.tag_v2[STRIPOFFSETS] + h = self.tag_v2.get(ROWSPERSTRIP, ysize) + w = self.size[0] + else: + # tiled image + offsets = self.tag_v2[TILEOFFSETS] + w = self.tag_v2.get(322) + h = self.tag_v2.get(323) + + for offset in offsets: + if x + w > xsize: + stride = w * sum(bps_tuple) / 8 # bytes per line + else: + stride = 0 + + tile_rawmode = rawmode + if 
self._planar_configuration == 2: + # each band on it's own layer + tile_rawmode = rawmode[layer] + # adjust stride width accordingly + stride /= bps_count + + a = (tile_rawmode, int(stride), 1) + self.tile.append( + ( + self._compression, + (x, y, min(x + w, xsize), min(y + h, ysize)), + offset, + a, + ) + ) + x = x + w + if x >= self.size[0]: + x, y = 0, y + h + if y >= self.size[1]: + x = y = 0 + layer += 1 + else: + logger.debug("- unsupported data organization") + raise SyntaxError("unknown data organization") + + # Fix up info. + if ICCPROFILE in self.tag_v2: + self.info["icc_profile"] = self.tag_v2[ICCPROFILE] + + # fixup palette descriptor + + if self.mode in ["P", "PA"]: + palette = [o8(b // 256) for b in self.tag_v2[COLORMAP]] + self.palette = ImagePalette.raw("RGB;L", b"".join(palette)) + + self._tile_orientation = self.tag_v2.get(0x0112) + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +# +# -------------------------------------------------------------------- +# Write TIFF files + +# little endian is default except for image modes with +# explicit big endian byte-order + +SAVE_INFO = { + # mode => rawmode, byteorder, photometrics, + # sampleformat, bitspersample, extra + "1": ("1", II, 1, 1, (1,), None), + "L": ("L", II, 1, 1, (8,), None), + "LA": ("LA", II, 1, 1, (8, 8), 2), + "P": ("P", II, 3, 1, (8,), None), + "PA": ("PA", II, 3, 1, (8, 8), 2), + "I": ("I;32S", II, 1, 2, (32,), None), + "I;16": ("I;16", II, 1, 1, (16,), None), + "I;16S": ("I;16S", II, 1, 2, (16,), None), + "F": ("F;32F", II, 1, 3, (32,), None), + "RGB": ("RGB", II, 2, 1, (8, 8, 8), None), + "RGBX": ("RGBX", II, 2, 1, (8, 8, 8, 8), 0), + "RGBA": ("RGBA", II, 2, 1, (8, 8, 8, 8), 2), + "CMYK": ("CMYK", II, 5, 1, (8, 8, 8, 8), None), + "YCbCr": ("YCbCr", II, 6, 1, (8, 8, 8), None), + "LAB": ("LAB", II, 8, 1, (8, 8, 8), None), + "I;32BS": ("I;32BS", MM, 1, 2, (32,), None), + "I;16B": ("I;16B", 
MM, 1, 1, (16,), None), + "I;16BS": ("I;16BS", MM, 1, 2, (16,), None), + "F;32BF": ("F;32BF", MM, 1, 3, (32,), None), +} + + +def _save(im, fp, filename): + + try: + rawmode, prefix, photo, format, bits, extra = SAVE_INFO[im.mode] + except KeyError as e: + raise OSError(f"cannot write mode {im.mode} as TIFF") from e + + ifd = ImageFileDirectory_v2(prefix=prefix) + + encoderinfo = im.encoderinfo + encoderconfig = im.encoderconfig + compression = encoderinfo.get("compression", im.info.get("compression")) + if compression is None: + compression = "raw" + elif compression == "tiff_jpeg": + # OJPEG is obsolete, so use new-style JPEG compression instead + compression = "jpeg" + elif compression == "tiff_deflate": + compression = "tiff_adobe_deflate" + + libtiff = WRITE_LIBTIFF or compression != "raw" + + # required for color libtiff images + ifd[PLANAR_CONFIGURATION] = getattr(im, "_planar_configuration", 1) + + ifd[IMAGEWIDTH] = im.size[0] + ifd[IMAGELENGTH] = im.size[1] + + # write any arbitrary tags passed in as an ImageFileDirectory + if "tiffinfo" in encoderinfo: + info = encoderinfo["tiffinfo"] + elif "exif" in encoderinfo: + info = encoderinfo["exif"] + if isinstance(info, bytes): + exif = Image.Exif() + exif.load(info) + info = exif + else: + info = {} + logger.debug("Tiffinfo Keys: %s" % list(info)) + if isinstance(info, ImageFileDirectory_v1): + info = info.to_v2() + for key in info: + if isinstance(info, Image.Exif) and key in TiffTags.TAGS_V2_GROUPS.keys(): + ifd[key] = info.get_ifd(key) + else: + ifd[key] = info.get(key) + try: + ifd.tagtype[key] = info.tagtype[key] + except Exception: + pass # might not be an IFD. 
Might not have populated type + + # additions written by Greg Couch, gregc@cgl.ucsf.edu + # inspired by image-sig posting from Kevin Cazabon, kcazabon@home.com + if hasattr(im, "tag_v2"): + # preserve tags from original TIFF image file + for key in ( + RESOLUTION_UNIT, + X_RESOLUTION, + Y_RESOLUTION, + IPTC_NAA_CHUNK, + PHOTOSHOP_CHUNK, + XMP, + ): + if key in im.tag_v2: + ifd[key] = im.tag_v2[key] + ifd.tagtype[key] = im.tag_v2.tagtype[key] + + # preserve ICC profile (should also work when saving other formats + # which support profiles as TIFF) -- 2008-06-06 Florian Hoech + icc = encoderinfo.get("icc_profile", im.info.get("icc_profile")) + if icc: + ifd[ICCPROFILE] = icc + + for key, name in [ + (IMAGEDESCRIPTION, "description"), + (X_RESOLUTION, "resolution"), + (Y_RESOLUTION, "resolution"), + (X_RESOLUTION, "x_resolution"), + (Y_RESOLUTION, "y_resolution"), + (RESOLUTION_UNIT, "resolution_unit"), + (SOFTWARE, "software"), + (DATE_TIME, "date_time"), + (ARTIST, "artist"), + (COPYRIGHT, "copyright"), + ]: + if name in encoderinfo: + ifd[key] = encoderinfo[name] + + dpi = encoderinfo.get("dpi") + if dpi: + ifd[RESOLUTION_UNIT] = 2 + ifd[X_RESOLUTION] = dpi[0] + ifd[Y_RESOLUTION] = dpi[1] + + if bits != (1,): + ifd[BITSPERSAMPLE] = bits + if len(bits) != 1: + ifd[SAMPLESPERPIXEL] = len(bits) + if extra is not None: + ifd[EXTRASAMPLES] = extra + if format != 1: + ifd[SAMPLEFORMAT] = format + + if PHOTOMETRIC_INTERPRETATION not in ifd: + ifd[PHOTOMETRIC_INTERPRETATION] = photo + elif im.mode in ("1", "L") and ifd[PHOTOMETRIC_INTERPRETATION] == 0: + if im.mode == "1": + inverted_im = im.copy() + px = inverted_im.load() + for y in range(inverted_im.height): + for x in range(inverted_im.width): + px[x, y] = 0 if px[x, y] == 255 else 255 + im = inverted_im + else: + im = ImageOps.invert(im) + + if im.mode in ["P", "PA"]: + lut = im.im.getpalette("RGB", "RGB;L") + ifd[COLORMAP] = tuple(v * 256 for v in lut) + # data orientation + stride = len(bits) * ((im.size[0] * 
bits[0] + 7) // 8) + # aim for given strip size (64 KB by default) when using libtiff writer + if libtiff: + rows_per_strip = 1 if stride == 0 else min(STRIP_SIZE // stride, im.size[1]) + # JPEG encoder expects multiple of 8 rows + if compression == "jpeg": + rows_per_strip = min(((rows_per_strip + 7) // 8) * 8, im.size[1]) + else: + rows_per_strip = im.size[1] + if rows_per_strip == 0: + rows_per_strip = 1 + strip_byte_counts = 1 if stride == 0 else stride * rows_per_strip + strips_per_image = (im.size[1] + rows_per_strip - 1) // rows_per_strip + ifd[ROWSPERSTRIP] = rows_per_strip + if strip_byte_counts >= 2 ** 16: + ifd.tagtype[STRIPBYTECOUNTS] = TiffTags.LONG + ifd[STRIPBYTECOUNTS] = (strip_byte_counts,) * (strips_per_image - 1) + ( + stride * im.size[1] - strip_byte_counts * (strips_per_image - 1), + ) + ifd[STRIPOFFSETS] = tuple( + range(0, strip_byte_counts * strips_per_image, strip_byte_counts) + ) # this is adjusted by IFD writer + # no compression by default: + ifd[COMPRESSION] = COMPRESSION_INFO_REV.get(compression, 1) + + if im.mode == "YCbCr": + for tag, value in { + YCBCRSUBSAMPLING: (1, 1), + REFERENCEBLACKWHITE: (0, 255, 128, 255, 128, 255), + }.items(): + ifd.setdefault(tag, value) + + if libtiff: + if "quality" in encoderinfo: + quality = encoderinfo["quality"] + if not isinstance(quality, int) or quality < 0 or quality > 100: + raise ValueError("Invalid quality setting") + if compression != "jpeg": + raise ValueError( + "quality setting only supported for 'jpeg' compression" + ) + ifd[JPEGQUALITY] = quality + + logger.debug("Saving using libtiff encoder") + logger.debug("Items: %s" % sorted(ifd.items())) + _fp = 0 + if hasattr(fp, "fileno"): + try: + fp.seek(0) + _fp = os.dup(fp.fileno()) + except io.UnsupportedOperation: + pass + + # optional types for non core tags + types = {} + # SAMPLEFORMAT is determined by the image format and should not be copied + # from legacy_ifd. 
+ # STRIPOFFSETS and STRIPBYTECOUNTS are added by the library + # based on the data in the strip. + # The other tags expect arrays with a certain length (fixed or depending on + # BITSPERSAMPLE, etc), passing arrays with a different length will result in + # segfaults. Block these tags until we add extra validation. + # SUBIFD may also cause a segfault. + blocklist = [ + REFERENCEBLACKWHITE, + SAMPLEFORMAT, + STRIPBYTECOUNTS, + STRIPOFFSETS, + TRANSFERFUNCTION, + SUBIFD, + ] + + atts = {} + # bits per sample is a single short in the tiff directory, not a list. + atts[BITSPERSAMPLE] = bits[0] + # Merge the ones that we have with (optional) more bits from + # the original file, e.g x,y resolution so that we can + # save(load('')) == original file. + legacy_ifd = {} + if hasattr(im, "tag"): + legacy_ifd = im.tag.to_v2() + for tag, value in itertools.chain( + ifd.items(), getattr(im, "tag_v2", {}).items(), legacy_ifd.items() + ): + # Libtiff can only process certain core items without adding + # them to the custom dictionary. + # Custom items are supported for int, float, unicode, string and byte + # values. Other types and tuples require a tagtype. + if tag not in TiffTags.LIBTIFF_CORE: + if not Image.core.libtiff_support_custom_tags: + continue + + if tag in ifd.tagtype: + types[tag] = ifd.tagtype[tag] + elif not (isinstance(value, (int, float, str, bytes))): + continue + else: + type = TiffTags.lookup(tag).type + if type: + types[tag] = type + if tag not in atts and tag not in blocklist: + if isinstance(value, str): + atts[tag] = value.encode("ascii", "replace") + b"\0" + elif isinstance(value, IFDRational): + atts[tag] = float(value) + else: + atts[tag] = value + + logger.debug("Converted items: %s" % sorted(atts.items())) + + # libtiff always expects the bytes in native order. + # we're storing image byte order. So, if the rawmode + # contains I;16, we need to convert from native to image + # byte order. 
+ if im.mode in ("I;16B", "I;16"): + rawmode = "I;16N" + + # Pass tags as sorted list so that the tags are set in a fixed order. + # This is required by libtiff for some tags. For example, the JPEGQUALITY + # pseudo tag requires that the COMPRESS tag was already set. + tags = list(atts.items()) + tags.sort() + a = (rawmode, compression, _fp, filename, tags, types) + e = Image._getencoder(im.mode, "libtiff", a, encoderconfig) + e.setimage(im.im, (0, 0) + im.size) + while True: + # undone, change to self.decodermaxblock: + l, s, d = e.encode(16 * 1024) + if not _fp: + fp.write(d) + if s: + break + if s < 0: + raise OSError(f"encoder error {s} when writing image file") + + else: + offset = ifd.save(fp) + + ImageFile._save( + im, fp, [("raw", (0, 0) + im.size, offset, (rawmode, stride, 1))] + ) + + # -- helper for multi-page save -- + if "_debug_multipage" in encoderinfo: + # just to access o32 and o16 (using correct byte order) + im._debug_multipage = ifd + + +class AppendingTiffWriter: + fieldSizes = [ + 0, # None + 1, # byte + 1, # ascii + 2, # short + 4, # long + 8, # rational + 1, # sbyte + 1, # undefined + 2, # sshort + 4, # slong + 8, # srational + 4, # float + 8, # double + ] + + # StripOffsets = 273 + # FreeOffsets = 288 + # TileOffsets = 324 + # JPEGQTables = 519 + # JPEGDCTables = 520 + # JPEGACTables = 521 + Tags = {273, 288, 324, 519, 520, 521} + + def __init__(self, fn, new=False): + if hasattr(fn, "read"): + self.f = fn + self.close_fp = False + else: + self.name = fn + self.close_fp = True + try: + self.f = open(fn, "w+b" if new else "r+b") + except OSError: + self.f = open(fn, "w+b") + self.beginning = self.f.tell() + self.setup() + + def setup(self): + # Reset everything. 
+ self.f.seek(self.beginning, os.SEEK_SET) + + self.whereToWriteNewIFDOffset = None + self.offsetOfNewPage = 0 + + self.IIMM = IIMM = self.f.read(4) + if not IIMM: + # empty file - first page + self.isFirst = True + return + + self.isFirst = False + if IIMM == b"II\x2a\x00": + self.setEndian("<") + elif IIMM == b"MM\x00\x2a": + self.setEndian(">") + else: + raise RuntimeError("Invalid TIFF file header") + + self.skipIFDs() + self.goToEnd() + + def finalize(self): + if self.isFirst: + return + + # fix offsets + self.f.seek(self.offsetOfNewPage) + + IIMM = self.f.read(4) + if not IIMM: + # raise RuntimeError("nothing written into new page") + # Make it easy to finish a frame without committing to a new one. + return + + if IIMM != self.IIMM: + raise RuntimeError("IIMM of new page doesn't match IIMM of first page") + + IFDoffset = self.readLong() + IFDoffset += self.offsetOfNewPage + self.f.seek(self.whereToWriteNewIFDOffset) + self.writeLong(IFDoffset) + self.f.seek(IFDoffset) + self.fixIFD() + + def newFrame(self): + # Call this to finish a frame. 
+ self.finalize() + self.setup() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + if self.close_fp: + self.close() + return False + + def tell(self): + return self.f.tell() - self.offsetOfNewPage + + def seek(self, offset, whence=io.SEEK_SET): + if whence == os.SEEK_SET: + offset += self.offsetOfNewPage + + self.f.seek(offset, whence) + return self.tell() + + def goToEnd(self): + self.f.seek(0, os.SEEK_END) + pos = self.f.tell() + + # pad to 16 byte boundary + padBytes = 16 - pos % 16 + if 0 < padBytes < 16: + self.f.write(bytes(padBytes)) + self.offsetOfNewPage = self.f.tell() + + def setEndian(self, endian): + self.endian = endian + self.longFmt = self.endian + "L" + self.shortFmt = self.endian + "H" + self.tagFormat = self.endian + "HHL" + + def skipIFDs(self): + while True: + IFDoffset = self.readLong() + if IFDoffset == 0: + self.whereToWriteNewIFDOffset = self.f.tell() - 4 + break + + self.f.seek(IFDoffset) + numTags = self.readShort() + self.f.seek(numTags * 12, os.SEEK_CUR) + + def write(self, data): + return self.f.write(data) + + def readShort(self): + (value,) = struct.unpack(self.shortFmt, self.f.read(2)) + return value + + def readLong(self): + (value,) = struct.unpack(self.longFmt, self.f.read(4)) + return value + + def rewriteLastShortToLong(self, value): + self.f.seek(-2, os.SEEK_CUR) + bytesWritten = self.f.write(struct.pack(self.longFmt, value)) + if bytesWritten is not None and bytesWritten != 4: + raise RuntimeError(f"wrote only {bytesWritten} bytes but wanted 4") + + def rewriteLastShort(self, value): + self.f.seek(-2, os.SEEK_CUR) + bytesWritten = self.f.write(struct.pack(self.shortFmt, value)) + if bytesWritten is not None and bytesWritten != 2: + raise RuntimeError(f"wrote only {bytesWritten} bytes but wanted 2") + + def rewriteLastLong(self, value): + self.f.seek(-4, os.SEEK_CUR) + bytesWritten = self.f.write(struct.pack(self.longFmt, value)) + if bytesWritten is not None and bytesWritten != 
4: + raise RuntimeError(f"wrote only {bytesWritten} bytes but wanted 4") + + def writeShort(self, value): + bytesWritten = self.f.write(struct.pack(self.shortFmt, value)) + if bytesWritten is not None and bytesWritten != 2: + raise RuntimeError(f"wrote only {bytesWritten} bytes but wanted 2") + + def writeLong(self, value): + bytesWritten = self.f.write(struct.pack(self.longFmt, value)) + if bytesWritten is not None and bytesWritten != 4: + raise RuntimeError(f"wrote only {bytesWritten} bytes but wanted 4") + + def close(self): + self.finalize() + self.f.close() + + def fixIFD(self): + numTags = self.readShort() + + for i in range(numTags): + tag, fieldType, count = struct.unpack(self.tagFormat, self.f.read(8)) + + fieldSize = self.fieldSizes[fieldType] + totalSize = fieldSize * count + isLocal = totalSize <= 4 + if not isLocal: + offset = self.readLong() + offset += self.offsetOfNewPage + self.rewriteLastLong(offset) + + if tag in self.Tags: + curPos = self.f.tell() + + if isLocal: + self.fixOffsets( + count, isShort=(fieldSize == 2), isLong=(fieldSize == 4) + ) + self.f.seek(curPos + 4) + else: + self.f.seek(offset) + self.fixOffsets( + count, isShort=(fieldSize == 2), isLong=(fieldSize == 4) + ) + self.f.seek(curPos) + + offset = curPos = None + + elif isLocal: + # skip the locally stored value that is not an offset + self.f.seek(4, os.SEEK_CUR) + + def fixOffsets(self, count, isShort=False, isLong=False): + if not isShort and not isLong: + raise RuntimeError("offset is neither short nor long") + + for i in range(count): + offset = self.readShort() if isShort else self.readLong() + offset += self.offsetOfNewPage + if isShort and offset >= 65536: + # offset is now too large - we must convert shorts to longs + if count != 1: + raise RuntimeError("not implemented") # XXX TODO + + # simple case - the offset is just one and therefore it is + # local (not referenced with another offset) + self.rewriteLastShortToLong(offset) + self.f.seek(-10, os.SEEK_CUR) + 
self.writeShort(TiffTags.LONG) # rewrite the type to LONG + self.f.seek(8, os.SEEK_CUR) + elif isShort: + self.rewriteLastShort(offset) + else: + self.rewriteLastLong(offset) + + +def _save_all(im, fp, filename): + encoderinfo = im.encoderinfo.copy() + encoderconfig = im.encoderconfig + append_images = list(encoderinfo.get("append_images", [])) + if not hasattr(im, "n_frames") and not append_images: + return _save(im, fp, filename) + + cur_idx = im.tell() + try: + with AppendingTiffWriter(fp) as tf: + for ims in [im] + append_images: + ims.encoderinfo = encoderinfo + ims.encoderconfig = encoderconfig + if not hasattr(ims, "n_frames"): + nfr = 1 + else: + nfr = ims.n_frames + + for idx in range(nfr): + ims.seek(idx) + ims.load() + _save(ims, tf, filename) + tf.newFrame() + finally: + im.seek(cur_idx) + + +# +# -------------------------------------------------------------------- +# Register + +Image.register_open(TiffImageFile.format, TiffImageFile, _accept) +Image.register_save(TiffImageFile.format, _save) +Image.register_save_all(TiffImageFile.format, _save_all) + +Image.register_extensions(TiffImageFile.format, [".tif", ".tiff"]) + +Image.register_mime(TiffImageFile.format, "image/tiff") diff --git a/.venv/lib/python3.9/site-packages/PIL/TiffTags.py b/.venv/lib/python3.9/site-packages/PIL/TiffTags.py new file mode 100644 index 00000000..88856aa9 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/TiffTags.py @@ -0,0 +1,521 @@ +# +# The Python Imaging Library. +# $Id$ +# +# TIFF tags +# +# This module provides clear-text names for various well-known +# TIFF tags. the TIFF codec works just fine without it. +# +# Copyright (c) Secret Labs AB 1999. +# +# See the README file for information on usage and redistribution. +# + +## +# This module provides constants and clear-text names for various +# well-known TIFF tags. 
+## + +from collections import namedtuple + + +class TagInfo(namedtuple("_TagInfo", "value name type length enum")): + __slots__ = [] + + def __new__(cls, value=None, name="unknown", type=None, length=None, enum=None): + return super().__new__(cls, value, name, type, length, enum or {}) + + def cvt_enum(self, value): + # Using get will call hash(value), which can be expensive + # for some types (e.g. Fraction). Since self.enum is rarely + # used, it's usually better to test it first. + return self.enum.get(value, value) if self.enum else value + + +def lookup(tag, group=None): + """ + :param tag: Integer tag number + :returns: Taginfo namedtuple, From the TAGS_V2 info if possible, + otherwise just populating the value and name from TAGS. + If the tag is not recognized, "unknown" is returned for the name + + """ + + if group is not None: + info = TAGS_V2_GROUPS[group].get(tag) if group in TAGS_V2_GROUPS else None + else: + info = TAGS_V2.get(tag) + return info or TagInfo(tag, TAGS.get(tag, "unknown")) + + +## +# Map tag numbers to tag info. +# +# id: (Name, Type, Length, enum_values) +# +# The length here differs from the length in the tiff spec. For +# numbers, the tiff spec is for the number of fields returned. We +# agree here. For string-like types, the tiff spec uses the length of +# field in bytes. In Pillow, we are using the number of expected +# fields, in general 1 for string-like types. 
+ + +BYTE = 1 +ASCII = 2 +SHORT = 3 +LONG = 4 +RATIONAL = 5 +SIGNED_BYTE = 6 +UNDEFINED = 7 +SIGNED_SHORT = 8 +SIGNED_LONG = 9 +SIGNED_RATIONAL = 10 +FLOAT = 11 +DOUBLE = 12 +IFD = 13 + +TAGS_V2 = { + 254: ("NewSubfileType", LONG, 1), + 255: ("SubfileType", SHORT, 1), + 256: ("ImageWidth", LONG, 1), + 257: ("ImageLength", LONG, 1), + 258: ("BitsPerSample", SHORT, 0), + 259: ( + "Compression", + SHORT, + 1, + { + "Uncompressed": 1, + "CCITT 1d": 2, + "Group 3 Fax": 3, + "Group 4 Fax": 4, + "LZW": 5, + "JPEG": 6, + "PackBits": 32773, + }, + ), + 262: ( + "PhotometricInterpretation", + SHORT, + 1, + { + "WhiteIsZero": 0, + "BlackIsZero": 1, + "RGB": 2, + "RGB Palette": 3, + "Transparency Mask": 4, + "CMYK": 5, + "YCbCr": 6, + "CieLAB": 8, + "CFA": 32803, # TIFF/EP, Adobe DNG + "LinearRaw": 32892, # Adobe DNG + }, + ), + 263: ("Threshholding", SHORT, 1), + 264: ("CellWidth", SHORT, 1), + 265: ("CellLength", SHORT, 1), + 266: ("FillOrder", SHORT, 1), + 269: ("DocumentName", ASCII, 1), + 270: ("ImageDescription", ASCII, 1), + 271: ("Make", ASCII, 1), + 272: ("Model", ASCII, 1), + 273: ("StripOffsets", LONG, 0), + 274: ("Orientation", SHORT, 1), + 277: ("SamplesPerPixel", SHORT, 1), + 278: ("RowsPerStrip", LONG, 1), + 279: ("StripByteCounts", LONG, 0), + 280: ("MinSampleValue", SHORT, 0), + 281: ("MaxSampleValue", SHORT, 0), + 282: ("XResolution", RATIONAL, 1), + 283: ("YResolution", RATIONAL, 1), + 284: ("PlanarConfiguration", SHORT, 1, {"Contiguous": 1, "Separate": 2}), + 285: ("PageName", ASCII, 1), + 286: ("XPosition", RATIONAL, 1), + 287: ("YPosition", RATIONAL, 1), + 288: ("FreeOffsets", LONG, 1), + 289: ("FreeByteCounts", LONG, 1), + 290: ("GrayResponseUnit", SHORT, 1), + 291: ("GrayResponseCurve", SHORT, 0), + 292: ("T4Options", LONG, 1), + 293: ("T6Options", LONG, 1), + 296: ("ResolutionUnit", SHORT, 1, {"none": 1, "inch": 2, "cm": 3}), + 297: ("PageNumber", SHORT, 2), + 301: ("TransferFunction", SHORT, 0), + 305: ("Software", ASCII, 1), + 306: ("DateTime", 
ASCII, 1), + 315: ("Artist", ASCII, 1), + 316: ("HostComputer", ASCII, 1), + 317: ("Predictor", SHORT, 1, {"none": 1, "Horizontal Differencing": 2}), + 318: ("WhitePoint", RATIONAL, 2), + 319: ("PrimaryChromaticities", RATIONAL, 6), + 320: ("ColorMap", SHORT, 0), + 321: ("HalftoneHints", SHORT, 2), + 322: ("TileWidth", LONG, 1), + 323: ("TileLength", LONG, 1), + 324: ("TileOffsets", LONG, 0), + 325: ("TileByteCounts", LONG, 0), + 332: ("InkSet", SHORT, 1), + 333: ("InkNames", ASCII, 1), + 334: ("NumberOfInks", SHORT, 1), + 336: ("DotRange", SHORT, 0), + 337: ("TargetPrinter", ASCII, 1), + 338: ("ExtraSamples", SHORT, 0), + 339: ("SampleFormat", SHORT, 0), + 340: ("SMinSampleValue", DOUBLE, 0), + 341: ("SMaxSampleValue", DOUBLE, 0), + 342: ("TransferRange", SHORT, 6), + 347: ("JPEGTables", UNDEFINED, 1), + # obsolete JPEG tags + 512: ("JPEGProc", SHORT, 1), + 513: ("JPEGInterchangeFormat", LONG, 1), + 514: ("JPEGInterchangeFormatLength", LONG, 1), + 515: ("JPEGRestartInterval", SHORT, 1), + 517: ("JPEGLosslessPredictors", SHORT, 0), + 518: ("JPEGPointTransforms", SHORT, 0), + 519: ("JPEGQTables", LONG, 0), + 520: ("JPEGDCTables", LONG, 0), + 521: ("JPEGACTables", LONG, 0), + 529: ("YCbCrCoefficients", RATIONAL, 3), + 530: ("YCbCrSubSampling", SHORT, 2), + 531: ("YCbCrPositioning", SHORT, 1), + 532: ("ReferenceBlackWhite", RATIONAL, 6), + 700: ("XMP", BYTE, 0), + 33432: ("Copyright", ASCII, 1), + 33723: ("IptcNaaInfo", UNDEFINED, 1), + 34377: ("PhotoshopInfo", BYTE, 0), + # FIXME add more tags here + 34665: ("ExifIFD", LONG, 1), + 34675: ("ICCProfile", UNDEFINED, 1), + 34853: ("GPSInfoIFD", LONG, 1), + 36864: ("ExifVersion", UNDEFINED, 1), + 40965: ("InteroperabilityIFD", LONG, 1), + 41730: ("CFAPattern", UNDEFINED, 1), + # MPInfo + 45056: ("MPFVersion", UNDEFINED, 1), + 45057: ("NumberOfImages", LONG, 1), + 45058: ("MPEntry", UNDEFINED, 1), + 45059: ("ImageUIDList", UNDEFINED, 0), # UNDONE, check + 45060: ("TotalFrames", LONG, 1), + 45313: ("MPIndividualNum", LONG, 
1), + 45569: ("PanOrientation", LONG, 1), + 45570: ("PanOverlap_H", RATIONAL, 1), + 45571: ("PanOverlap_V", RATIONAL, 1), + 45572: ("BaseViewpointNum", LONG, 1), + 45573: ("ConvergenceAngle", SIGNED_RATIONAL, 1), + 45574: ("BaselineLength", RATIONAL, 1), + 45575: ("VerticalDivergence", SIGNED_RATIONAL, 1), + 45576: ("AxisDistance_X", SIGNED_RATIONAL, 1), + 45577: ("AxisDistance_Y", SIGNED_RATIONAL, 1), + 45578: ("AxisDistance_Z", SIGNED_RATIONAL, 1), + 45579: ("YawAngle", SIGNED_RATIONAL, 1), + 45580: ("PitchAngle", SIGNED_RATIONAL, 1), + 45581: ("RollAngle", SIGNED_RATIONAL, 1), + 40960: ("FlashPixVersion", UNDEFINED, 1), + 50741: ("MakerNoteSafety", SHORT, 1, {"Unsafe": 0, "Safe": 1}), + 50780: ("BestQualityScale", RATIONAL, 1), + 50838: ("ImageJMetaDataByteCounts", LONG, 0), # Can be more than one + 50839: ("ImageJMetaData", UNDEFINED, 1), # see Issue #2006 +} +TAGS_V2_GROUPS = { + # ExifIFD + 34665: { + 36864: ("ExifVersion", UNDEFINED, 1), + 40960: ("FlashPixVersion", UNDEFINED, 1), + 40965: ("InteroperabilityIFD", LONG, 1), + 41730: ("CFAPattern", UNDEFINED, 1), + }, + # GPSInfoIFD + 34853: {}, + # InteroperabilityIFD + 40965: {1: ("InteropIndex", ASCII, 1), 2: ("InteropVersion", UNDEFINED, 1)}, +} + +# Legacy Tags structure +# these tags aren't included above, but were in the previous versions +TAGS = { + 347: "JPEGTables", + 700: "XMP", + # Additional Exif Info + 32932: "Wang Annotation", + 33434: "ExposureTime", + 33437: "FNumber", + 33445: "MD FileTag", + 33446: "MD ScalePixel", + 33447: "MD ColorTable", + 33448: "MD LabName", + 33449: "MD SampleInfo", + 33450: "MD PrepDate", + 33451: "MD PrepTime", + 33452: "MD FileUnits", + 33550: "ModelPixelScaleTag", + 33723: "IptcNaaInfo", + 33918: "INGR Packet Data Tag", + 33919: "INGR Flag Registers", + 33920: "IrasB Transformation Matrix", + 33922: "ModelTiepointTag", + 34264: "ModelTransformationTag", + 34377: "PhotoshopInfo", + 34735: "GeoKeyDirectoryTag", + 34736: "GeoDoubleParamsTag", + 34737: 
"GeoAsciiParamsTag", + 34850: "ExposureProgram", + 34852: "SpectralSensitivity", + 34855: "ISOSpeedRatings", + 34856: "OECF", + 34864: "SensitivityType", + 34865: "StandardOutputSensitivity", + 34866: "RecommendedExposureIndex", + 34867: "ISOSpeed", + 34868: "ISOSpeedLatitudeyyy", + 34869: "ISOSpeedLatitudezzz", + 34908: "HylaFAX FaxRecvParams", + 34909: "HylaFAX FaxSubAddress", + 34910: "HylaFAX FaxRecvTime", + 36864: "ExifVersion", + 36867: "DateTimeOriginal", + 36868: "DateTImeDigitized", + 37121: "ComponentsConfiguration", + 37122: "CompressedBitsPerPixel", + 37724: "ImageSourceData", + 37377: "ShutterSpeedValue", + 37378: "ApertureValue", + 37379: "BrightnessValue", + 37380: "ExposureBiasValue", + 37381: "MaxApertureValue", + 37382: "SubjectDistance", + 37383: "MeteringMode", + 37384: "LightSource", + 37385: "Flash", + 37386: "FocalLength", + 37396: "SubjectArea", + 37500: "MakerNote", + 37510: "UserComment", + 37520: "SubSec", + 37521: "SubSecTimeOriginal", + 37522: "SubsecTimeDigitized", + 40960: "FlashPixVersion", + 40961: "ColorSpace", + 40962: "PixelXDimension", + 40963: "PixelYDimension", + 40964: "RelatedSoundFile", + 40965: "InteroperabilityIFD", + 41483: "FlashEnergy", + 41484: "SpatialFrequencyResponse", + 41486: "FocalPlaneXResolution", + 41487: "FocalPlaneYResolution", + 41488: "FocalPlaneResolutionUnit", + 41492: "SubjectLocation", + 41493: "ExposureIndex", + 41495: "SensingMethod", + 41728: "FileSource", + 41729: "SceneType", + 41730: "CFAPattern", + 41985: "CustomRendered", + 41986: "ExposureMode", + 41987: "WhiteBalance", + 41988: "DigitalZoomRatio", + 41989: "FocalLengthIn35mmFilm", + 41990: "SceneCaptureType", + 41991: "GainControl", + 41992: "Contrast", + 41993: "Saturation", + 41994: "Sharpness", + 41995: "DeviceSettingDescription", + 41996: "SubjectDistanceRange", + 42016: "ImageUniqueID", + 42032: "CameraOwnerName", + 42033: "BodySerialNumber", + 42034: "LensSpecification", + 42035: "LensMake", + 42036: "LensModel", + 42037: 
"LensSerialNumber", + 42112: "GDAL_METADATA", + 42113: "GDAL_NODATA", + 42240: "Gamma", + 50215: "Oce Scanjob Description", + 50216: "Oce Application Selector", + 50217: "Oce Identification Number", + 50218: "Oce ImageLogic Characteristics", + # Adobe DNG + 50706: "DNGVersion", + 50707: "DNGBackwardVersion", + 50708: "UniqueCameraModel", + 50709: "LocalizedCameraModel", + 50710: "CFAPlaneColor", + 50711: "CFALayout", + 50712: "LinearizationTable", + 50713: "BlackLevelRepeatDim", + 50714: "BlackLevel", + 50715: "BlackLevelDeltaH", + 50716: "BlackLevelDeltaV", + 50717: "WhiteLevel", + 50718: "DefaultScale", + 50719: "DefaultCropOrigin", + 50720: "DefaultCropSize", + 50721: "ColorMatrix1", + 50722: "ColorMatrix2", + 50723: "CameraCalibration1", + 50724: "CameraCalibration2", + 50725: "ReductionMatrix1", + 50726: "ReductionMatrix2", + 50727: "AnalogBalance", + 50728: "AsShotNeutral", + 50729: "AsShotWhiteXY", + 50730: "BaselineExposure", + 50731: "BaselineNoise", + 50732: "BaselineSharpness", + 50733: "BayerGreenSplit", + 50734: "LinearResponseLimit", + 50735: "CameraSerialNumber", + 50736: "LensInfo", + 50737: "ChromaBlurRadius", + 50738: "AntiAliasStrength", + 50740: "DNGPrivateData", + 50778: "CalibrationIlluminant1", + 50779: "CalibrationIlluminant2", + 50784: "Alias Layer Metadata", +} + + +def _populate(): + for k, v in TAGS_V2.items(): + # Populate legacy structure. + TAGS[k] = v[0] + if len(v) == 4: + for sk, sv in v[3].items(): + TAGS[(k, sv)] = sk + + TAGS_V2[k] = TagInfo(k, *v) + + for group, tags in TAGS_V2_GROUPS.items(): + for k, v in tags.items(): + tags[k] = TagInfo(k, *v) + + +_populate() +## +# Map type numbers to type names -- defined in ImageFileDirectory. 
+ +TYPES = {} + +# was: +# TYPES = { +# 1: "byte", +# 2: "ascii", +# 3: "short", +# 4: "long", +# 5: "rational", +# 6: "signed byte", +# 7: "undefined", +# 8: "signed short", +# 9: "signed long", +# 10: "signed rational", +# 11: "float", +# 12: "double", +# } + +# +# These tags are handled by default in libtiff, without +# adding to the custom dictionary. From tif_dir.c, searching for +# case TIFFTAG in the _TIFFVSetField function: +# Line: item. +# 148: case TIFFTAG_SUBFILETYPE: +# 151: case TIFFTAG_IMAGEWIDTH: +# 154: case TIFFTAG_IMAGELENGTH: +# 157: case TIFFTAG_BITSPERSAMPLE: +# 181: case TIFFTAG_COMPRESSION: +# 202: case TIFFTAG_PHOTOMETRIC: +# 205: case TIFFTAG_THRESHHOLDING: +# 208: case TIFFTAG_FILLORDER: +# 214: case TIFFTAG_ORIENTATION: +# 221: case TIFFTAG_SAMPLESPERPIXEL: +# 228: case TIFFTAG_ROWSPERSTRIP: +# 238: case TIFFTAG_MINSAMPLEVALUE: +# 241: case TIFFTAG_MAXSAMPLEVALUE: +# 244: case TIFFTAG_SMINSAMPLEVALUE: +# 247: case TIFFTAG_SMAXSAMPLEVALUE: +# 250: case TIFFTAG_XRESOLUTION: +# 256: case TIFFTAG_YRESOLUTION: +# 262: case TIFFTAG_PLANARCONFIG: +# 268: case TIFFTAG_XPOSITION: +# 271: case TIFFTAG_YPOSITION: +# 274: case TIFFTAG_RESOLUTIONUNIT: +# 280: case TIFFTAG_PAGENUMBER: +# 284: case TIFFTAG_HALFTONEHINTS: +# 288: case TIFFTAG_COLORMAP: +# 294: case TIFFTAG_EXTRASAMPLES: +# 298: case TIFFTAG_MATTEING: +# 305: case TIFFTAG_TILEWIDTH: +# 316: case TIFFTAG_TILELENGTH: +# 327: case TIFFTAG_TILEDEPTH: +# 333: case TIFFTAG_DATATYPE: +# 344: case TIFFTAG_SAMPLEFORMAT: +# 361: case TIFFTAG_IMAGEDEPTH: +# 364: case TIFFTAG_SUBIFD: +# 376: case TIFFTAG_YCBCRPOSITIONING: +# 379: case TIFFTAG_YCBCRSUBSAMPLING: +# 383: case TIFFTAG_TRANSFERFUNCTION: +# 389: case TIFFTAG_REFERENCEBLACKWHITE: +# 393: case TIFFTAG_INKNAMES: + +# Following pseudo-tags are also handled by default in libtiff: +# TIFFTAG_JPEGQUALITY 65537 + +# some of these are not in our TAGS_V2 dict and were included from tiff.h + +# This list also exists in encode.c +LIBTIFF_CORE = { + 
255, + 256, + 257, + 258, + 259, + 262, + 263, + 266, + 274, + 277, + 278, + 280, + 281, + 340, + 341, + 282, + 283, + 284, + 286, + 287, + 296, + 297, + 321, + 320, + 338, + 32995, + 322, + 323, + 32998, + 32996, + 339, + 32997, + 330, + 531, + 530, + 301, + 532, + 333, + # as above + 269, # this has been in our tests forever, and works + 65537, +} + +LIBTIFF_CORE.remove(255) # We don't have support for subfiletypes +LIBTIFF_CORE.remove(322) # We don't have support for writing tiled images with libtiff +LIBTIFF_CORE.remove(323) # Tiled images +LIBTIFF_CORE.remove(333) # Ink Names either + +# Note to advanced users: There may be combinations of these +# parameters and values that when added properly, will work and +# produce valid tiff images that may work in your application. +# It is safe to add and remove tags from this set from Pillow's point +# of view so long as you test against libtiff. diff --git a/.venv/lib/python3.9/site-packages/PIL/WalImageFile.py b/.venv/lib/python3.9/site-packages/PIL/WalImageFile.py new file mode 100644 index 00000000..1354ad32 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/WalImageFile.py @@ -0,0 +1,127 @@ +# +# The Python Imaging Library. +# $Id$ +# +# WAL file handling +# +# History: +# 2003-04-23 fl created +# +# Copyright (c) 2003 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +""" +This reader is based on the specification available from: +https://www.flipcode.com/archives/Quake_2_BSP_File_Format.shtml +and has been tested with a few sample files found using google. + +.. note:: + This format cannot be automatically recognized, so the reader + is not registered for use with :py:func:`PIL.Image.open()`. + To open a WAL file, use the :py:func:`PIL.WalImageFile.open()` function instead. +""" + +from . 
import Image, ImageFile +from ._binary import i32le as i32 + + +class WalImageFile(ImageFile.ImageFile): + + format = "WAL" + format_description = "Quake2 Texture" + + def _open(self): + self.mode = "P" + + # read header fields + header = self.fp.read(32 + 24 + 32 + 12) + self._size = i32(header, 32), i32(header, 36) + Image._decompression_bomb_check(self.size) + + # load pixel data + offset = i32(header, 40) + self.fp.seek(offset) + + # strings are null-terminated + self.info["name"] = header[:32].split(b"\0", 1)[0] + next_name = header[56 : 56 + 32].split(b"\0", 1)[0] + if next_name: + self.info["next_name"] = next_name + + def load(self): + if self.im: + # Already loaded + return + + self.im = Image.core.new(self.mode, self.size) + self.frombytes(self.fp.read(self.size[0] * self.size[1])) + self.putpalette(quake2palette) + Image.Image.load(self) + + +def open(filename): + """ + Load texture from a Quake2 WAL texture file. + + By default, a Quake2 standard palette is attached to the texture. + To override the palette, use the :py:func:`PIL.Image.Image.putpalette()` method. + + :param filename: WAL file name, or an opened file handle. + :returns: An image instance. 
+ """ + return WalImageFile(filename) + + +quake2palette = ( + # default palette taken from piffo 0.93 by Hans Häggström + b"\x01\x01\x01\x0b\x0b\x0b\x12\x12\x12\x17\x17\x17\x1b\x1b\x1b\x1e" + b"\x1e\x1e\x22\x22\x22\x26\x26\x26\x29\x29\x29\x2c\x2c\x2c\x2f\x2f" + b"\x2f\x32\x32\x32\x35\x35\x35\x37\x37\x37\x3a\x3a\x3a\x3c\x3c\x3c" + b"\x24\x1e\x13\x22\x1c\x12\x20\x1b\x12\x1f\x1a\x10\x1d\x19\x10\x1b" + b"\x17\x0f\x1a\x16\x0f\x18\x14\x0d\x17\x13\x0d\x16\x12\x0d\x14\x10" + b"\x0b\x13\x0f\x0b\x10\x0d\x0a\x0f\x0b\x0a\x0d\x0b\x07\x0b\x0a\x07" + b"\x23\x23\x26\x22\x22\x25\x22\x20\x23\x21\x1f\x22\x20\x1e\x20\x1f" + b"\x1d\x1e\x1d\x1b\x1c\x1b\x1a\x1a\x1a\x19\x19\x18\x17\x17\x17\x16" + b"\x16\x14\x14\x14\x13\x13\x13\x10\x10\x10\x0f\x0f\x0f\x0d\x0d\x0d" + b"\x2d\x28\x20\x29\x24\x1c\x27\x22\x1a\x25\x1f\x17\x38\x2e\x1e\x31" + b"\x29\x1a\x2c\x25\x17\x26\x20\x14\x3c\x30\x14\x37\x2c\x13\x33\x28" + b"\x12\x2d\x24\x10\x28\x1f\x0f\x22\x1a\x0b\x1b\x14\x0a\x13\x0f\x07" + b"\x31\x1a\x16\x30\x17\x13\x2e\x16\x10\x2c\x14\x0d\x2a\x12\x0b\x27" + b"\x0f\x0a\x25\x0f\x07\x21\x0d\x01\x1e\x0b\x01\x1c\x0b\x01\x1a\x0b" + b"\x01\x18\x0a\x01\x16\x0a\x01\x13\x0a\x01\x10\x07\x01\x0d\x07\x01" + b"\x29\x23\x1e\x27\x21\x1c\x26\x20\x1b\x25\x1f\x1a\x23\x1d\x19\x21" + b"\x1c\x18\x20\x1b\x17\x1e\x19\x16\x1c\x18\x14\x1b\x17\x13\x19\x14" + b"\x10\x17\x13\x0f\x14\x10\x0d\x12\x0f\x0b\x0f\x0b\x0a\x0b\x0a\x07" + b"\x26\x1a\x0f\x23\x19\x0f\x20\x17\x0f\x1c\x16\x0f\x19\x13\x0d\x14" + b"\x10\x0b\x10\x0d\x0a\x0b\x0a\x07\x33\x22\x1f\x35\x29\x26\x37\x2f" + b"\x2d\x39\x35\x34\x37\x39\x3a\x33\x37\x39\x30\x34\x36\x2b\x31\x34" + b"\x27\x2e\x31\x22\x2b\x2f\x1d\x28\x2c\x17\x25\x2a\x0f\x20\x26\x0d" + b"\x1e\x25\x0b\x1c\x22\x0a\x1b\x20\x07\x19\x1e\x07\x17\x1b\x07\x14" + b"\x18\x01\x12\x16\x01\x0f\x12\x01\x0b\x0d\x01\x07\x0a\x01\x01\x01" + b"\x2c\x21\x21\x2a\x1f\x1f\x29\x1d\x1d\x27\x1c\x1c\x26\x1a\x1a\x24" + b"\x18\x18\x22\x17\x17\x21\x16\x16\x1e\x13\x13\x1b\x12\x12\x18\x10" + 
b"\x10\x16\x0d\x0d\x12\x0b\x0b\x0d\x0a\x0a\x0a\x07\x07\x01\x01\x01" + b"\x2e\x30\x29\x2d\x2e\x27\x2b\x2c\x26\x2a\x2a\x24\x28\x29\x23\x27" + b"\x27\x21\x26\x26\x1f\x24\x24\x1d\x22\x22\x1c\x1f\x1f\x1a\x1c\x1c" + b"\x18\x19\x19\x16\x17\x17\x13\x13\x13\x10\x0f\x0f\x0d\x0b\x0b\x0a" + b"\x30\x1e\x1b\x2d\x1c\x19\x2c\x1a\x17\x2a\x19\x14\x28\x17\x13\x26" + b"\x16\x10\x24\x13\x0f\x21\x12\x0d\x1f\x10\x0b\x1c\x0f\x0a\x19\x0d" + b"\x0a\x16\x0b\x07\x12\x0a\x07\x0f\x07\x01\x0a\x01\x01\x01\x01\x01" + b"\x28\x29\x38\x26\x27\x36\x25\x26\x34\x24\x24\x31\x22\x22\x2f\x20" + b"\x21\x2d\x1e\x1f\x2a\x1d\x1d\x27\x1b\x1b\x25\x19\x19\x21\x17\x17" + b"\x1e\x14\x14\x1b\x13\x12\x17\x10\x0f\x13\x0d\x0b\x0f\x0a\x07\x07" + b"\x2f\x32\x29\x2d\x30\x26\x2b\x2e\x24\x29\x2c\x21\x27\x2a\x1e\x25" + b"\x28\x1c\x23\x26\x1a\x21\x25\x18\x1e\x22\x14\x1b\x1f\x10\x19\x1c" + b"\x0d\x17\x1a\x0a\x13\x17\x07\x10\x13\x01\x0d\x0f\x01\x0a\x0b\x01" + b"\x01\x3f\x01\x13\x3c\x0b\x1b\x39\x10\x20\x35\x14\x23\x31\x17\x23" + b"\x2d\x18\x23\x29\x18\x3f\x3f\x3f\x3f\x3f\x39\x3f\x3f\x31\x3f\x3f" + b"\x2a\x3f\x3f\x20\x3f\x3f\x14\x3f\x3c\x12\x3f\x39\x0f\x3f\x35\x0b" + b"\x3f\x32\x07\x3f\x2d\x01\x3d\x2a\x01\x3b\x26\x01\x39\x21\x01\x37" + b"\x1d\x01\x34\x1a\x01\x32\x16\x01\x2f\x12\x01\x2d\x0f\x01\x2a\x0b" + b"\x01\x27\x07\x01\x23\x01\x01\x1d\x01\x01\x17\x01\x01\x10\x01\x01" + b"\x3d\x01\x01\x19\x19\x3f\x3f\x01\x01\x01\x01\x3f\x16\x16\x13\x10" + b"\x10\x0f\x0d\x0d\x0b\x3c\x2e\x2a\x36\x27\x20\x30\x21\x18\x29\x1b" + b"\x10\x3c\x39\x37\x37\x32\x2f\x31\x2c\x28\x2b\x26\x21\x30\x22\x20" +) diff --git a/.venv/lib/python3.9/site-packages/PIL/WebPImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/WebPImagePlugin.py new file mode 100644 index 00000000..590161f3 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/WebPImagePlugin.py @@ -0,0 +1,351 @@ +from io import BytesIO + +from . import Image, ImageFile + +try: + from . 
import _webp + + SUPPORTED = True +except ImportError: + SUPPORTED = False + + +_VALID_WEBP_MODES = {"RGBX": True, "RGBA": True, "RGB": True} + +_VALID_WEBP_LEGACY_MODES = {"RGB": True, "RGBA": True} + +_VP8_MODES_BY_IDENTIFIER = { + b"VP8 ": "RGB", + b"VP8X": "RGBA", + b"VP8L": "RGBA", # lossless +} + + +def _accept(prefix): + is_riff_file_format = prefix[:4] == b"RIFF" + is_webp_file = prefix[8:12] == b"WEBP" + is_valid_vp8_mode = prefix[12:16] in _VP8_MODES_BY_IDENTIFIER + + if is_riff_file_format and is_webp_file and is_valid_vp8_mode: + if not SUPPORTED: + return ( + "image file could not be identified because WEBP support not installed" + ) + return True + + +class WebPImageFile(ImageFile.ImageFile): + + format = "WEBP" + format_description = "WebP image" + __loaded = 0 + __logical_frame = 0 + + def _open(self): + if not _webp.HAVE_WEBPANIM: + # Legacy mode + data, width, height, self.mode, icc_profile, exif = _webp.WebPDecode( + self.fp.read() + ) + if icc_profile: + self.info["icc_profile"] = icc_profile + if exif: + self.info["exif"] = exif + self._size = width, height + self.fp = BytesIO(data) + self.tile = [("raw", (0, 0) + self.size, 0, self.mode)] + self.n_frames = 1 + self.is_animated = False + return + + # Use the newer AnimDecoder API to parse the (possibly) animated file, + # and access muxed chunks like ICC/EXIF/XMP. 
+ self._decoder = _webp.WebPAnimDecoder(self.fp.read()) + + # Get info from decoder + width, height, loop_count, bgcolor, frame_count, mode = self._decoder.get_info() + self._size = width, height + self.info["loop"] = loop_count + bg_a, bg_r, bg_g, bg_b = ( + (bgcolor >> 24) & 0xFF, + (bgcolor >> 16) & 0xFF, + (bgcolor >> 8) & 0xFF, + bgcolor & 0xFF, + ) + self.info["background"] = (bg_r, bg_g, bg_b, bg_a) + self.n_frames = frame_count + self.is_animated = self.n_frames > 1 + self.mode = "RGB" if mode == "RGBX" else mode + self.rawmode = mode + self.tile = [] + + # Attempt to read ICC / EXIF / XMP chunks from file + icc_profile = self._decoder.get_chunk("ICCP") + exif = self._decoder.get_chunk("EXIF") + xmp = self._decoder.get_chunk("XMP ") + if icc_profile: + self.info["icc_profile"] = icc_profile + if exif: + self.info["exif"] = exif + if xmp: + self.info["xmp"] = xmp + + # Initialize seek state + self._reset(reset=False) + + def _getexif(self): + if "exif" not in self.info: + return None + return self.getexif()._get_merged_dict() + + def seek(self, frame): + if not self._seek_check(frame): + return + + # Set logical frame to requested position + self.__logical_frame = frame + + def _reset(self, reset=True): + if reset: + self._decoder.reset() + self.__physical_frame = 0 + self.__loaded = -1 + self.__timestamp = 0 + + def _get_next(self): + # Get next frame + ret = self._decoder.get_next() + self.__physical_frame += 1 + + # Check if an error occurred + if ret is None: + self._reset() # Reset just to be safe + self.seek(0) + raise EOFError("failed to decode next frame in WebP file") + + # Compute duration + data, timestamp = ret + duration = timestamp - self.__timestamp + self.__timestamp = timestamp + + # libwebp gives frame end, adjust to start of frame + timestamp -= duration + return data, timestamp, duration + + def _seek(self, frame): + if self.__physical_frame == frame: + return # Nothing to do + if frame < self.__physical_frame: + self._reset() # Rewind to 
beginning + while self.__physical_frame < frame: + self._get_next() # Advance to the requested frame + + def load(self): + if _webp.HAVE_WEBPANIM: + if self.__loaded != self.__logical_frame: + self._seek(self.__logical_frame) + + # We need to load the image data for this frame + data, timestamp, duration = self._get_next() + self.info["timestamp"] = timestamp + self.info["duration"] = duration + self.__loaded = self.__logical_frame + + # Set tile + if self.fp and self._exclusive_fp: + self.fp.close() + self.fp = BytesIO(data) + self.tile = [("raw", (0, 0) + self.size, 0, self.rawmode)] + + return super().load() + + def tell(self): + if not _webp.HAVE_WEBPANIM: + return super().tell() + + return self.__logical_frame + + +def _save_all(im, fp, filename): + encoderinfo = im.encoderinfo.copy() + append_images = list(encoderinfo.get("append_images", [])) + + # If total frame count is 1, then save using the legacy API, which + # will preserve non-alpha modes + total = 0 + for ims in [im] + append_images: + total += getattr(ims, "n_frames", 1) + if total == 1: + _save(im, fp, filename) + return + + background = (0, 0, 0, 0) + if "background" in encoderinfo: + background = encoderinfo["background"] + elif "background" in im.info: + background = im.info["background"] + if isinstance(background, int): + # GifImagePlugin stores a global color table index in + # info["background"]. 
So it must be converted to an RGBA value + palette = im.getpalette() + if palette: + r, g, b = palette[background * 3 : (background + 1) * 3] + background = (r, g, b, 0) + + duration = im.encoderinfo.get("duration", im.info.get("duration")) + loop = im.encoderinfo.get("loop", 0) + minimize_size = im.encoderinfo.get("minimize_size", False) + kmin = im.encoderinfo.get("kmin", None) + kmax = im.encoderinfo.get("kmax", None) + allow_mixed = im.encoderinfo.get("allow_mixed", False) + verbose = False + lossless = im.encoderinfo.get("lossless", False) + quality = im.encoderinfo.get("quality", 80) + method = im.encoderinfo.get("method", 0) + icc_profile = im.encoderinfo.get("icc_profile") or "" + exif = im.encoderinfo.get("exif", "") + if isinstance(exif, Image.Exif): + exif = exif.tobytes() + xmp = im.encoderinfo.get("xmp", "") + if allow_mixed: + lossless = False + + # Sensible keyframe defaults are from gif2webp.c script + if kmin is None: + kmin = 9 if lossless else 3 + if kmax is None: + kmax = 17 if lossless else 5 + + # Validate background color + if ( + not isinstance(background, (list, tuple)) + or len(background) != 4 + or not all(v >= 0 and v < 256 for v in background) + ): + raise OSError( + "Background color is not an RGBA tuple clamped to (0-255): %s" + % str(background) + ) + + # Convert to packed uint + bg_r, bg_g, bg_b, bg_a = background + background = (bg_a << 24) | (bg_r << 16) | (bg_g << 8) | (bg_b << 0) + + # Setup the WebP animation encoder + enc = _webp.WebPAnimEncoder( + im.size[0], + im.size[1], + background, + loop, + minimize_size, + kmin, + kmax, + allow_mixed, + verbose, + ) + + # Add each frame + frame_idx = 0 + timestamp = 0 + cur_idx = im.tell() + try: + for ims in [im] + append_images: + # Get # of frames in this image + nfr = getattr(ims, "n_frames", 1) + + for idx in range(nfr): + ims.seek(idx) + ims.load() + + # Make sure image mode is supported + frame = ims + rawmode = ims.mode + if ims.mode not in _VALID_WEBP_MODES: + alpha = ( + "A" 
in ims.mode + or "a" in ims.mode + or (ims.mode == "P" and "A" in ims.im.getpalettemode()) + ) + rawmode = "RGBA" if alpha else "RGB" + frame = ims.convert(rawmode) + + if rawmode == "RGB": + # For faster conversion, use RGBX + rawmode = "RGBX" + + # Append the frame to the animation encoder + enc.add( + frame.tobytes("raw", rawmode), + timestamp, + frame.size[0], + frame.size[1], + rawmode, + lossless, + quality, + method, + ) + + # Update timestamp and frame index + if isinstance(duration, (list, tuple)): + timestamp += duration[frame_idx] + else: + timestamp += duration + frame_idx += 1 + + finally: + im.seek(cur_idx) + + # Force encoder to flush frames + enc.add(None, timestamp, 0, 0, "", lossless, quality, 0) + + # Get the final output from the encoder + data = enc.assemble(icc_profile, exif, xmp) + if data is None: + raise OSError("cannot write file as WebP (encoder returned None)") + + fp.write(data) + + +def _save(im, fp, filename): + lossless = im.encoderinfo.get("lossless", False) + quality = im.encoderinfo.get("quality", 80) + icc_profile = im.encoderinfo.get("icc_profile") or "" + exif = im.encoderinfo.get("exif", "") + if isinstance(exif, Image.Exif): + exif = exif.tobytes() + xmp = im.encoderinfo.get("xmp", "") + method = im.encoderinfo.get("method", 4) + + if im.mode not in _VALID_WEBP_LEGACY_MODES: + alpha = ( + "A" in im.mode + or "a" in im.mode + or (im.mode == "P" and "transparency" in im.info) + ) + im = im.convert("RGBA" if alpha else "RGB") + + data = _webp.WebPEncode( + im.tobytes(), + im.size[0], + im.size[1], + lossless, + float(quality), + im.mode, + icc_profile, + method, + exif, + xmp, + ) + if data is None: + raise OSError("cannot write file as WebP (encoder returned None)") + + fp.write(data) + + +Image.register_open(WebPImageFile.format, WebPImageFile, _accept) +if SUPPORTED: + Image.register_save(WebPImageFile.format, _save) + if _webp.HAVE_WEBPANIM: + Image.register_save_all(WebPImageFile.format, _save_all) + 
Image.register_extension(WebPImageFile.format, ".webp") + Image.register_mime(WebPImageFile.format, "image/webp") diff --git a/.venv/lib/python3.9/site-packages/PIL/WmfImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/WmfImagePlugin.py new file mode 100644 index 00000000..27f5d2f8 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/WmfImagePlugin.py @@ -0,0 +1,178 @@ +# +# The Python Imaging Library +# $Id$ +# +# WMF stub codec +# +# history: +# 1996-12-14 fl Created +# 2004-02-22 fl Turned into a stub driver +# 2004-02-23 fl Added EMF support +# +# Copyright (c) Secret Labs AB 1997-2004. All rights reserved. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# +# WMF/EMF reference documentation: +# https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-WMF/[MS-WMF].pdf +# http://wvware.sourceforge.net/caolan/index.html +# http://wvware.sourceforge.net/caolan/ora-wmf.html + +from . import Image, ImageFile +from ._binary import i16le as word +from ._binary import i32le as dword +from ._binary import si16le as short +from ._binary import si32le as _long + +_handler = None + + +def register_handler(handler): + """ + Install application-specific WMF image handler. + + :param handler: Handler object. 
+ """ + global _handler + _handler = handler + + +if hasattr(Image.core, "drawwmf"): + # install default handler (windows only) + + class WmfHandler: + def open(self, im): + im.mode = "RGB" + self.bbox = im.info["wmf_bbox"] + + def load(self, im): + im.fp.seek(0) # rewind + return Image.frombytes( + "RGB", + im.size, + Image.core.drawwmf(im.fp.read(), im.size, self.bbox), + "raw", + "BGR", + (im.size[0] * 3 + 3) & -4, + -1, + ) + + register_handler(WmfHandler()) + +# +# -------------------------------------------------------------------- +# Read WMF file + + +def _accept(prefix): + return ( + prefix[:6] == b"\xd7\xcd\xc6\x9a\x00\x00" or prefix[:4] == b"\x01\x00\x00\x00" + ) + + +## +# Image plugin for Windows metafiles. + + +class WmfStubImageFile(ImageFile.StubImageFile): + + format = "WMF" + format_description = "Windows Metafile" + + def _open(self): + self._inch = None + + # check placable header + s = self.fp.read(80) + + if s[:6] == b"\xd7\xcd\xc6\x9a\x00\x00": + + # placeable windows metafile + + # get units per inch + self._inch = word(s, 14) + + # get bounding box + x0 = short(s, 6) + y0 = short(s, 8) + x1 = short(s, 10) + y1 = short(s, 12) + + # normalize size to 72 dots per inch + self.info["dpi"] = 72 + size = ( + (x1 - x0) * self.info["dpi"] // self._inch, + (y1 - y0) * self.info["dpi"] // self._inch, + ) + + self.info["wmf_bbox"] = x0, y0, x1, y1 + + # sanity check (standard metafile header) + if s[22:26] != b"\x01\x00\t\x00": + raise SyntaxError("Unsupported WMF file format") + + elif dword(s) == 1 and s[40:44] == b" EMF": + # enhanced metafile + + # get bounding box + x0 = _long(s, 8) + y0 = _long(s, 12) + x1 = _long(s, 16) + y1 = _long(s, 20) + + # get frame (in 0.01 millimeter units) + frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36) + + size = x1 - x0, y1 - y0 + + # calculate dots per inch from bbox and frame + xdpi = 2540.0 * (x1 - y0) / (frame[2] - frame[0]) + ydpi = 2540.0 * (y1 - y0) / (frame[3] - frame[1]) + + 
self.info["wmf_bbox"] = x0, y0, x1, y1 + + if xdpi == ydpi: + self.info["dpi"] = xdpi + else: + self.info["dpi"] = xdpi, ydpi + + else: + raise SyntaxError("Unsupported file format") + + self.mode = "RGB" + self._size = size + + loader = self._load() + if loader: + loader.open(self) + + def _load(self): + return _handler + + def load(self, dpi=None): + if dpi is not None and self._inch is not None: + self.info["dpi"] = dpi + x0, y0, x1, y1 = self.info["wmf_bbox"] + self._size = ( + (x1 - x0) * self.info["dpi"] // self._inch, + (y1 - y0) * self.info["dpi"] // self._inch, + ) + super().load() + + +def _save(im, fp, filename): + if _handler is None or not hasattr(_handler, "save"): + raise OSError("WMF save handler not installed") + _handler.save(im, fp, filename) + + +# +# -------------------------------------------------------------------- +# Registry stuff + + +Image.register_open(WmfStubImageFile.format, WmfStubImageFile, _accept) +Image.register_save(WmfStubImageFile.format, _save) + +Image.register_extensions(WmfStubImageFile.format, [".wmf", ".emf"]) diff --git a/.venv/lib/python3.9/site-packages/PIL/XVThumbImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/XVThumbImagePlugin.py new file mode 100644 index 00000000..4efedb77 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/XVThumbImagePlugin.py @@ -0,0 +1,78 @@ +# +# The Python Imaging Library. +# $Id$ +# +# XV Thumbnail file handler by Charles E. "Gene" Cash +# (gcash@magicnet.net) +# +# see xvcolor.c and xvbrowse.c in the sources to John Bradley's XV, +# available from ftp://ftp.cis.upenn.edu/pub/xv/ +# +# history: +# 98-08-15 cec created (b/w only) +# 98-12-09 cec added color palette +# 98-12-28 fl added to PIL (with only a few very minor modifications) +# +# To do: +# FIXME: make save work (this requires quantization support) +# + +from . 
import Image, ImageFile, ImagePalette +from ._binary import o8 + +_MAGIC = b"P7 332" + +# standard color palette for thumbnails (RGB332) +PALETTE = b"" +for r in range(8): + for g in range(8): + for b in range(4): + PALETTE = PALETTE + ( + o8((r * 255) // 7) + o8((g * 255) // 7) + o8((b * 255) // 3) + ) + + +def _accept(prefix): + return prefix[:6] == _MAGIC + + +## +# Image plugin for XV thumbnail images. + + +class XVThumbImageFile(ImageFile.ImageFile): + + format = "XVThumb" + format_description = "XV thumbnail image" + + def _open(self): + + # check magic + if not _accept(self.fp.read(6)): + raise SyntaxError("not an XV thumbnail file") + + # Skip to beginning of next line + self.fp.readline() + + # skip info comments + while True: + s = self.fp.readline() + if not s: + raise SyntaxError("Unexpected EOF reading XV thumbnail file") + if s[0] != 35: # ie. when not a comment: '#' + break + + # parse header line (already read) + s = s.strip().split() + + self.mode = "P" + self._size = int(s[0]), int(s[1]) + + self.palette = ImagePalette.raw("RGB", PALETTE) + + self.tile = [("raw", (0, 0) + self.size, self.fp.tell(), (self.mode, 0, 1))] + + +# -------------------------------------------------------------------- + +Image.register_open(XVThumbImageFile.format, XVThumbImageFile, _accept) diff --git a/.venv/lib/python3.9/site-packages/PIL/XbmImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/XbmImagePlugin.py new file mode 100644 index 00000000..644cfb39 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/XbmImagePlugin.py @@ -0,0 +1,94 @@ +# +# The Python Imaging Library. 
+# $Id$ +# +# XBM File handling +# +# History: +# 1995-09-08 fl Created +# 1996-11-01 fl Added save support +# 1997-07-07 fl Made header parser more tolerant +# 1997-07-22 fl Fixed yet another parser bug +# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4) +# 2001-05-13 fl Added hotspot handling (based on code from Bernhard Herzog) +# 2004-02-24 fl Allow some whitespace before first #define +# +# Copyright (c) 1997-2004 by Secret Labs AB +# Copyright (c) 1996-1997 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import re + +from . import Image, ImageFile + +# XBM header +xbm_head = re.compile( + br"\s*#define[ \t]+.*_width[ \t]+(?P[0-9]+)[\r\n]+" + b"#define[ \t]+.*_height[ \t]+(?P[0-9]+)[\r\n]+" + b"(?P" + b"#define[ \t]+[^_]*_x_hot[ \t]+(?P[0-9]+)[\r\n]+" + b"#define[ \t]+[^_]*_y_hot[ \t]+(?P[0-9]+)[\r\n]+" + b")?" + b"[\\000-\\377]*_bits\\[\\]" +) + + +def _accept(prefix): + return prefix.lstrip()[:7] == b"#define" + + +## +# Image plugin for X11 bitmaps. 
+ + +class XbmImageFile(ImageFile.ImageFile): + + format = "XBM" + format_description = "X11 Bitmap" + + def _open(self): + + m = xbm_head.match(self.fp.read(512)) + + if m: + + xsize = int(m.group("width")) + ysize = int(m.group("height")) + + if m.group("hotspot"): + self.info["hotspot"] = (int(m.group("xhot")), int(m.group("yhot"))) + + self.mode = "1" + self._size = xsize, ysize + + self.tile = [("xbm", (0, 0) + self.size, m.end(), None)] + + +def _save(im, fp, filename): + + if im.mode != "1": + raise OSError(f"cannot write mode {im.mode} as XBM") + + fp.write(f"#define im_width {im.size[0]}\n".encode("ascii")) + fp.write(f"#define im_height {im.size[1]}\n".encode("ascii")) + + hotspot = im.encoderinfo.get("hotspot") + if hotspot: + fp.write(f"#define im_x_hot {hotspot[0]}\n".encode("ascii")) + fp.write(f"#define im_y_hot {hotspot[1]}\n".encode("ascii")) + + fp.write(b"static char im_bits[] = {\n") + + ImageFile._save(im, fp, [("xbm", (0, 0) + im.size, 0, None)]) + + fp.write(b"};\n") + + +Image.register_open(XbmImageFile.format, XbmImageFile, _accept) +Image.register_save(XbmImageFile.format, _save) + +Image.register_extension(XbmImageFile.format, ".xbm") + +Image.register_mime(XbmImageFile.format, "image/xbm") diff --git a/.venv/lib/python3.9/site-packages/PIL/XpmImagePlugin.py b/.venv/lib/python3.9/site-packages/PIL/XpmImagePlugin.py new file mode 100644 index 00000000..ebd65ba5 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/XpmImagePlugin.py @@ -0,0 +1,130 @@ +# +# The Python Imaging Library. +# $Id$ +# +# XPM File handling +# +# History: +# 1996-12-29 fl Created +# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7) +# +# Copyright (c) Secret Labs AB 1997-2001. +# Copyright (c) Fredrik Lundh 1996-2001. +# +# See the README file for information on usage and redistribution. +# + + +import re + +from . 
import Image, ImageFile, ImagePalette +from ._binary import o8 + +# XPM header +xpm_head = re.compile(b'"([0-9]*) ([0-9]*) ([0-9]*) ([0-9]*)') + + +def _accept(prefix): + return prefix[:9] == b"/* XPM */" + + +## +# Image plugin for X11 pixel maps. + + +class XpmImageFile(ImageFile.ImageFile): + + format = "XPM" + format_description = "X11 Pixel Map" + + def _open(self): + + if not _accept(self.fp.read(9)): + raise SyntaxError("not an XPM file") + + # skip forward to next string + while True: + s = self.fp.readline() + if not s: + raise SyntaxError("broken XPM file") + m = xpm_head.match(s) + if m: + break + + self._size = int(m.group(1)), int(m.group(2)) + + pal = int(m.group(3)) + bpp = int(m.group(4)) + + if pal > 256 or bpp != 1: + raise ValueError("cannot read this XPM file") + + # + # load palette description + + palette = [b"\0\0\0"] * 256 + + for i in range(pal): + + s = self.fp.readline() + if s[-2:] == b"\r\n": + s = s[:-2] + elif s[-1:] in b"\r\n": + s = s[:-1] + + c = s[1] + s = s[2:-2].split() + + for i in range(0, len(s), 2): + + if s[i] == b"c": + + # process colour key + rgb = s[i + 1] + if rgb == b"None": + self.info["transparency"] = c + elif rgb[0:1] == b"#": + # FIXME: handle colour names (see ImagePalette.py) + rgb = int(rgb[1:], 16) + palette[c] = ( + o8((rgb >> 16) & 255) + o8((rgb >> 8) & 255) + o8(rgb & 255) + ) + else: + # unknown colour + raise ValueError("cannot read this XPM file") + break + + else: + + # missing colour key + raise ValueError("cannot read this XPM file") + + self.mode = "P" + self.palette = ImagePalette.raw("RGB", b"".join(palette)) + + self.tile = [("raw", (0, 0) + self.size, self.fp.tell(), ("P", 0, 1))] + + def load_read(self, bytes): + + # + # load all image data in one chunk + + xsize, ysize = self.size + + s = [None] * ysize + + for i in range(ysize): + s[i] = self.fp.readline()[1 : xsize + 1].ljust(xsize) + + return b"".join(s) + + +# +# Registry + + +Image.register_open(XpmImageFile.format, XpmImageFile, 
_accept) + +Image.register_extension(XpmImageFile.format, ".xpm") + +Image.register_mime(XpmImageFile.format, "image/xpm") diff --git a/.venv/lib/python3.9/site-packages/PIL/__init__.py b/.venv/lib/python3.9/site-packages/PIL/__init__.py new file mode 100644 index 00000000..890ae44f --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/__init__.py @@ -0,0 +1,139 @@ +"""Pillow (Fork of the Python Imaging Library) + +Pillow is the friendly PIL fork by Alex Clark and Contributors. + https://github.com/python-pillow/Pillow/ + +Pillow is forked from PIL 1.1.7. + +PIL is the Python Imaging Library by Fredrik Lundh and Contributors. +Copyright (c) 1999 by Secret Labs AB. + +Use PIL.__version__ for this Pillow version. + +;-) +""" + +import sys +import warnings + +from . import _version + +# VERSION was removed in Pillow 6.0.0. +__version__ = _version.__version__ + + +# PILLOW_VERSION is deprecated and will be removed in a future release. +# Use __version__ instead. +def _raise_version_warning(): + warnings.warn( + "PILLOW_VERSION is deprecated and will be removed in Pillow 9 (2022-01-02). 
" + "Use __version__ instead.", + DeprecationWarning, + stacklevel=3, + ) + + +if sys.version_info >= (3, 7): + + def __getattr__(name): + if name == "PILLOW_VERSION": + _raise_version_warning() + return __version__ + raise AttributeError(f"module '{__name__}' has no attribute '{name}'") + + +else: + + class _Deprecated_Version(str): + def __str__(self): + _raise_version_warning() + return super().__str__() + + def __getitem__(self, key): + _raise_version_warning() + return super().__getitem__(key) + + def __eq__(self, other): + _raise_version_warning() + return super().__eq__(other) + + def __ne__(self, other): + _raise_version_warning() + return super().__ne__(other) + + def __gt__(self, other): + _raise_version_warning() + return super().__gt__(other) + + def __lt__(self, other): + _raise_version_warning() + return super().__lt__(other) + + def __ge__(self, other): + _raise_version_warning() + return super().__gt__(other) + + def __le__(self, other): + _raise_version_warning() + return super().__lt__(other) + + PILLOW_VERSION = _Deprecated_Version(__version__) + +del _version + + +_plugins = [ + "BlpImagePlugin", + "BmpImagePlugin", + "BufrStubImagePlugin", + "CurImagePlugin", + "DcxImagePlugin", + "DdsImagePlugin", + "EpsImagePlugin", + "FitsStubImagePlugin", + "FliImagePlugin", + "FpxImagePlugin", + "FtexImagePlugin", + "GbrImagePlugin", + "GifImagePlugin", + "GribStubImagePlugin", + "Hdf5StubImagePlugin", + "IcnsImagePlugin", + "IcoImagePlugin", + "ImImagePlugin", + "ImtImagePlugin", + "IptcImagePlugin", + "JpegImagePlugin", + "Jpeg2KImagePlugin", + "McIdasImagePlugin", + "MicImagePlugin", + "MpegImagePlugin", + "MpoImagePlugin", + "MspImagePlugin", + "PalmImagePlugin", + "PcdImagePlugin", + "PcxImagePlugin", + "PdfImagePlugin", + "PixarImagePlugin", + "PngImagePlugin", + "PpmImagePlugin", + "PsdImagePlugin", + "SgiImagePlugin", + "SpiderImagePlugin", + "SunImagePlugin", + "TgaImagePlugin", + "TiffImagePlugin", + "WebPImagePlugin", + "WmfImagePlugin", + 
"XbmImagePlugin", + "XpmImagePlugin", + "XVThumbImagePlugin", +] + + +class UnidentifiedImageError(OSError): + """ + Raised in :py:meth:`PIL.Image.open` if an image cannot be opened and identified. + """ + + pass diff --git a/.venv/lib/python3.9/site-packages/PIL/__main__.py b/.venv/lib/python3.9/site-packages/PIL/__main__.py new file mode 100644 index 00000000..a05323f9 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/__main__.py @@ -0,0 +1,3 @@ +from .features import pilinfo + +pilinfo() diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/BdfFontFile.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/BdfFontFile.cpython-39.pyc new file mode 100644 index 00000000..16db09b3 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/BdfFontFile.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/BlpImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/BlpImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..82965858 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/BlpImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/BmpImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/BmpImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..b6aa5060 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/BmpImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/BufrStubImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/BufrStubImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..fa38bab4 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/BufrStubImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ContainerIO.cpython-39.pyc 
b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ContainerIO.cpython-39.pyc new file mode 100644 index 00000000..f515f02c Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ContainerIO.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/CurImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/CurImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..86d87fe1 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/CurImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/DcxImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/DcxImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..0592341b Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/DcxImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/DdsImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/DdsImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..50a16382 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/DdsImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/EpsImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/EpsImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..6db4df55 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/EpsImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ExifTags.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ExifTags.cpython-39.pyc new file mode 100644 index 00000000..2b32248a Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ExifTags.cpython-39.pyc differ diff --git 
a/.venv/lib/python3.9/site-packages/PIL/__pycache__/FitsStubImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/FitsStubImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..269436f4 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/FitsStubImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/FliImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/FliImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..64df2000 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/FliImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/FontFile.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/FontFile.cpython-39.pyc new file mode 100644 index 00000000..063aa750 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/FontFile.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/FpxImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/FpxImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..5905f353 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/FpxImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/FtexImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/FtexImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..295cbf8e Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/FtexImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/GbrImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/GbrImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..b8afda98 Binary files /dev/null and 
b/.venv/lib/python3.9/site-packages/PIL/__pycache__/GbrImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/GdImageFile.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/GdImageFile.cpython-39.pyc new file mode 100644 index 00000000..1ce02c59 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/GdImageFile.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/GifImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/GifImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..eef81bed Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/GifImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/GimpGradientFile.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/GimpGradientFile.cpython-39.pyc new file mode 100644 index 00000000..29207c93 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/GimpGradientFile.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/GimpPaletteFile.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/GimpPaletteFile.cpython-39.pyc new file mode 100644 index 00000000..ba440feb Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/GimpPaletteFile.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/GribStubImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/GribStubImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..480ba42a Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/GribStubImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/Hdf5StubImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/Hdf5StubImagePlugin.cpython-39.pyc new file mode 100644 
index 00000000..0aec9518 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/Hdf5StubImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/IcnsImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/IcnsImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..966a3306 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/IcnsImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/IcoImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/IcoImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..dd87fdfc Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/IcoImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..0f07dcbd Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/Image.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/Image.cpython-39.pyc new file mode 100644 index 00000000..63fc10cd Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/Image.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageChops.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageChops.cpython-39.pyc new file mode 100644 index 00000000..405003c8 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageChops.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageCms.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageCms.cpython-39.pyc new file mode 100644 index 
00000000..846a6107 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageCms.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageColor.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageColor.cpython-39.pyc new file mode 100644 index 00000000..f7880f52 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageColor.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageDraw.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageDraw.cpython-39.pyc new file mode 100644 index 00000000..f0007dd1 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageDraw.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageDraw2.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageDraw2.cpython-39.pyc new file mode 100644 index 00000000..707e48a9 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageDraw2.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageEnhance.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageEnhance.cpython-39.pyc new file mode 100644 index 00000000..661c92dd Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageEnhance.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageFile.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageFile.cpython-39.pyc new file mode 100644 index 00000000..f047e05f Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageFile.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageFilter.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageFilter.cpython-39.pyc new file mode 100644 index 00000000..13e7fcd7 Binary files /dev/null and 
b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageFilter.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageFont.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageFont.cpython-39.pyc new file mode 100644 index 00000000..f6c1e2a3 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageFont.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageGrab.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageGrab.cpython-39.pyc new file mode 100644 index 00000000..ec610bac Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageGrab.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageMath.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageMath.cpython-39.pyc new file mode 100644 index 00000000..0ff61fb4 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageMath.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageMode.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageMode.cpython-39.pyc new file mode 100644 index 00000000..b51d2945 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageMode.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageMorph.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageMorph.cpython-39.pyc new file mode 100644 index 00000000..2058a444 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageMorph.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageOps.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageOps.cpython-39.pyc new file mode 100644 index 00000000..61666b24 Binary files /dev/null and 
b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageOps.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImagePalette.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImagePalette.cpython-39.pyc new file mode 100644 index 00000000..3376b9cf Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImagePalette.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImagePath.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImagePath.cpython-39.pyc new file mode 100644 index 00000000..aaa8d557 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImagePath.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageQt.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageQt.cpython-39.pyc new file mode 100644 index 00000000..c2c27236 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageQt.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageSequence.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageSequence.cpython-39.pyc new file mode 100644 index 00000000..b4b83b33 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageSequence.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageShow.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageShow.cpython-39.pyc new file mode 100644 index 00000000..25c4e386 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageShow.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageStat.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageStat.cpython-39.pyc new file mode 100644 index 00000000..c6d59f99 Binary files /dev/null and 
b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageStat.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageTk.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageTk.cpython-39.pyc new file mode 100644 index 00000000..94363045 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageTk.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageTransform.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageTransform.cpython-39.pyc new file mode 100644 index 00000000..f5781b29 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageTransform.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageWin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageWin.cpython-39.pyc new file mode 100644 index 00000000..3bae201b Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImageWin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImtImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImtImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..30ea87fd Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/ImtImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/IptcImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/IptcImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..0e5f0353 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/IptcImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/Jpeg2KImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/Jpeg2KImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..b58e37f1 Binary files /dev/null and 
b/.venv/lib/python3.9/site-packages/PIL/__pycache__/Jpeg2KImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/JpegImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/JpegImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..3658c6b6 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/JpegImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/JpegPresets.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/JpegPresets.cpython-39.pyc new file mode 100644 index 00000000..9d1bebbe Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/JpegPresets.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/McIdasImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/McIdasImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..006b6959 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/McIdasImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/MicImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/MicImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..2e0a844f Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/MicImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/MpegImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/MpegImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..5abca8ff Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/MpegImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/MpoImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/MpoImagePlugin.cpython-39.pyc new file mode 100644 index 
00000000..bc9592cb Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/MpoImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/MspImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/MspImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..f755b52b Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/MspImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/PSDraw.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PSDraw.cpython-39.pyc new file mode 100644 index 00000000..8df3eecb Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PSDraw.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/PaletteFile.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PaletteFile.cpython-39.pyc new file mode 100644 index 00000000..275ce26a Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PaletteFile.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/PalmImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PalmImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..cceae806 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PalmImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/PcdImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PcdImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..ceb3bbe5 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PcdImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/PcfFontFile.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PcfFontFile.cpython-39.pyc new file mode 100644 index 
00000000..601f8076 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PcfFontFile.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/PcxImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PcxImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..8d70e1c9 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PcxImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/PdfImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PdfImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..5dc1839f Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PdfImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/PdfParser.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PdfParser.cpython-39.pyc new file mode 100644 index 00000000..41736c2c Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PdfParser.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/PixarImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PixarImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..6fba0a70 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PixarImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/PngImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PngImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..86e5d02f Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PngImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/PpmImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PpmImagePlugin.cpython-39.pyc new file mode 
100644 index 00000000..1c06b776 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PpmImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/PsdImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PsdImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..8c0dff9d Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PsdImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/PyAccess.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PyAccess.cpython-39.pyc new file mode 100644 index 00000000..eb006787 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/PyAccess.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/SgiImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/SgiImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..2cb7ae76 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/SgiImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/SpiderImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/SpiderImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..717cd279 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/SpiderImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/SunImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/SunImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..43e1a7db Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/SunImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/TarIO.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/TarIO.cpython-39.pyc new file mode 
100644 index 00000000..2fd0cb05 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/TarIO.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/TgaImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/TgaImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..04c45790 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/TgaImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/TiffImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/TiffImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..bd4d4d9a Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/TiffImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/TiffTags.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/TiffTags.cpython-39.pyc new file mode 100644 index 00000000..45a24cbb Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/TiffTags.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/WalImageFile.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/WalImageFile.cpython-39.pyc new file mode 100644 index 00000000..6c4846de Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/WalImageFile.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/WebPImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/WebPImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..5cd20a9b Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/WebPImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/WmfImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/WmfImagePlugin.cpython-39.pyc new file mode 
100644 index 00000000..4bc29c80 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/WmfImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/XVThumbImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/XVThumbImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..d5af023d Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/XVThumbImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/XbmImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/XbmImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..73d9a67e Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/XbmImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/XpmImagePlugin.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/XpmImagePlugin.cpython-39.pyc new file mode 100644 index 00000000..e9828245 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/XpmImagePlugin.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/__init__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..fd9b20e1 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/__main__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/__main__.cpython-39.pyc new file mode 100644 index 00000000..d74f3524 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/__main__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/_binary.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/_binary.cpython-39.pyc new file mode 100644 index 
00000000..3753e4e9 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/_binary.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/_tkinter_finder.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/_tkinter_finder.cpython-39.pyc new file mode 100644 index 00000000..24aa3a82 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/_tkinter_finder.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/_util.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/_util.cpython-39.pyc new file mode 100644 index 00000000..2f37e74a Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/_util.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/_version.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/_version.cpython-39.pyc new file mode 100644 index 00000000..9b05a2f2 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/_version.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/__pycache__/features.cpython-39.pyc b/.venv/lib/python3.9/site-packages/PIL/__pycache__/features.cpython-39.pyc new file mode 100644 index 00000000..c1e95d7e Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/__pycache__/features.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/PIL/_binary.py b/.venv/lib/python3.9/site-packages/PIL/_binary.py new file mode 100644 index 00000000..a74ee9eb --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/_binary.py @@ -0,0 +1,102 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Binary input/output support routines. +# +# Copyright (c) 1997-2003 by Secret Labs AB +# Copyright (c) 1995-2003 by Fredrik Lundh +# Copyright (c) 2012 by Brian Crowell +# +# See the README file for information on usage and redistribution. 
+# + + +"""Binary input/output support routines.""" + + +from struct import pack, unpack_from + + +def i8(c): + return c if c.__class__ is int else c[0] + + +def o8(i): + return bytes((i & 255,)) + + +# Input, le = little endian, be = big endian +def i16le(c, o=0): + """ + Converts a 2-bytes (16 bits) string to an unsigned integer. + + :param c: string containing bytes to convert + :param o: offset of bytes to convert in string + """ + return unpack_from("h", c, o)[0] + + +def i32le(c, o=0): + """ + Converts a 4-bytes (32 bits) string to an unsigned integer. + + :param c: string containing bytes to convert + :param o: offset of bytes to convert in string + """ + return unpack_from("H", c, o)[0] + + +def i32be(c, o=0): + return unpack_from(">I", c, o)[0] + + +# Output, le = little endian, be = big endian +def o16le(i): + return pack("H", i) + + +def o32be(i): + return pack(">I", i) diff --git a/.venv/lib/python3.9/site-packages/PIL/_imaging.cpython-39-x86_64-linux-gnu.so b/.venv/lib/python3.9/site-packages/PIL/_imaging.cpython-39-x86_64-linux-gnu.so new file mode 100644 index 00000000..542623e3 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/_imaging.cpython-39-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.9/site-packages/PIL/_imagingcms.cpython-39-x86_64-linux-gnu.so b/.venv/lib/python3.9/site-packages/PIL/_imagingcms.cpython-39-x86_64-linux-gnu.so new file mode 100644 index 00000000..3e9888cb Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/_imagingcms.cpython-39-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.9/site-packages/PIL/_imagingft.cpython-39-x86_64-linux-gnu.so b/.venv/lib/python3.9/site-packages/PIL/_imagingft.cpython-39-x86_64-linux-gnu.so new file mode 100644 index 00000000..6997ae16 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/_imagingft.cpython-39-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.9/site-packages/PIL/_imagingmath.cpython-39-x86_64-linux-gnu.so 
b/.venv/lib/python3.9/site-packages/PIL/_imagingmath.cpython-39-x86_64-linux-gnu.so new file mode 100644 index 00000000..07ac2b32 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/_imagingmath.cpython-39-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.9/site-packages/PIL/_imagingmorph.cpython-39-x86_64-linux-gnu.so b/.venv/lib/python3.9/site-packages/PIL/_imagingmorph.cpython-39-x86_64-linux-gnu.so new file mode 100644 index 00000000..e5218593 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/_imagingmorph.cpython-39-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.9/site-packages/PIL/_imagingtk.cpython-39-x86_64-linux-gnu.so b/.venv/lib/python3.9/site-packages/PIL/_imagingtk.cpython-39-x86_64-linux-gnu.so new file mode 100644 index 00000000..8a943efe Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/_imagingtk.cpython-39-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.9/site-packages/PIL/_tkinter_finder.py b/.venv/lib/python3.9/site-packages/PIL/_tkinter_finder.py new file mode 100644 index 00000000..58aeffbf --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/_tkinter_finder.py @@ -0,0 +1,20 @@ +""" Find compiled module linking to Tcl / Tk libraries +""" +import sys +import tkinter +import warnings +from tkinter import _tkinter as tk + +if hasattr(sys, "pypy_find_executable"): + TKINTER_LIB = tk.tklib_cffi.__file__ +else: + TKINTER_LIB = tk.__file__ + +tk_version = str(tkinter.TkVersion) +if tk_version == "8.4": + warnings.warn( + "Support for Tk/Tcl 8.4 is deprecated and will be removed" + " in Pillow 10 (2023-01-02). 
Please upgrade to Tk/Tcl 8.5 " + "or newer.", + DeprecationWarning, + ) diff --git a/.venv/lib/python3.9/site-packages/PIL/_util.py b/.venv/lib/python3.9/site-packages/PIL/_util.py new file mode 100644 index 00000000..0c5d3892 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/_util.py @@ -0,0 +1,19 @@ +import os +from pathlib import Path + + +def isPath(f): + return isinstance(f, (bytes, str, Path)) + + +# Checks if an object is a string, and that it points to a directory. +def isDirectory(f): + return isPath(f) and os.path.isdir(f) + + +class deferred_error: + def __init__(self, ex): + self.ex = ex + + def __getattr__(self, elt): + raise self.ex diff --git a/.venv/lib/python3.9/site-packages/PIL/_version.py b/.venv/lib/python3.9/site-packages/PIL/_version.py new file mode 100644 index 00000000..d7f0f7ea --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/_version.py @@ -0,0 +1,2 @@ +# Master version for Pillow +__version__ = "8.4.0" diff --git a/.venv/lib/python3.9/site-packages/PIL/_webp.cpython-39-x86_64-linux-gnu.so b/.venv/lib/python3.9/site-packages/PIL/_webp.cpython-39-x86_64-linux-gnu.so new file mode 100644 index 00000000..4ed812f0 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/PIL/_webp.cpython-39-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.9/site-packages/PIL/features.py b/.venv/lib/python3.9/site-packages/PIL/features.py new file mode 100644 index 00000000..3838568f --- /dev/null +++ b/.venv/lib/python3.9/site-packages/PIL/features.py @@ -0,0 +1,320 @@ +import collections +import os +import sys +import warnings + +import PIL + +from . import Image + +modules = { + "pil": ("PIL._imaging", "PILLOW_VERSION"), + "tkinter": ("PIL._tkinter_finder", "tk_version"), + "freetype2": ("PIL._imagingft", "freetype2_version"), + "littlecms2": ("PIL._imagingcms", "littlecms_version"), + "webp": ("PIL._webp", "webpdecoder_version"), +} + + +def check_module(feature): + """ + Checks if a module is available. 
+ + :param feature: The module to check for. + :returns: ``True`` if available, ``False`` otherwise. + :raises ValueError: If the module is not defined in this version of Pillow. + """ + if not (feature in modules): + raise ValueError(f"Unknown module {feature}") + + module, ver = modules[feature] + + try: + __import__(module) + return True + except ImportError: + return False + + +def version_module(feature): + """ + :param feature: The module to check for. + :returns: + The loaded version number as a string, or ``None`` if unknown or not available. + :raises ValueError: If the module is not defined in this version of Pillow. + """ + if not check_module(feature): + return None + + module, ver = modules[feature] + + if ver is None: + return None + + return getattr(__import__(module, fromlist=[ver]), ver) + + +def get_supported_modules(): + """ + :returns: A list of all supported modules. + """ + return [f for f in modules if check_module(f)] + + +codecs = { + "jpg": ("jpeg", "jpeglib"), + "jpg_2000": ("jpeg2k", "jp2klib"), + "zlib": ("zip", "zlib"), + "libtiff": ("libtiff", "libtiff"), +} + + +def check_codec(feature): + """ + Checks if a codec is available. + + :param feature: The codec to check for. + :returns: ``True`` if available, ``False`` otherwise. + :raises ValueError: If the codec is not defined in this version of Pillow. + """ + if feature not in codecs: + raise ValueError(f"Unknown codec {feature}") + + codec, lib = codecs[feature] + + return codec + "_encoder" in dir(Image.core) + + +def version_codec(feature): + """ + :param feature: The codec to check for. + :returns: + The version number as a string, or ``None`` if not available. + Checked at compile time for ``jpg``, run-time otherwise. + :raises ValueError: If the codec is not defined in this version of Pillow. 
+ """ + if not check_codec(feature): + return None + + codec, lib = codecs[feature] + + version = getattr(Image.core, lib + "_version") + + if feature == "libtiff": + return version.split("\n")[0].split("Version ")[1] + + return version + + +def get_supported_codecs(): + """ + :returns: A list of all supported codecs. + """ + return [f for f in codecs if check_codec(f)] + + +features = { + "webp_anim": ("PIL._webp", "HAVE_WEBPANIM", None), + "webp_mux": ("PIL._webp", "HAVE_WEBPMUX", None), + "transp_webp": ("PIL._webp", "HAVE_TRANSPARENCY", None), + "raqm": ("PIL._imagingft", "HAVE_RAQM", "raqm_version"), + "fribidi": ("PIL._imagingft", "HAVE_FRIBIDI", "fribidi_version"), + "harfbuzz": ("PIL._imagingft", "HAVE_HARFBUZZ", "harfbuzz_version"), + "libjpeg_turbo": ("PIL._imaging", "HAVE_LIBJPEGTURBO", "libjpeg_turbo_version"), + "libimagequant": ("PIL._imaging", "HAVE_LIBIMAGEQUANT", "imagequant_version"), + "xcb": ("PIL._imaging", "HAVE_XCB", None), +} + + +def check_feature(feature): + """ + Checks if a feature is available. + + :param feature: The feature to check for. + :returns: ``True`` if available, ``False`` if unavailable, ``None`` if unknown. + :raises ValueError: If the feature is not defined in this version of Pillow. + """ + if feature not in features: + raise ValueError(f"Unknown feature {feature}") + + module, flag, ver = features[feature] + + try: + imported_module = __import__(module, fromlist=["PIL"]) + return getattr(imported_module, flag) + except ImportError: + return None + + +def version_feature(feature): + """ + :param feature: The feature to check for. + :returns: The version number as a string, or ``None`` if not available. + :raises ValueError: If the feature is not defined in this version of Pillow. 
+ """ + if not check_feature(feature): + return None + + module, flag, ver = features[feature] + + if ver is None: + return None + + return getattr(__import__(module, fromlist=[ver]), ver) + + +def get_supported_features(): + """ + :returns: A list of all supported features. + """ + return [f for f in features if check_feature(f)] + + +def check(feature): + """ + :param feature: A module, codec, or feature name. + :returns: + ``True`` if the module, codec, or feature is available, + ``False`` or ``None`` otherwise. + """ + + if feature in modules: + return check_module(feature) + if feature in codecs: + return check_codec(feature) + if feature in features: + return check_feature(feature) + warnings.warn(f"Unknown feature '{feature}'.", stacklevel=2) + return False + + +def version(feature): + """ + :param feature: + The module, codec, or feature to check for. + :returns: + The version number as a string, or ``None`` if unknown or not available. + """ + if feature in modules: + return version_module(feature) + if feature in codecs: + return version_codec(feature) + if feature in features: + return version_feature(feature) + return None + + +def get_supported(): + """ + :returns: A list of all supported modules, features, and codecs. + """ + + ret = get_supported_modules() + ret.extend(get_supported_features()) + ret.extend(get_supported_codecs()) + return ret + + +def pilinfo(out=None, supported_formats=True): + """ + Prints information about this installation of Pillow. + This function can be called with ``python3 -m PIL``. + + :param out: + The output stream to print to. Defaults to ``sys.stdout`` if ``None``. + :param supported_formats: + If ``True``, a list of all supported image file formats will be printed. 
+ """ + + if out is None: + out = sys.stdout + + Image.init() + + print("-" * 68, file=out) + print(f"Pillow {PIL.__version__}", file=out) + py_version = sys.version.splitlines() + print(f"Python {py_version[0].strip()}", file=out) + for py_version in py_version[1:]: + print(f" {py_version.strip()}", file=out) + print("-" * 68, file=out) + print( + f"Python modules loaded from {os.path.dirname(Image.__file__)}", + file=out, + ) + print( + f"Binary modules loaded from {os.path.dirname(Image.core.__file__)}", + file=out, + ) + print("-" * 68, file=out) + + for name, feature in [ + ("pil", "PIL CORE"), + ("tkinter", "TKINTER"), + ("freetype2", "FREETYPE2"), + ("littlecms2", "LITTLECMS2"), + ("webp", "WEBP"), + ("transp_webp", "WEBP Transparency"), + ("webp_mux", "WEBPMUX"), + ("webp_anim", "WEBP Animation"), + ("jpg", "JPEG"), + ("jpg_2000", "OPENJPEG (JPEG2000)"), + ("zlib", "ZLIB (PNG/ZIP)"), + ("libtiff", "LIBTIFF"), + ("raqm", "RAQM (Bidirectional Text)"), + ("libimagequant", "LIBIMAGEQUANT (Quantization method)"), + ("xcb", "XCB (X protocol)"), + ]: + if check(name): + if name == "jpg" and check_feature("libjpeg_turbo"): + v = "libjpeg-turbo " + version_feature("libjpeg_turbo") + else: + v = version(name) + if v is not None: + version_static = name in ("pil", "jpg") + if name == "littlecms2": + # this check is also in src/_imagingcms.c:setup_module() + version_static = tuple(int(x) for x in v.split(".")) < (2, 7) + t = "compiled for" if version_static else "loaded" + if name == "raqm": + for f in ("fribidi", "harfbuzz"): + v2 = version_feature(f) + if v2 is not None: + v += f", {f} {v2}" + print("---", feature, "support ok,", t, v, file=out) + else: + print("---", feature, "support ok", file=out) + else: + print("***", feature, "support not installed", file=out) + print("-" * 68, file=out) + + if supported_formats: + extensions = collections.defaultdict(list) + for ext, i in Image.EXTENSION.items(): + extensions[i].append(ext) + + for i in sorted(Image.ID): + 
line = f"{i}" + if i in Image.MIME: + line = f"{line} {Image.MIME[i]}" + print(line, file=out) + + if i in extensions: + print( + "Extensions: {}".format(", ".join(sorted(extensions[i]))), file=out + ) + + features = [] + if i in Image.OPEN: + features.append("open") + if i in Image.SAVE: + features.append("save") + if i in Image.SAVE_ALL: + features.append("save_all") + if i in Image.DECODERS: + features.append("decode") + if i in Image.ENCODERS: + features.append("encode") + + print("Features: {}".format(", ".join(features)), file=out) + print("-" * 68, file=out) diff --git a/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/INSTALLER b/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/LICENSE b/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/LICENSE new file mode 100644 index 00000000..bbae200a --- /dev/null +++ b/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/LICENSE @@ -0,0 +1,636 @@ +The Python Imaging Library (PIL) is + + Copyright © 1997-2011 by Secret Labs AB + Copyright © 1995-2011 by Fredrik Lundh + +Pillow is the friendly PIL fork. 
It is + + Copyright © 2010-2021 by Alex Clark and contributors + +Like PIL, Pillow is licensed under the open source HPND License: + +By obtaining, using, and/or copying this software and/or its associated +documentation, you agree that you have read, understood, and will comply +with the following terms and conditions: + +Permission to use, copy, modify, and distribute this software and its +associated documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appears in all copies, and that +both that copyright notice and this permission notice appear in supporting +documentation, and that the name of Secret Labs AB or the author not be +used in advertising or publicity pertaining to distribution of the software +without specific, written prior permission. + +SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS +SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. +IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR ANY SPECIAL, +INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. + + +---- + +FREETYPE2 + +The FreeType 2 font engine is copyrighted work and cannot be used +legally without a software license. In order to make this project +usable to a vast majority of developers, we distribute it under two +mutually exclusive open-source licenses. + +This means that *you* must choose *one* of the two licenses described +below, then obey all its terms and conditions when using FreeType 2 in +any of your projects or products. + + - The FreeType License, found in the file `FTL.TXT', which is similar + to the original BSD license *with* an advertising clause that forces + you to explicitly cite the FreeType project in your product's + documentation. 
All details are in the license file. This license + is suited to products which don't use the GNU General Public + License. + + Note that this license is compatible to the GNU General Public + License version 3, but not version 2. + + - The GNU General Public License version 2, found in `GPLv2.TXT' (any + later version can be used also), for programs which already use the + GPL. Note that the FTL is incompatible with GPLv2 due to its + advertisement clause. + +The contributed BDF and PCF drivers come with a license similar to that +of the X Window System. It is compatible to the above two licenses (see +file src/bdf/README and src/pcf/README). The same holds for the files +`fthash.c' and `fthash.h'; their code was part of the BDF driver in +earlier FreeType versions. + +The gzip module uses the zlib license (see src/gzip/zlib.h) which too is +compatible to the above two licenses. + +The MD5 checksum support (only used for debugging in development builds) +is in the public domain. + +---- + +HARFBUZZ + +HarfBuzz is licensed under the so-called "Old MIT" license. Details follow. +For parts of HarfBuzz that are licensed under different licenses see individual +files names COPYING in subdirectories where applicable. + +Copyright © 2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020 Google, Inc. +Copyright © 2018,2019,2020 Ebrahim Byagowi +Copyright © 2019,2020 Facebook, Inc. +Copyright © 2012 Mozilla Foundation +Copyright © 2011 Codethink Limited +Copyright © 2008,2010 Nokia Corporation and/or its subsidiary(-ies) +Copyright © 2009 Keith Stribley +Copyright © 2009 Martin Hosken and SIL International +Copyright © 2007 Chris Wilson +Copyright © 2006 Behdad Esfahbod +Copyright © 2005 David Turner +Copyright © 2004,2007,2008,2009,2010 Red Hat, Inc. +Copyright © 1998-2004 David Turner and Werner Lemberg + +For full copyright notices consult the individual files in the package. 
+ + +Permission is hereby granted, without written agreement and without +license or royalty fees, to use, copy, modify, and distribute this +software and its documentation for any purpose, provided that the +above copyright notice and the following two paragraphs appear in +all copies of this software. + +IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR +DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN +IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, +BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS +ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO +PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + + +---- + +LCMS2 + +Little CMS +Copyright (c) 1998-2020 Marti Maria Saguer + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +---- + +LIBJPEG + +1. We don't promise that this software works. (But if you find any bugs, + please let us know!) +2. You can use this software for whatever you want. You don't have to pay us. +3. You may not pretend that you wrote this software. If you use it in a + program, you must acknowledge somewhere in your documentation that + you've used the IJG code. + +In legalese: + +The authors make NO WARRANTY or representation, either express or implied, +with respect to this software, its quality, accuracy, merchantability, or +fitness for a particular purpose. This software is provided "AS IS", and you, +its user, assume the entire risk as to its quality and accuracy. + +This software is copyright (C) 1991-2020, Thomas G. Lane, Guido Vollbeding. +All Rights Reserved except as specified below. + +Permission is hereby granted to use, copy, modify, and distribute this +software (or portions thereof) for any purpose, without fee, subject to these +conditions: +(1) If any part of the source code for this software is distributed, then this +README file must be included, with this copyright and no-warranty notice +unaltered; and any additions, deletions, or changes to the original files +must be clearly indicated in accompanying documentation. +(2) If only executable code is distributed, then the accompanying +documentation must state that "this software is based in part on the work of +the Independent JPEG Group". +(3) Permission for use of this software is granted only if the user accepts +full responsibility for any undesirable consequences; the authors accept +NO LIABILITY for damages of any kind. 
+ +These conditions apply to any software derived from or based on the IJG code, +not just to the unmodified library. If you use our work, you ought to +acknowledge us. + +Permission is NOT granted for the use of any IJG author's name or company name +in advertising or publicity relating to this software or products derived from +it. This software may be referred to only as "the Independent JPEG Group's +software". + +We specifically permit and encourage the use of this software as the basis of +commercial products, provided that all warranty or liability claims are +assumed by the product vendor. + +---- + +LIBLZMA + +XZ Utils Licensing +================== + + Different licenses apply to different files in this package. Here + is a rough summary of which licenses apply to which parts of this + package (but check the individual files to be sure!): + + - liblzma is in the public domain. + + - xz, xzdec, and lzmadec command line tools are in the public + domain unless GNU getopt_long had to be compiled and linked + in from the lib directory. The getopt_long code is under + GNU LGPLv2.1+. + + - The scripts to grep, diff, and view compressed files have been + adapted from gzip. These scripts and their documentation are + under GNU GPLv2+. + + - All the documentation in the doc directory and most of the + XZ Utils specific documentation files in other directories + are in the public domain. + + - Translated messages are in the public domain. + + - The build system contains public domain files, and files that + are under GNU GPLv2+ or GNU GPLv3+. None of these files end up + in the binaries being built. + + - Test files and test code in the tests directory, and debugging + utilities in the debug directory are in the public domain. + + - The extra directory may contain public domain files, and files + that are under various free software licenses. + + You can do whatever you want with the files that have been put into + the public domain. 
If you find public domain legally problematic, + take the previous sentence as a license grant. If you still find + the lack of copyright legally problematic, you have too many + lawyers. + + As usual, this software is provided "as is", without any warranty. + + If you copy significant amounts of public domain code from XZ Utils + into your project, acknowledging this somewhere in your software is + polite (especially if it is proprietary, non-free software), but + naturally it is not legally required. Here is an example of a good + notice to put into "about box" or into documentation: + + This software includes code from XZ Utils . + + The following license texts are included in the following files: + - COPYING.LGPLv2.1: GNU Lesser General Public License version 2.1 + - COPYING.GPLv2: GNU General Public License version 2 + - COPYING.GPLv3: GNU General Public License version 3 + + Note that the toolchain (compiler, linker etc.) may add some code + pieces that are copyrighted. Thus, it is possible that e.g. liblzma + binary wouldn't actually be in the public domain in its entirety + even though it contains no copyrighted code from the XZ Utils source + package. + + If you have questions, don't hesitate to ask the author(s) for more + information. + +---- + +LIBTIFF + +Copyright (c) 1988-1997 Sam Leffler +Copyright (c) 1991-1997 Silicon Graphics, Inc. + +Permission to use, copy, modify, distribute, and sell this software and +its documentation for any purpose is hereby granted without fee, provided +that (i) the above copyright notices and this permission notice appear in +all copies of the software and related documentation, and (ii) the names of +Sam Leffler and Silicon Graphics may not be used in any advertising or +publicity relating to the software without the specific, prior written +permission of Sam Leffler and Silicon Graphics. 
+ +THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, +EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY +WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. + +IN NO EVENT SHALL SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR +ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, +OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF +LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +OF THIS SOFTWARE. + +---- + +LIBWEBP + +Copyright (c) 2010, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + * Neither the name of Google nor the names of its contributors may + be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---- + +OPENJPEG + +* + * The copyright in this software is being made available under the 2-clauses + * BSD License, included below. This software may be subject to other third + * party and contributor rights, including patent rights, and no such rights + * are granted under this license. + * + * Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium + * Copyright (c) 2002-2014, Professor Benoit Macq + * Copyright (c) 2003-2014, Antonin Descampe + * Copyright (c) 2003-2009, Francois-Olivier Devaux + * Copyright (c) 2005, Herve Drolon, FreeImage Team + * Copyright (c) 2002-2003, Yannick Verschueren + * Copyright (c) 2001-2003, David Janssens + * Copyright (c) 2011-2012, Centre National d'Etudes Spatiales (CNES), France + * Copyright (c) 2012, CS Systemes d'Information, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +--- + +COPYRIGHT NOTICE, DISCLAIMER, and LICENSE +========================================= + +PNG Reference Library License version 2 +--------------------------------------- + + * Copyright (c) 1995-2019 The PNG Reference Library Authors. + * Copyright (c) 2018-2019 Cosmin Truta. + * Copyright (c) 2000-2002, 2004, 2006-2018 Glenn Randers-Pehrson. + * Copyright (c) 1996-1997 Andreas Dilger. + * Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc. + +The software is supplied "as is", without warranty of any kind, +express or implied, including, without limitation, the warranties +of merchantability, fitness for a particular purpose, title, and +non-infringement. In no event shall the Copyright owners, or +anyone distributing the software, be liable for any damages or +other liability, whether in contract, tort or otherwise, arising +from, out of, or in connection with the software, or the use or +other dealings in the software, even if advised of the possibility +of such damage. + +Permission is hereby granted to use, copy, modify, and distribute +this software, or portions hereof, for any purpose, without fee, +subject to the following restrictions: + + 1. 
The origin of this software must not be misrepresented; you + must not claim that you wrote the original software. If you + use this software in a product, an acknowledgment in the product + documentation would be appreciated, but is not required. + + 2. Altered source versions must be plainly marked as such, and must + not be misrepresented as being the original software. + + 3. This Copyright notice may not be removed or altered from any + source or altered source distribution. + + +PNG Reference Library License version 1 (for libpng 0.5 through 1.6.35) +----------------------------------------------------------------------- + +libpng versions 1.0.7, July 1, 2000, through 1.6.35, July 15, 2018 are +Copyright (c) 2000-2002, 2004, 2006-2018 Glenn Randers-Pehrson, are +derived from libpng-1.0.6, and are distributed according to the same +disclaimer and license as libpng-1.0.6 with the following individuals +added to the list of Contributing Authors: + + Simon-Pierre Cadieux + Eric S. Raymond + Mans Rullgard + Cosmin Truta + Gilles Vollant + James Yu + Mandar Sahastrabuddhe + Google Inc. + Vadim Barkov + +and with the following additions to the disclaimer: + + There is no warranty against interference with your enjoyment of + the library or against infringement. There is no warranty that our + efforts or the library will fulfill any of your particular purposes + or needs. This library is provided with all faults, and the entire + risk of satisfactory quality, performance, accuracy, and effort is + with the user. + +Some files in the "contrib" directory and some configure-generated +files that are distributed with libpng have other copyright owners, and +are released under other open source licenses. 
+ +libpng versions 0.97, January 1998, through 1.0.6, March 20, 2000, are +Copyright (c) 1998-2000 Glenn Randers-Pehrson, are derived from +libpng-0.96, and are distributed according to the same disclaimer and +license as libpng-0.96, with the following individuals added to the +list of Contributing Authors: + + Tom Lane + Glenn Randers-Pehrson + Willem van Schaik + +libpng versions 0.89, June 1996, through 0.96, May 1997, are +Copyright (c) 1996-1997 Andreas Dilger, are derived from libpng-0.88, +and are distributed according to the same disclaimer and license as +libpng-0.88, with the following individuals added to the list of +Contributing Authors: + + John Bowler + Kevin Bracey + Sam Bushell + Magnus Holmgren + Greg Roelofs + Tom Tanner + +Some files in the "scripts" directory have other copyright owners, +but are released under this license. + +libpng versions 0.5, May 1995, through 0.88, January 1996, are +Copyright (c) 1995-1996 Guy Eric Schalnat, Group 42, Inc. + +For the purposes of this copyright and license, "Contributing Authors" +is defined as the following set of individuals: + + Andreas Dilger + Dave Martindale + Guy Eric Schalnat + Paul Schmidt + Tim Wegner + +The PNG Reference Library is supplied "AS IS". The Contributing +Authors and Group 42, Inc. disclaim all warranties, expressed or +implied, including, without limitation, the warranties of +merchantability and of fitness for any purpose. The Contributing +Authors and Group 42, Inc. assume no liability for direct, indirect, +incidental, special, exemplary, or consequential damages, which may +result from the use of the PNG Reference Library, even if advised of +the possibility of such damage. + +Permission is hereby granted to use, copy, modify, and distribute this +source code, or portions hereof, for any purpose, without fee, subject +to the following restrictions: + + 1. The origin of this source code must not be misrepresented. + + 2. 
Altered versions must be plainly marked as such and must not + be misrepresented as being the original source. + + 3. This Copyright notice may not be removed or altered from any + source or altered source distribution. + +The Contributing Authors and Group 42, Inc. specifically permit, +without fee, and encourage the use of this source code as a component +to supporting the PNG file format in commercial products. If you use +this source code in a product, acknowledgment is not required but would +be appreciated. + +---- + +RAQM + +The MIT License (MIT) + +Copyright © 2015 Information Technology Authority (ITA) +Copyright © 2016 Khaled Hosny + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +---- + +XAU + +Copyright 1988, 1993, 1994, 1998 The Open Group + +Permission to use, copy, modify, distribute, and sell this software and its +documentation for any purpose is hereby granted without fee, provided that +the above copyright notice appear in all copies and that both that +copyright notice and this permission notice appear in supporting +documentation. + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN +AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +Except as contained in this notice, the name of The Open Group shall not be +used in advertising or otherwise to promote the sale, use or other dealings +in this Software without prior written authorization from The Open Group. + +---- + +XCB + +Copyright (C) 2001-2006 Bart Massey, Jamey Sharp, and Josh Triplett. +All Rights Reserved. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall +be included in all copies or substantial portions of the +Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS +BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +Except as contained in this notice, the names of the authors +or their institutions shall not be used in advertising or +otherwise to promote the sale, use or other dealings in this +Software without prior written authorization from the +authors. + +---- + +XDMCP + +Copyright 1989, 1998 The Open Group + +Permission to use, copy, modify, distribute, and sell this software and its +documentation for any purpose is hereby granted without fee, provided that +the above copyright notice appear in all copies and that both that +copyright notice and this permission notice appear in supporting +documentation. + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN +AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +Except as contained in this notice, the name of The Open Group shall not be +used in advertising or otherwise to promote the sale, use or other dealings +in this Software without prior written authorization from The Open Group. 
+ +Author: Keith Packard, MIT X Consortium + +---- + +ZLIB + + (C) 1995-2017 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + +If you use the zlib library in a product, we would appreciate *not* receiving +lengthy legal documents to sign. The sources are provided for free but without +warranty of any kind. The library has been entirely written by Jean-loup +Gailly and Mark Adler; it does not include third-party code. + +If you redistribute modified sources, we would appreciate that you include in +the file ChangeLog history information documenting your changes. Please read +the FAQ for more information on the distribution of modified source versions. 
\ No newline at end of file diff --git a/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/METADATA b/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/METADATA new file mode 100644 index 00000000..11bb79ee --- /dev/null +++ b/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/METADATA @@ -0,0 +1,143 @@ +Metadata-Version: 2.1 +Name: Pillow +Version: 8.4.0 +Summary: Python Imaging Library (Fork) +Home-page: https://python-pillow.org +Author: Alex Clark (PIL Fork Author) +Author-email: aclark@python-pillow.org +License: HPND +Project-URL: Documentation, https://pillow.readthedocs.io +Project-URL: Source, https://github.com/python-pillow/Pillow +Project-URL: Funding, https://tidelift.com/subscription/pkg/pypi-pillow?utm_source=pypi-pillow&utm_medium=pypi +Project-URL: Release notes, https://pillow.readthedocs.io/en/stable/releasenotes/index.html +Project-URL: Changelog, https://github.com/python-pillow/Pillow/blob/master/CHANGES.rst +Project-URL: Twitter, https://twitter.com/PythonPillow +Keywords: Imaging +Platform: UNKNOWN +Classifier: Development Status :: 6 - Mature +Classifier: License :: OSI Approved :: Historical Permission Notice and Disclaimer (HPND) +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Multimedia :: Graphics +Classifier: Topic :: Multimedia :: Graphics :: Capture :: Digital Camera +Classifier: Topic :: Multimedia :: Graphics :: Capture :: Screen Capture +Classifier: Topic :: Multimedia :: Graphics :: Graphics Conversion +Classifier: Topic :: Multimedia :: 
Graphics :: Viewers +Requires-Python: >=3.6 +Description-Content-Type: text/markdown +License-File: LICENSE + +

+ Pillow logo +

+ +# Pillow + +## Python Imaging Library (Fork) + +Pillow is the friendly PIL fork by [Alex Clark and +Contributors](https://github.com/python-pillow/Pillow/graphs/contributors). +PIL is the Python Imaging Library by Fredrik Lundh and Contributors. +As of 2019, Pillow development is +[supported by Tidelift](https://tidelift.com/subscription/pkg/pypi-pillow?utm_source=pypi-pillow&utm_medium=readme&utm_campaign=enterprise). + + + + + + + + + + + + + + + + + + +
docs + Documentation Status +
tests + GitHub Actions build status (Lint) + GitHub Actions build status (Test Linux and macOS) + GitHub Actions build status (Test Windows) + GitHub Actions build status (Test Docker) + AppVeyor CI build status (Windows) + GitHub Actions wheels build status (Wheels) + Travis CI wheels build status (aarch64) + Code coverage +
package + Zenodo + Tidelift + Newest PyPI version + Number of PyPI downloads +
social + Join the chat at https://gitter.im/python-pillow/Pillow + Follow on https://twitter.com/PythonPillow +
+ +## Overview + +The Python Imaging Library adds image processing capabilities to your Python interpreter. + +This library provides extensive file format support, an efficient internal representation, and fairly powerful image processing capabilities. + +The core image library is designed for fast access to data stored in a few basic pixel formats. It should provide a solid foundation for a general image processing tool. + +## More Information + +- [Documentation](https://pillow.readthedocs.io/) + - [Installation](https://pillow.readthedocs.io/en/latest/installation.html) + - [Handbook](https://pillow.readthedocs.io/en/latest/handbook/index.html) +- [Contribute](https://github.com/python-pillow/Pillow/blob/master/.github/CONTRIBUTING.md) + - [Issues](https://github.com/python-pillow/Pillow/issues) + - [Pull requests](https://github.com/python-pillow/Pillow/pulls) +- [Release notes](https://pillow.readthedocs.io/en/stable/releasenotes/index.html) +- [Changelog](https://github.com/python-pillow/Pillow/blob/master/CHANGES.rst) + - [Pre-fork](https://github.com/python-pillow/Pillow/blob/master/CHANGES.rst#pre-fork) + +## Report a Vulnerability + +To report a security vulnerability, please follow the procedure described in the [Tidelift security policy](https://tidelift.com/docs/security). 
+ + diff --git a/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/RECORD b/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/RECORD new file mode 100644 index 00000000..e6dd26de --- /dev/null +++ b/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/RECORD @@ -0,0 +1,212 @@ +PIL/BdfFontFile.py,sha256=hRnSgFZOIiTgWfJIaRHRQpU4TKVok2E31KJY6sbZPwc,2817 +PIL/BlpImagePlugin.py,sha256=mueMvKLQrS_b082agtFskRnzhCoxuOEyX3VwDm5wzdg,14569 +PIL/BmpImagePlugin.py,sha256=7XdbhPAcXDY93O0rHg8ogcFz5FmhF1G9ZKmuzG4gI2c,14117 +PIL/BufrStubImagePlugin.py,sha256=Zq60GwcqQJTmZJrA9EQq94QvYpNqwYvQzHojh4U7SDw,1520 +PIL/ContainerIO.py,sha256=1U15zUXjWO8uWK-MyCp66Eh7djQEU-oUeCDoBqewNkA,2883 +PIL/CurImagePlugin.py,sha256=er_bI3V1Ezly0QfFJq0fZMlGwrD5izDutwF1FrOwiMA,1679 +PIL/DcxImagePlugin.py,sha256=bfESLTji9GerqI4oYsy5oTFyRMlr2mjSsXzpY9IuLsk,2145 +PIL/DdsImagePlugin.py,sha256=cUSGUNx_sf5UFryyMLrOf1vJQIMx9GwoMfuvxz3Bg1U,7987 +PIL/EpsImagePlugin.py,sha256=mKiRyVh5NcX9MI1ByBzlnSB1u8xx_kX7Q_wcalcZkBY,11918 +PIL/ExifTags.py,sha256=0YRoKyMwPabWOZZgVeLL6mlaGjbZgfF-z8WuUc6Ibb0,9446 +PIL/FitsStubImagePlugin.py,sha256=F9NJsro-OyxKmt9SLBemx5LZaCqFXIejBVbZY9nuUPA,2555 +PIL/FliImagePlugin.py,sha256=pGeC1JI6d5xdYWRhsKz0_3yeFzGII_jYbQhJYNo6n7Y,4260 +PIL/FontFile.py,sha256=LkQcbwUu1C4fokMnbg-ao9ksp2RX-saaPRie-z2rpH4,2765 +PIL/FpxImagePlugin.py,sha256=nKGioxa5C0q9X9qva3t_htRV_3jXQcFkclVxTEaSusk,6658 +PIL/FtexImagePlugin.py,sha256=rHNkZXfhF21i3klylBqo8nJrIm41TxdCLHiv0Zgwbb0,3305 +PIL/GbrImagePlugin.py,sha256=u9kOIdBxYMRrXfXfIwGcz0uyvvxNRCwO3U1xcfa51T4,2794 +PIL/GdImageFile.py,sha256=JFWSUssG1z1r884GQtBbZ3T7uhPF4cDXSuW3ctgf3TU,2465 +PIL/GifImagePlugin.py,sha256=aY77KxVMCvXimrYmENZms_kqTk9EkBLeuSw79AhiiDg,30330 +PIL/GimpGradientFile.py,sha256=G0ClRmjRHIJoU0nmG-P-tgehLHZip5i0rY4-5pjJ7bc,3353 +PIL/GimpPaletteFile.py,sha256=_wWvNmB40AfQ1M5sTxoYYXOMApWQji7rrubqZhfd1dU,1274 +PIL/GribStubImagePlugin.py,sha256=sSBrTisTcunuC0WcSQ4_55nV6uFvLCQ0JLSd62dgURw,1515 
+PIL/Hdf5StubImagePlugin.py,sha256=zjtFPZIcVkWXvYRPnHow6XA9kElEi772w7PFSuEqmq4,1517 +PIL/IcnsImagePlugin.py,sha256=lXyu80uDDCIltyFmCuThVLzjGU7r7VnjdcRHlgJOVAw,11596 +PIL/IcoImagePlugin.py,sha256=hru3Wu7soWoRl-CGKs7Wv14FbeiHvxv_l5HkSVsnK30,10822 +PIL/ImImagePlugin.py,sha256=RFFyRlFJTVuti-TZ9yWsqP7vJJydgX1MC6mjYwwdw-0,10729 +PIL/Image.py,sha256=a3QVr3M1kRfI82tiOXspyJvwbL732fd-KAZGYEZrtck,122031 +PIL/ImageChops.py,sha256=HOGSnuU4EcCbdeUzEGPm54zewppHWWe12XLyOLLPgCw,7297 +PIL/ImageCms.py,sha256=nx8mwBoN9npRwz7y5fhPdm_jFhl7wRtIcwB8RPXAaco,37088 +PIL/ImageColor.py,sha256=2e9xfO08S6afUzoahUIzyMN8RJcQsMz9E92rFnEhfP0,8727 +PIL/ImageDraw.py,sha256=Vx9suYwaqZ-mz5JJQFuJTFi8ix-z-LXHyfYOOEki0Vc,33946 +PIL/ImageDraw2.py,sha256=oBhpBTZhx3bd4D0s8E2kDjBzgThRkDU_TE_987l501k,5019 +PIL/ImageEnhance.py,sha256=CJnCouiBmxN2fE0xW7m_uMdBqcm-Fp0S3ruHhkygal4,3190 +PIL/ImageFile.py,sha256=MzpDDInyeANJxLho-T15CV1n1G-jAhN37cJxbEVbEMs,21144 +PIL/ImageFilter.py,sha256=SBdX7_KqGKFOJxXjv9Uc5gUP1LkvQ-r-2cbiRtcXoeM,16129 +PIL/ImageFont.py,sha256=6DHShfMCcIYoYswrktiZZe5_LFYPSIFT_mI4APVxuZw,45311 +PIL/ImageGrab.py,sha256=2o1aA0_vP-KeRJsJtIxYhi61yCK4k_Khh6NHQD7HO2Q,3625 +PIL/ImageMath.py,sha256=iQPtbXgdhcCchGTXbDop7AiI_Fe-fNmq8m1YHsHMBgc,7048 +PIL/ImageMode.py,sha256=woBgGcqCT5yTkS5yNWJyst4aaLdSMZsPpPoXDgnqo6M,2075 +PIL/ImageMorph.py,sha256=KL2843wgfLyXPOWEJnTXRvySfbpRrlTqA_0M1j5xuD0,7773 +PIL/ImageOps.py,sha256=MyLWGMdwa-5b4CDldBRpRZyoWw9DCssZzpPgKhYLUc8,20291 +PIL/ImagePalette.py,sha256=MCReMV6Ej9GvalXyAFoojKB8z4PVkigX153Lfrk2UaU,7841 +PIL/ImagePath.py,sha256=lVmH1-lCd0SyrFoqyhlstAFW2iJuC14fPcW8iewvxCQ,336 +PIL/ImageQt.py,sha256=oZFNAntkAYxTbS99jU8F6L9U15U8xDtNU0QGZddXMsk,6380 +PIL/ImageSequence.py,sha256=3djA7vDH6wafTGbt4e_lPlVhy2TaKfdSrA1XQ4n-Uoc,1850 +PIL/ImageShow.py,sha256=spuSP6ObqyfqC_sturCcZgbGwCqsC9pkqCx5ozHEEDs,6863 +PIL/ImageStat.py,sha256=PieQi44mRHE6jod7NqujwGr6WCntuZuNGmC2z9PaoDY,3901 +PIL/ImageTk.py,sha256=rLPqAnLH61y2XRHgRPUdesYLQqnDQ__LeRK66KL_fPQ,9324 
+PIL/ImageTransform.py,sha256=V2l6tsjmymMIF7HQBMI21UPn4mlicarrm4NF3Kazvio,2843 +PIL/ImageWin.py,sha256=1MQBJS7tVrQzI9jN0nmeNeFpIaq8fXra9kQocHkiFxM,7191 +PIL/ImtImagePlugin.py,sha256=cn60lqUVnK2oh_sPqPBORr_rZ4zuF_6FU0V96IAh8Ww,2203 +PIL/IptcImagePlugin.py,sha256=-RZBUUodHcF5wLKanW1MxJj7cbLOpx5LvXqm0vDM22U,5714 +PIL/Jpeg2KImagePlugin.py,sha256=fC5qVN7kGz9awyF8P0EEI5XFVOPJDUdJgu3uVWhDLRk,10386 +PIL/JpegImagePlugin.py,sha256=FfxP12Zi2quaR2t2dw-IREvX0wrndxFvLvGOjeMHD9M,28396 +PIL/JpegPresets.py,sha256=6nVnX_H8eA8ZO7AOVvkUx8gEN6QfI8zKnV6od16XgWE,12347 +PIL/McIdasImagePlugin.py,sha256=LrP5nA7l8IQG3WhlMI0Xs8fGXY_uf6IDmzNCERl3tGw,1754 +PIL/MicImagePlugin.py,sha256=Eh94vjTurXYkmm27hhooyNm9NkWWyVxP8Nq4thNLV6Y,2607 +PIL/MpegImagePlugin.py,sha256=n16Zgdy8Hcfke16lQwZWs53PZq4BA_OxPCMPDkW62nw,1803 +PIL/MpoImagePlugin.py,sha256=2C07_0-G0XepZnJRNSAKp3Os8t_qo32N2WquMomXR9I,4399 +PIL/MspImagePlugin.py,sha256=RdQb1e5-KDWdWy-3MUhchTlxf9TPgyw2axONt_vWRUE,5526 +PIL/PSDraw.py,sha256=xmJ6GVUvDm1SC3QuUpYdeNfGu9lYBLX1ndCt96tObcc,6719 +PIL/PaletteFile.py,sha256=s3KtsDuY5S04MKDyiXK3iIbiOGzV9PvCDUpOQHI7yqc,1106 +PIL/PalmImagePlugin.py,sha256=lTVwwSPFrQ-IPFGU8_gRCMZ1Lb73cuVhQ-nkx1Q0oqc,9108 +PIL/PcdImagePlugin.py,sha256=cnBm_xKcpLGT6hZ8QKai9Up0gZERMxZwhDXl1hQtBm0,1476 +PIL/PcfFontFile.py,sha256=njhgblsjSVcITVz1DpWdEligmJgPMh5nTk_zDDWWTik,6348 +PIL/PcxImagePlugin.py,sha256=J-Pm2QBt5Hi4ObPeXDnc87X7nl1hbtTGqy4sTov6tug,5864 +PIL/PdfImagePlugin.py,sha256=5G3tVYhuR_1n6N5SRHU5URo6WKN-lgCWJsd9I7g78vs,7344 +PIL/PdfParser.py,sha256=ShZFc_iR0T7NzA44KoLtcDIRzAoDFY0RE-xC_iBw_10,34551 +PIL/PixarImagePlugin.py,sha256=5MMcrrShVr511QKevK1ziKyJn0WllokWQxBhs8NWttY,1631 +PIL/PngImagePlugin.py,sha256=m3s82KcpJ_awnsp4GKrFQd2-7zy6nq7SzUsdGTntBwU,44148 +PIL/PpmImagePlugin.py,sha256=UNwCp3h7psEK8i0p3P93VVXUBz9_8tUVzUWsITux6HQ,4447 +PIL/PsdImagePlugin.py,sha256=tSkfdEw---66vlBu4OA9y7zsQ5y4gIIThhyFi6orT0o,8072 +PIL/PyAccess.py,sha256=SaGs2ZE4kjh-dybpAA5_Og4wuhA6d0LTPKK8t2aHffY,9607 
+PIL/SgiImagePlugin.py,sha256=mqpi0G4aiKzWmJHk22WKZ0oGqsglcTNgDfp4H8S-GCM,6097 +PIL/SpiderImagePlugin.py,sha256=gJI4peH7axhNNW37An9ixeFFAYooHh4DZSYPotXnQfo,9535 +PIL/SunImagePlugin.py,sha256=bnjnVFRjvApCH1QC1F9HeynoCe5AZk3wa1tOhPvHzKU,4282 +PIL/TarIO.py,sha256=E_pjAxk9wHezXUuR_99liySBXfJoL2wjzdNDf0g1hTo,1440 +PIL/TgaImagePlugin.py,sha256=jf7cIHVLCqrgxrV6RRTh0ViH5vr2308QqI0k_CL2RwE,6272 +PIL/TiffImagePlugin.py,sha256=j6IXb0fwIxsCcS8vhrugVl6qsd7SD716LjQ90WUnPTM,72522 +PIL/TiffTags.py,sha256=s9sOrIxxDdZSgi06YdalbZOn_p8V1Gh5V2TZg3B4DEg,15286 +PIL/WalImageFile.py,sha256=jJNdRLIjbEsPkpkIdY2n6WlStDz1ttVQS7y3DJE7qTU,5546 +PIL/WebPImagePlugin.py,sha256=VkCNYJRoJd8wXT4JG31CF4crjjPfuoOvDH81o4vqJ1w,10830 +PIL/WmfImagePlugin.py,sha256=2dDhAUW8-uebXmBJbI8TDJapK49ocUros1hbUUDlmO8,4639 +PIL/XVThumbImagePlugin.py,sha256=zmZ8Z4B8Kr6NOdUqSipW9_X5mKiLBLs-wxvPRRg1l0M,1940 +PIL/XbmImagePlugin.py,sha256=oIEt_uqwKKU6lLS_IVFwEjotwE1FI4_IHUnx_6Ul_gk,2430 +PIL/XpmImagePlugin.py,sha256=1EBt-g678p0A0NXOkxq7sGM8dymneDMHHQmwJzAbrlw,3062 +PIL/__init__.py,sha256=NnlpBykSA7dIeA6k7aHKD2ikvrCKhpieYVv7UieVoyk,3260 +PIL/__main__.py,sha256=axR7PO-HtXp-o0rBhKIxs0wark0rBfaDIhAIWqtWUo4,41 +PIL/__pycache__/BdfFontFile.cpython-39.pyc,, +PIL/__pycache__/BlpImagePlugin.cpython-39.pyc,, +PIL/__pycache__/BmpImagePlugin.cpython-39.pyc,, +PIL/__pycache__/BufrStubImagePlugin.cpython-39.pyc,, +PIL/__pycache__/ContainerIO.cpython-39.pyc,, +PIL/__pycache__/CurImagePlugin.cpython-39.pyc,, +PIL/__pycache__/DcxImagePlugin.cpython-39.pyc,, +PIL/__pycache__/DdsImagePlugin.cpython-39.pyc,, +PIL/__pycache__/EpsImagePlugin.cpython-39.pyc,, +PIL/__pycache__/ExifTags.cpython-39.pyc,, +PIL/__pycache__/FitsStubImagePlugin.cpython-39.pyc,, +PIL/__pycache__/FliImagePlugin.cpython-39.pyc,, +PIL/__pycache__/FontFile.cpython-39.pyc,, +PIL/__pycache__/FpxImagePlugin.cpython-39.pyc,, +PIL/__pycache__/FtexImagePlugin.cpython-39.pyc,, +PIL/__pycache__/GbrImagePlugin.cpython-39.pyc,, +PIL/__pycache__/GdImageFile.cpython-39.pyc,, 
+PIL/__pycache__/GifImagePlugin.cpython-39.pyc,, +PIL/__pycache__/GimpGradientFile.cpython-39.pyc,, +PIL/__pycache__/GimpPaletteFile.cpython-39.pyc,, +PIL/__pycache__/GribStubImagePlugin.cpython-39.pyc,, +PIL/__pycache__/Hdf5StubImagePlugin.cpython-39.pyc,, +PIL/__pycache__/IcnsImagePlugin.cpython-39.pyc,, +PIL/__pycache__/IcoImagePlugin.cpython-39.pyc,, +PIL/__pycache__/ImImagePlugin.cpython-39.pyc,, +PIL/__pycache__/Image.cpython-39.pyc,, +PIL/__pycache__/ImageChops.cpython-39.pyc,, +PIL/__pycache__/ImageCms.cpython-39.pyc,, +PIL/__pycache__/ImageColor.cpython-39.pyc,, +PIL/__pycache__/ImageDraw.cpython-39.pyc,, +PIL/__pycache__/ImageDraw2.cpython-39.pyc,, +PIL/__pycache__/ImageEnhance.cpython-39.pyc,, +PIL/__pycache__/ImageFile.cpython-39.pyc,, +PIL/__pycache__/ImageFilter.cpython-39.pyc,, +PIL/__pycache__/ImageFont.cpython-39.pyc,, +PIL/__pycache__/ImageGrab.cpython-39.pyc,, +PIL/__pycache__/ImageMath.cpython-39.pyc,, +PIL/__pycache__/ImageMode.cpython-39.pyc,, +PIL/__pycache__/ImageMorph.cpython-39.pyc,, +PIL/__pycache__/ImageOps.cpython-39.pyc,, +PIL/__pycache__/ImagePalette.cpython-39.pyc,, +PIL/__pycache__/ImagePath.cpython-39.pyc,, +PIL/__pycache__/ImageQt.cpython-39.pyc,, +PIL/__pycache__/ImageSequence.cpython-39.pyc,, +PIL/__pycache__/ImageShow.cpython-39.pyc,, +PIL/__pycache__/ImageStat.cpython-39.pyc,, +PIL/__pycache__/ImageTk.cpython-39.pyc,, +PIL/__pycache__/ImageTransform.cpython-39.pyc,, +PIL/__pycache__/ImageWin.cpython-39.pyc,, +PIL/__pycache__/ImtImagePlugin.cpython-39.pyc,, +PIL/__pycache__/IptcImagePlugin.cpython-39.pyc,, +PIL/__pycache__/Jpeg2KImagePlugin.cpython-39.pyc,, +PIL/__pycache__/JpegImagePlugin.cpython-39.pyc,, +PIL/__pycache__/JpegPresets.cpython-39.pyc,, +PIL/__pycache__/McIdasImagePlugin.cpython-39.pyc,, +PIL/__pycache__/MicImagePlugin.cpython-39.pyc,, +PIL/__pycache__/MpegImagePlugin.cpython-39.pyc,, +PIL/__pycache__/MpoImagePlugin.cpython-39.pyc,, +PIL/__pycache__/MspImagePlugin.cpython-39.pyc,, 
+PIL/__pycache__/PSDraw.cpython-39.pyc,, +PIL/__pycache__/PaletteFile.cpython-39.pyc,, +PIL/__pycache__/PalmImagePlugin.cpython-39.pyc,, +PIL/__pycache__/PcdImagePlugin.cpython-39.pyc,, +PIL/__pycache__/PcfFontFile.cpython-39.pyc,, +PIL/__pycache__/PcxImagePlugin.cpython-39.pyc,, +PIL/__pycache__/PdfImagePlugin.cpython-39.pyc,, +PIL/__pycache__/PdfParser.cpython-39.pyc,, +PIL/__pycache__/PixarImagePlugin.cpython-39.pyc,, +PIL/__pycache__/PngImagePlugin.cpython-39.pyc,, +PIL/__pycache__/PpmImagePlugin.cpython-39.pyc,, +PIL/__pycache__/PsdImagePlugin.cpython-39.pyc,, +PIL/__pycache__/PyAccess.cpython-39.pyc,, +PIL/__pycache__/SgiImagePlugin.cpython-39.pyc,, +PIL/__pycache__/SpiderImagePlugin.cpython-39.pyc,, +PIL/__pycache__/SunImagePlugin.cpython-39.pyc,, +PIL/__pycache__/TarIO.cpython-39.pyc,, +PIL/__pycache__/TgaImagePlugin.cpython-39.pyc,, +PIL/__pycache__/TiffImagePlugin.cpython-39.pyc,, +PIL/__pycache__/TiffTags.cpython-39.pyc,, +PIL/__pycache__/WalImageFile.cpython-39.pyc,, +PIL/__pycache__/WebPImagePlugin.cpython-39.pyc,, +PIL/__pycache__/WmfImagePlugin.cpython-39.pyc,, +PIL/__pycache__/XVThumbImagePlugin.cpython-39.pyc,, +PIL/__pycache__/XbmImagePlugin.cpython-39.pyc,, +PIL/__pycache__/XpmImagePlugin.cpython-39.pyc,, +PIL/__pycache__/__init__.cpython-39.pyc,, +PIL/__pycache__/__main__.cpython-39.pyc,, +PIL/__pycache__/_binary.cpython-39.pyc,, +PIL/__pycache__/_tkinter_finder.cpython-39.pyc,, +PIL/__pycache__/_util.cpython-39.pyc,, +PIL/__pycache__/_version.cpython-39.pyc,, +PIL/__pycache__/features.cpython-39.pyc,, +PIL/_binary.py,sha256=E5qhxNJ7hhbEoqu0mODOXHT8z-FDRShXG3jTJhsDdas,2043 +PIL/_imaging.cpython-39-x86_64-linux-gnu.so,sha256=3fd3n1yNLuikWPEjYX4PMYgvDtloGxkC3mlPoYpf7eI,690408 +PIL/_imagingcms.cpython-39-x86_64-linux-gnu.so,sha256=ahHNRyzxSGhKIi_3-S1G_MRy1AzDMtIsYzNOZ3AKmUg,47120 +PIL/_imagingft.cpython-39-x86_64-linux-gnu.so,sha256=xV0z35S8wz-_8sy5SVjc4ezgzkc20nFhPbJTaVLPkq0,68616 
+PIL/_imagingmath.cpython-39-x86_64-linux-gnu.so,sha256=0KJXtj2tPNs_WL16e44Ms9XnUG9jwfbHAstX0EgSqoc,30976 +PIL/_imagingmorph.cpython-39-x86_64-linux-gnu.so,sha256=obJSPi6AGhsOV8Du7NWaPG9j3tQt12HAkdEDVed2ARQ,14656 +PIL/_imagingtk.cpython-39-x86_64-linux-gnu.so,sha256=wvuzENwFMjPjsSmP2haO4OtqbX-H_IFkrYko1n-9IcM,14656 +PIL/_tkinter_finder.py,sha256=-X7xba1HO66pG1K5KSNf4Yo2eORwFTXcoFtHsmmNEcQ,525 +PIL/_util.py,sha256=pbjX5KY1W2oZyYVC4TE9ai2PfrJZrAsO5hAnz_JMees,359 +PIL/_version.py,sha256=z-VuA4bGtBQYCROyUtgkMY80ki9XAWbicT7vKIWTu84,50 +PIL/_webp.cpython-39-x86_64-linux-gnu.so,sha256=8mDlaooP7ajsomzNhAc7UK6BeK8wSn_fwf_vIJ9aXF8,45816 +PIL/features.py,sha256=j2LT6v78cHWbR8z8OVaAGIbJWI-Bs62pfiB1i1fminM,9387 +Pillow-8.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Pillow-8.4.0.dist-info/LICENSE,sha256=AYeYtqcrcjZrfTw8imgknuKkpPYeu0IUynv45BY00PU,27652 +Pillow-8.4.0.dist-info/METADATA,sha256=kM4lC8c4s460AJxQIo0m3hhjI83n8SCkpkpuh28mlDw,7453 +Pillow-8.4.0.dist-info/RECORD,, +Pillow-8.4.0.dist-info/WHEEL,sha256=ts1NGDem03kTrzsJp50lKy9cpDxDoGil0Q-wLa_TR_0,148 +Pillow-8.4.0.dist-info/top_level.txt,sha256=riZqrk-hyZqh5f1Z0Zwii3dKfxEsByhu9cU9IODF-NY,4 +Pillow-8.4.0.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +Pillow.libs/libXau-00ec42fe.so.6.0.0,sha256=d75mSMNgdE9Ubbyh6DWZcVKSq3R4m8pD6eltRC2w49o,17048 +Pillow.libs/libfreetype-804dfcff.so.6.18.0,sha256=dedyzFf3aN0wvm7CSdDJAT80q2vdfrsbkSyxx3sGitE,1410192 +Pillow.libs/libharfbuzz-a69be65e.so.0.30000.0,sha256=-AGASwIdNaZrseoFkFxsswv1t7KfZFa-Yg6rYASgccg,2923504 +Pillow.libs/libjpeg-183418da.so.9.4.0,sha256=1B9nLxyvPsvWrkPtehO8-RePJe4qziv76IZITFnQF3c,250480 +Pillow.libs/liblcms2-035b9744.so.2.0.12,sha256=tCf-Iw-t7FyvGMML4Uc4iNfhvrf1jWwxzWw4nHyDOXQ,481912 +Pillow.libs/liblzma-d540a118.so.5.2.5,sha256=JbIyQEIYTjuWokwsiGf-sItr6eJBCFWYtfeWpZJ9o64,220808 +Pillow.libs/libopenjp2-430a98fc.so.2.4.0,sha256=xYk9Bl7aTIe1OD9RO8v-2bE4chHpieehUC-zlKPyW0Q,532568 
+Pillow.libs/libpng16-213e245f.so.16.37.0,sha256=VG5oYdhMJaFXlX2G7wDk7Ig4m0gQ5BLTjskGjZraGK4,283968 +Pillow.libs/libtiff-9ffe9659.so.5.7.0,sha256=iFSY1IGiiOqw5YJdd2LSVuJMWOC-ZKdUMw1f9_aaCEM,689944 +Pillow.libs/libwebp-d8a3db66.so.7.1.2,sha256=DXWJlpikvPnqcBydQvgbLNy2ylG9_FuKtvAq5EeEVHE,641712 +Pillow.libs/libwebpdemux-f117ddb4.so.2.0.8,sha256=sSJ4GgrP8E4RPUOLo3zBaNKM542R6CHtWB5EJM1GMpU,29456 +Pillow.libs/libwebpmux-fe44437b.so.3.0.7,sha256=gIPOQVN5Cf9knVeCjU4dHi52ciCqeO8tQyxOIJu4UCY,54456 +Pillow.libs/libxcb-1122e22b.so.1.1.0,sha256=Ghohd8ctbBf5_jE5i6MExypVbwyX-uv1QjLvW_ADCHQ,243216 +Pillow.libs/libz-dd453c56.so.1.2.11,sha256=nYOJa1r68aU19J0jCVgalS9ypvHhC606cwm05SBt3rU,129328 diff --git a/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/WHEEL b/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/WHEEL new file mode 100644 index 00000000..ade37309 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.0) +Root-Is-Purelib: false +Tag: cp39-cp39-manylinux_2_17_x86_64 +Tag: cp39-cp39-manylinux2014_x86_64 + diff --git a/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/top_level.txt b/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/top_level.txt new file mode 100644 index 00000000..b338169c --- /dev/null +++ b/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/top_level.txt @@ -0,0 +1 @@ +PIL diff --git a/.venv/lib/python3.9/site-packages/mathprolib-0.1.7-py3.9.egg-info/dependency_links.txt b/.venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/zip-safe similarity index 100% rename from .venv/lib/python3.9/site-packages/mathprolib-0.1.7-py3.9.egg-info/dependency_links.txt rename to .venv/lib/python3.9/site-packages/Pillow-8.4.0.dist-info/zip-safe diff --git a/.venv/lib/python3.9/site-packages/Pillow.libs/libXau-00ec42fe.so.6.0.0 b/.venv/lib/python3.9/site-packages/Pillow.libs/libXau-00ec42fe.so.6.0.0 new file mode 100644 index 
00000000..936dbcdd Binary files /dev/null and b/.venv/lib/python3.9/site-packages/Pillow.libs/libXau-00ec42fe.so.6.0.0 differ diff --git a/.venv/lib/python3.9/site-packages/Pillow.libs/libfreetype-804dfcff.so.6.18.0 b/.venv/lib/python3.9/site-packages/Pillow.libs/libfreetype-804dfcff.so.6.18.0 new file mode 100644 index 00000000..0325ee85 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/Pillow.libs/libfreetype-804dfcff.so.6.18.0 differ diff --git a/.venv/lib/python3.9/site-packages/Pillow.libs/libharfbuzz-a69be65e.so.0.30000.0 b/.venv/lib/python3.9/site-packages/Pillow.libs/libharfbuzz-a69be65e.so.0.30000.0 new file mode 100644 index 00000000..feba38c4 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/Pillow.libs/libharfbuzz-a69be65e.so.0.30000.0 differ diff --git a/.venv/lib/python3.9/site-packages/Pillow.libs/libjpeg-183418da.so.9.4.0 b/.venv/lib/python3.9/site-packages/Pillow.libs/libjpeg-183418da.so.9.4.0 new file mode 100644 index 00000000..55b08285 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/Pillow.libs/libjpeg-183418da.so.9.4.0 differ diff --git a/.venv/lib/python3.9/site-packages/Pillow.libs/liblcms2-035b9744.so.2.0.12 b/.venv/lib/python3.9/site-packages/Pillow.libs/liblcms2-035b9744.so.2.0.12 new file mode 100644 index 00000000..f29c7651 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/Pillow.libs/liblcms2-035b9744.so.2.0.12 differ diff --git a/.venv/lib/python3.9/site-packages/Pillow.libs/liblzma-d540a118.so.5.2.5 b/.venv/lib/python3.9/site-packages/Pillow.libs/liblzma-d540a118.so.5.2.5 new file mode 100644 index 00000000..0c41d1d7 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/Pillow.libs/liblzma-d540a118.so.5.2.5 differ diff --git a/.venv/lib/python3.9/site-packages/Pillow.libs/libopenjp2-430a98fc.so.2.4.0 b/.venv/lib/python3.9/site-packages/Pillow.libs/libopenjp2-430a98fc.so.2.4.0 new file mode 100644 index 00000000..5619b137 Binary files /dev/null and 
b/.venv/lib/python3.9/site-packages/Pillow.libs/libopenjp2-430a98fc.so.2.4.0 differ diff --git a/.venv/lib/python3.9/site-packages/Pillow.libs/libpng16-213e245f.so.16.37.0 b/.venv/lib/python3.9/site-packages/Pillow.libs/libpng16-213e245f.so.16.37.0 new file mode 100644 index 00000000..b9ed0f16 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/Pillow.libs/libpng16-213e245f.so.16.37.0 differ diff --git a/.venv/lib/python3.9/site-packages/Pillow.libs/libtiff-9ffe9659.so.5.7.0 b/.venv/lib/python3.9/site-packages/Pillow.libs/libtiff-9ffe9659.so.5.7.0 new file mode 100644 index 00000000..4c905698 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/Pillow.libs/libtiff-9ffe9659.so.5.7.0 differ diff --git a/.venv/lib/python3.9/site-packages/Pillow.libs/libwebp-d8a3db66.so.7.1.2 b/.venv/lib/python3.9/site-packages/Pillow.libs/libwebp-d8a3db66.so.7.1.2 new file mode 100644 index 00000000..7b9bb2be Binary files /dev/null and b/.venv/lib/python3.9/site-packages/Pillow.libs/libwebp-d8a3db66.so.7.1.2 differ diff --git a/.venv/lib/python3.9/site-packages/Pillow.libs/libwebpdemux-f117ddb4.so.2.0.8 b/.venv/lib/python3.9/site-packages/Pillow.libs/libwebpdemux-f117ddb4.so.2.0.8 new file mode 100644 index 00000000..f8ad57aa Binary files /dev/null and b/.venv/lib/python3.9/site-packages/Pillow.libs/libwebpdemux-f117ddb4.so.2.0.8 differ diff --git a/.venv/lib/python3.9/site-packages/Pillow.libs/libwebpmux-fe44437b.so.3.0.7 b/.venv/lib/python3.9/site-packages/Pillow.libs/libwebpmux-fe44437b.so.3.0.7 new file mode 100644 index 00000000..86b59521 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/Pillow.libs/libwebpmux-fe44437b.so.3.0.7 differ diff --git a/.venv/lib/python3.9/site-packages/Pillow.libs/libxcb-1122e22b.so.1.1.0 b/.venv/lib/python3.9/site-packages/Pillow.libs/libxcb-1122e22b.so.1.1.0 new file mode 100644 index 00000000..ec04044e Binary files /dev/null and b/.venv/lib/python3.9/site-packages/Pillow.libs/libxcb-1122e22b.so.1.1.0 differ diff 
--git a/.venv/lib/python3.9/site-packages/Pillow.libs/libz-dd453c56.so.1.2.11 b/.venv/lib/python3.9/site-packages/Pillow.libs/libz-dd453c56.so.1.2.11 new file mode 100644 index 00000000..bec57efd Binary files /dev/null and b/.venv/lib/python3.9/site-packages/Pillow.libs/libz-dd453c56.so.1.2.11 differ diff --git a/.venv/lib/python3.9/site-packages/__pycache__/cycler.cpython-39.pyc b/.venv/lib/python3.9/site-packages/__pycache__/cycler.cpython-39.pyc new file mode 100644 index 00000000..365efc6c Binary files /dev/null and b/.venv/lib/python3.9/site-packages/__pycache__/cycler.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/__pycache__/pylab.cpython-39.pyc b/.venv/lib/python3.9/site-packages/__pycache__/pylab.cpython-39.pyc new file mode 100644 index 00000000..61cd1f7e Binary files /dev/null and b/.venv/lib/python3.9/site-packages/__pycache__/pylab.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/INSTALLER b/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/INSTALLER new file mode 100644 index 00000000..a1b589e3 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/LICENSE b/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/LICENSE new file mode 100644 index 00000000..d41d8089 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015, matplotlib project +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the matplotlib project nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/METADATA b/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/METADATA new file mode 100644 index 00000000..d6901101 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/METADATA @@ -0,0 +1,26 @@ +Metadata-Version: 2.1 +Name: cycler +Version: 0.11.0 +Summary: Composable style cycles +Home-page: https://github.com/matplotlib/cycler +Author: Thomas A Caswell +Author-email: matplotlib-users@python.org +License: BSD +Keywords: cycle kwargs +Platform: Cross platform (Linux +Platform: macOS +Platform: Windows) +Classifier: License :: OSI Approved :: BSD License +Classifier: Development Status :: 4 - Beta +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3 :: Only +Requires-Python: >=3.6 + +UNKNOWN + + diff --git a/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/RECORD b/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/RECORD new file mode 100644 index 00000000..d95679ac --- /dev/null +++ b/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/RECORD @@ -0,0 +1,8 @@ +__pycache__/cycler.cpython-39.pyc,, +cycler-0.11.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +cycler-0.11.0.dist-info/LICENSE,sha256=8SGBQ9dm2j_qZvEzlrfxXfRqgzA_Kb-Wum6Y601C9Ag,1497 +cycler-0.11.0.dist-info/METADATA,sha256=zQAWfiW6iWRaNH7P4681fDrNnJ72n0zYZjo-likNrBo,785 +cycler-0.11.0.dist-info/RECORD,, +cycler-0.11.0.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92 +cycler-0.11.0.dist-info/top_level.txt,sha256=D8BVVDdAAelLb2FOEz7lDpc6-AL21ylKPrMhtG6yzyE,7 
+cycler.py,sha256=HNLU4VZism2JQG6Rk03-lkmOcXccAvDjAXmOhGxpHrA,14519 diff --git a/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/WHEEL b/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/WHEEL new file mode 100644 index 00000000..385faab0 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/top_level.txt b/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/top_level.txt new file mode 100644 index 00000000..22546440 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/cycler-0.11.0.dist-info/top_level.txt @@ -0,0 +1 @@ +cycler diff --git a/.venv/lib/python3.9/site-packages/cycler.py b/.venv/lib/python3.9/site-packages/cycler.py new file mode 100644 index 00000000..f86b68de --- /dev/null +++ b/.venv/lib/python3.9/site-packages/cycler.py @@ -0,0 +1,501 @@ +""" +Cycler +====== + +Cycling through combinations of values, producing dictionaries. 
+ +You can add cyclers:: + + from cycler import cycler + cc = (cycler(color=list('rgb')) + + cycler(linestyle=['-', '--', '-.'])) + for d in cc: + print(d) + +Results in:: + + {'color': 'r', 'linestyle': '-'} + {'color': 'g', 'linestyle': '--'} + {'color': 'b', 'linestyle': '-.'} + + +You can multiply cyclers:: + + from cycler import cycler + cc = (cycler(color=list('rgb')) * + cycler(linestyle=['-', '--', '-.'])) + for d in cc: + print(d) + +Results in:: + + {'color': 'r', 'linestyle': '-'} + {'color': 'r', 'linestyle': '--'} + {'color': 'r', 'linestyle': '-.'} + {'color': 'g', 'linestyle': '-'} + {'color': 'g', 'linestyle': '--'} + {'color': 'g', 'linestyle': '-.'} + {'color': 'b', 'linestyle': '-'} + {'color': 'b', 'linestyle': '--'} + {'color': 'b', 'linestyle': '-.'} +""" + + +import copy +from functools import reduce +from itertools import product, cycle +from operator import mul, add + +__version__ = '0.10.0' + + +def _process_keys(left, right): + """ + Helper function to compose cycler keys. + + Parameters + ---------- + left, right : iterable of dictionaries or None + The cyclers to be composed. + + Returns + ------- + keys : set + The keys in the composition of the two cyclers. + """ + l_peek = next(iter(left)) if left is not None else {} + r_peek = next(iter(right)) if right is not None else {} + l_key = set(l_peek.keys()) + r_key = set(r_peek.keys()) + if l_key & r_key: + raise ValueError("Can not compose overlapping cycles") + return l_key | r_key + + +def concat(left, right): + r""" + Concatenate `Cycler`\s, as if chained using `itertools.chain`. + + The keys must match exactly. + + Examples + -------- + >>> num = cycler('a', range(3)) + >>> let = cycler('a', 'abc') + >>> num.concat(let) + cycler('a', [0, 1, 2, 'a', 'b', 'c']) + + Returns + ------- + `Cycler` + The concatenated cycler. 
+ """ + if left.keys != right.keys: + raise ValueError("Keys do not match:\n" + "\tIntersection: {both!r}\n" + "\tDisjoint: {just_one!r}".format( + both=left.keys & right.keys, + just_one=left.keys ^ right.keys)) + _l = left.by_key() + _r = right.by_key() + return reduce(add, (_cycler(k, _l[k] + _r[k]) for k in left.keys)) + + +class Cycler: + """ + Composable cycles. + + This class has compositions methods: + + ``+`` + for 'inner' products (zip) + + ``+=`` + in-place ``+`` + + ``*`` + for outer products (`itertools.product`) and integer multiplication + + ``*=`` + in-place ``*`` + + and supports basic slicing via ``[]``. + + Parameters + ---------- + left, right : Cycler or None + The 'left' and 'right' cyclers. + op : func or None + Function which composes the 'left' and 'right' cyclers. + """ + + def __call__(self): + return cycle(self) + + def __init__(self, left, right=None, op=None): + """ + Semi-private init. + + Do not use this directly, use `cycler` function instead. + """ + if isinstance(left, Cycler): + self._left = Cycler(left._left, left._right, left._op) + elif left is not None: + # Need to copy the dictionary or else that will be a residual + # mutable that could lead to strange errors + self._left = [copy.copy(v) for v in left] + else: + self._left = None + + if isinstance(right, Cycler): + self._right = Cycler(right._left, right._right, right._op) + elif right is not None: + # Need to copy the dictionary or else that will be a residual + # mutable that could lead to strange errors + self._right = [copy.copy(v) for v in right] + else: + self._right = None + + self._keys = _process_keys(self._left, self._right) + self._op = op + + def __contains__(self, k): + return k in self._keys + + @property + def keys(self): + """The keys this Cycler knows about.""" + return set(self._keys) + + def change_key(self, old, new): + """ + Change a key in this cycler to a new name. + Modification is performed in-place. 
+ + Does nothing if the old key is the same as the new key. + Raises a ValueError if the new key is already a key. + Raises a KeyError if the old key isn't a key. + """ + if old == new: + return + if new in self._keys: + raise ValueError( + "Can't replace {old} with {new}, {new} is already a key" + .format(old=old, new=new) + ) + if old not in self._keys: + raise KeyError("Can't replace {old} with {new}, {old} is not a key" + .format(old=old, new=new)) + + self._keys.remove(old) + self._keys.add(new) + + if self._right is not None and old in self._right.keys: + self._right.change_key(old, new) + + # self._left should always be non-None + # if self._keys is non-empty. + elif isinstance(self._left, Cycler): + self._left.change_key(old, new) + else: + # It should be completely safe at this point to + # assume that the old key can be found in each + # iteration. + self._left = [{new: entry[old]} for entry in self._left] + + @classmethod + def _from_iter(cls, label, itr): + """ + Class method to create 'base' Cycler objects + that do not have a 'right' or 'op' and for which + the 'left' object is not another Cycler. + + Parameters + ---------- + label : str + The property key. + + itr : iterable + Finite length iterable of the property values. + + Returns + ------- + `Cycler` + New 'base' cycler. + """ + ret = cls(None) + ret._left = list({label: v} for v in itr) + ret._keys = {label} + return ret + + def __getitem__(self, key): + # TODO : maybe add numpy style fancy slicing + if isinstance(key, slice): + trans = self.by_key() + return reduce(add, (_cycler(k, v[key]) for k, v in trans.items())) + else: + raise ValueError("Can only use slices with Cycler.__getitem__") + + def __iter__(self): + if self._right is None: + for left in self._left: + yield dict(left) + else: + for a, b in self._op(self._left, self._right): + out = {} + out.update(a) + out.update(b) + yield out + + def __add__(self, other): + """ + Pair-wise combine two equal length cyclers (zip). 
+ + Parameters + ---------- + other : Cycler + """ + if len(self) != len(other): + raise ValueError("Can only add equal length cycles, " + f"not {len(self)} and {len(other)}") + return Cycler(self, other, zip) + + def __mul__(self, other): + """ + Outer product of two cyclers (`itertools.product`) or integer + multiplication. + + Parameters + ---------- + other : Cycler or int + """ + if isinstance(other, Cycler): + return Cycler(self, other, product) + elif isinstance(other, int): + trans = self.by_key() + return reduce(add, (_cycler(k, v*other) for k, v in trans.items())) + else: + return NotImplemented + + def __rmul__(self, other): + return self * other + + def __len__(self): + op_dict = {zip: min, product: mul} + if self._right is None: + return len(self._left) + l_len = len(self._left) + r_len = len(self._right) + return op_dict[self._op](l_len, r_len) + + def __iadd__(self, other): + """ + In-place pair-wise combine two equal length cyclers (zip). + + Parameters + ---------- + other : Cycler + """ + if not isinstance(other, Cycler): + raise TypeError("Cannot += with a non-Cycler object") + # True shallow copy of self is fine since this is in-place + old_self = copy.copy(self) + self._keys = _process_keys(old_self, other) + self._left = old_self + self._op = zip + self._right = Cycler(other._left, other._right, other._op) + return self + + def __imul__(self, other): + """ + In-place outer product of two cyclers (`itertools.product`). 
+ + Parameters + ---------- + other : Cycler + """ + if not isinstance(other, Cycler): + raise TypeError("Cannot *= with a non-Cycler object") + # True shallow copy of self is fine since this is in-place + old_self = copy.copy(self) + self._keys = _process_keys(old_self, other) + self._left = old_self + self._op = product + self._right = Cycler(other._left, other._right, other._op) + return self + + def __eq__(self, other): + if len(self) != len(other): + return False + if self.keys ^ other.keys: + return False + return all(a == b for a, b in zip(self, other)) + + def __ne__(self, other): + return not (self == other) + + __hash__ = None + + def __repr__(self): + op_map = {zip: '+', product: '*'} + if self._right is None: + lab = self.keys.pop() + itr = list(v[lab] for v in self) + return f"cycler({lab!r}, {itr!r})" + else: + op = op_map.get(self._op, '?') + msg = "({left!r} {op} {right!r})" + return msg.format(left=self._left, op=op, right=self._right) + + def _repr_html_(self): + # an table showing the value of each key through a full cycle + output = "" + sorted_keys = sorted(self.keys, key=repr) + for key in sorted_keys: + output += f"" + for d in iter(self): + output += "" + for k in sorted_keys: + output += f"" + output += "" + output += "
{key!r}
{d[k]!r}
" + return output + + def by_key(self): + """ + Values by key. + + This returns the transposed values of the cycler. Iterating + over a `Cycler` yields dicts with a single value for each key, + this method returns a `dict` of `list` which are the values + for the given key. + + The returned value can be used to create an equivalent `Cycler` + using only `+`. + + Returns + ------- + transpose : dict + dict of lists of the values for each key. + """ + + # TODO : sort out if this is a bottle neck, if there is a better way + # and if we care. + + keys = self.keys + out = {k: list() for k in keys} + + for d in self: + for k in keys: + out[k].append(d[k]) + return out + + # for back compatibility + _transpose = by_key + + def simplify(self): + """ + Simplify the cycler into a sum (but no products) of cyclers. + + Returns + ------- + simple : Cycler + """ + # TODO: sort out if it is worth the effort to make sure this is + # balanced. Currently it is is + # (((a + b) + c) + d) vs + # ((a + b) + (c + d)) + # I would believe that there is some performance implications + trans = self.by_key() + return reduce(add, (_cycler(k, v) for k, v in trans.items())) + + concat = concat + + +def cycler(*args, **kwargs): + """ + Create a new `Cycler` object from a single positional argument, + a pair of positional arguments, or the combination of keyword arguments. + + cycler(arg) + cycler(label1=itr1[, label2=iter2[, ...]]) + cycler(label, itr) + + Form 1 simply copies a given `Cycler` object. + + Form 2 composes a `Cycler` as an inner product of the + pairs of keyword arguments. In other words, all of the + iterables are cycled simultaneously, as if through zip(). + + Form 3 creates a `Cycler` from a label and an iterable. + This is useful for when the label cannot be a keyword argument + (e.g., an integer or a name that has a space in it). + + Parameters + ---------- + arg : Cycler + Copy constructor for Cycler (does a shallow copy of iterables). + label : name + The property key. 
In the 2-arg form of the function, + the label can be any hashable object. In the keyword argument + form of the function, it must be a valid python identifier. + itr : iterable + Finite length iterable of the property values. + Can be a single-property `Cycler` that would + be like a key change, but as a shallow copy. + + Returns + ------- + cycler : Cycler + New `Cycler` for the given property + + """ + if args and kwargs: + raise TypeError("cyl() can only accept positional OR keyword " + "arguments -- not both.") + + if len(args) == 1: + if not isinstance(args[0], Cycler): + raise TypeError("If only one positional argument given, it must " + "be a Cycler instance.") + return Cycler(args[0]) + elif len(args) == 2: + return _cycler(*args) + elif len(args) > 2: + raise TypeError("Only a single Cycler can be accepted as the lone " + "positional argument. Use keyword arguments instead.") + + if kwargs: + return reduce(add, (_cycler(k, v) for k, v in kwargs.items())) + + raise TypeError("Must have at least a positional OR keyword arguments") + + +def _cycler(label, itr): + """ + Create a new `Cycler` object from a property name and iterable of values. + + Parameters + ---------- + label : hashable + The property key. + itr : iterable + Finite length iterable of the property values. + + Returns + ------- + cycler : Cycler + New `Cycler` for the given property + """ + if isinstance(itr, Cycler): + keys = itr.keys + if len(keys) != 1: + msg = "Can not create Cycler from a multi-property Cycler" + raise ValueError(msg) + + lab = keys.pop() + # Doesn't need to be a new list because + # _from_iter() will be creating that new list anyway. 
+ itr = (v[lab] for v in itr) + + return Cycler._from_iter(label, itr) diff --git a/.venv/lib/python3.9/site-packages/fontTools/__init__.py b/.venv/lib/python3.9/site-packages/fontTools/__init__.py new file mode 100644 index 00000000..ef13e216 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/__init__.py @@ -0,0 +1,8 @@ +import logging +from fontTools.misc.loggingTools import configLogger + +log = logging.getLogger(__name__) + +version = __version__ = "4.28.2" + +__all__ = ["version", "log", "configLogger"] diff --git a/.venv/lib/python3.9/site-packages/fontTools/__main__.py b/.venv/lib/python3.9/site-packages/fontTools/__main__.py new file mode 100644 index 00000000..9b978aaa --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/__main__.py @@ -0,0 +1,34 @@ +import sys + + +def main(args=None): + if args is None: + args = sys.argv[1:] + + # TODO Handle library-wide options. Eg.: + # --unicodedata + # --verbose / other logging stuff + + # TODO Allow a way to run arbitrary modules? Useful for setting + # library-wide options and calling another library. Eg.: + # + # $ fonttools --unicodedata=... fontmake ... + # + # This allows for a git-like command where thirdparty commands + # can be added. Should we just try importing the fonttools + # module first and try without if it fails? 
+ + if len(sys.argv) < 2: + sys.argv.append("help") + if sys.argv[1] == "-h" or sys.argv[1] == "--help": + sys.argv[1] = "help" + mod = 'fontTools.'+sys.argv[1] + sys.argv[1] = sys.argv[0] + ' ' + sys.argv[1] + del sys.argv[0] + + import runpy + runpy.run_module(mod, run_name='__main__') + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/.venv/lib/python3.9/site-packages/fontTools/__pycache__/__init__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..176ea9eb Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/__pycache__/__main__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/__main__.cpython-39.pyc new file mode 100644 index 00000000..c0b23417 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/__main__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/__pycache__/afmLib.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/afmLib.cpython-39.pyc new file mode 100644 index 00000000..671d406d Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/afmLib.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/__pycache__/agl.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/agl.cpython-39.pyc new file mode 100644 index 00000000..bbba246e Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/agl.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/__pycache__/fontBuilder.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/fontBuilder.cpython-39.pyc new file mode 100644 index 00000000..66c36b6f Binary files /dev/null and 
b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/fontBuilder.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/__pycache__/help.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/help.cpython-39.pyc new file mode 100644 index 00000000..2326d1bb Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/help.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/__pycache__/merge.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/merge.cpython-39.pyc new file mode 100644 index 00000000..85c128a6 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/merge.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/__pycache__/tfmLib.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/tfmLib.cpython-39.pyc new file mode 100644 index 00000000..973c77f4 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/tfmLib.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/__pycache__/ttx.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/ttx.cpython-39.pyc new file mode 100644 index 00000000..4b5c160f Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/ttx.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/__pycache__/unicode.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/unicode.cpython-39.pyc new file mode 100644 index 00000000..3125326c Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/__pycache__/unicode.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/afmLib.py b/.venv/lib/python3.9/site-packages/fontTools/afmLib.py new file mode 100644 index 00000000..49d99512 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/afmLib.py @@ -0,0 +1,430 @@ 
+"""Module for reading and writing AFM (Adobe Font Metrics) files. + +Note that this has been designed to read in AFM files generated by Fontographer +and has not been tested on many other files. In particular, it does not +implement the whole Adobe AFM specification [#f1]_ but, it should read most +"common" AFM files. + +Here is an example of using `afmLib` to read, modify and write an AFM file: + + >>> from fontTools.afmLib import AFM + >>> f = AFM("Tests/afmLib/data/TestAFM.afm") + >>> + >>> # Accessing a pair gets you the kern value + >>> f[("V","A")] + -60 + >>> + >>> # Accessing a glyph name gets you metrics + >>> f["A"] + (65, 668, (8, -25, 660, 666)) + >>> # (charnum, width, bounding box) + >>> + >>> # Accessing an attribute gets you metadata + >>> f.FontName + 'TestFont-Regular' + >>> f.FamilyName + 'TestFont' + >>> f.Weight + 'Regular' + >>> f.XHeight + 500 + >>> f.Ascender + 750 + >>> + >>> # Attributes and items can also be set + >>> f[("A","V")] = -150 # Tighten kerning + >>> f.FontName = "TestFont Squished" + >>> + >>> # And the font written out again (remove the # in front) + >>> #f.write("testfont-squished.afm") + +.. rubric:: Footnotes + +.. [#f1] `Adobe Technote 5004 `_, + Adobe Font Metrics File Format Specification. 
+ +""" + + +import re + +# every single line starts with a "word" +identifierRE = re.compile(r"^([A-Za-z]+).*") + +# regular expression to parse char lines +charRE = re.compile( + r"(-?\d+)" # charnum + r"\s*;\s*WX\s+" # ; WX + r"(-?\d+)" # width + r"\s*;\s*N\s+" # ; N + r"([.A-Za-z0-9_]+)" # charname + r"\s*;\s*B\s+" # ; B + r"(-?\d+)" # left + r"\s+" + r"(-?\d+)" # bottom + r"\s+" + r"(-?\d+)" # right + r"\s+" + r"(-?\d+)" # top + r"\s*;\s*" # ; + ) + +# regular expression to parse kerning lines +kernRE = re.compile( + r"([.A-Za-z0-9_]+)" # leftchar + r"\s+" + r"([.A-Za-z0-9_]+)" # rightchar + r"\s+" + r"(-?\d+)" # value + r"\s*" + ) + +# regular expressions to parse composite info lines of the form: +# Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ; +compositeRE = re.compile( + r"([.A-Za-z0-9_]+)" # char name + r"\s+" + r"(\d+)" # number of parts + r"\s*;\s*" + ) +componentRE = re.compile( + r"PCC\s+" # PPC + r"([.A-Za-z0-9_]+)" # base char name + r"\s+" + r"(-?\d+)" # x offset + r"\s+" + r"(-?\d+)" # y offset + r"\s*;\s*" + ) + +preferredAttributeOrder = [ + "FontName", + "FullName", + "FamilyName", + "Weight", + "ItalicAngle", + "IsFixedPitch", + "FontBBox", + "UnderlinePosition", + "UnderlineThickness", + "Version", + "Notice", + "EncodingScheme", + "CapHeight", + "XHeight", + "Ascender", + "Descender", +] + + +class error(Exception): + pass + + +class AFM(object): + + _attrs = None + + _keywords = ['StartFontMetrics', + 'EndFontMetrics', + 'StartCharMetrics', + 'EndCharMetrics', + 'StartKernData', + 'StartKernPairs', + 'EndKernPairs', + 'EndKernData', + 'StartComposites', + 'EndComposites', + ] + + def __init__(self, path=None): + """AFM file reader. + + Instantiating an object with a path name will cause the file to be opened, + read, and parsed. 
Alternatively the path can be left unspecified, and a + file can be parsed later with the :meth:`read` method.""" + self._attrs = {} + self._chars = {} + self._kerning = {} + self._index = {} + self._comments = [] + self._composites = {} + if path is not None: + self.read(path) + + def read(self, path): + """Opens, reads and parses a file.""" + lines = readlines(path) + for line in lines: + if not line.strip(): + continue + m = identifierRE.match(line) + if m is None: + raise error("syntax error in AFM file: " + repr(line)) + + pos = m.regs[1][1] + word = line[:pos] + rest = line[pos:].strip() + if word in self._keywords: + continue + if word == "C": + self.parsechar(rest) + elif word == "KPX": + self.parsekernpair(rest) + elif word == "CC": + self.parsecomposite(rest) + else: + self.parseattr(word, rest) + + def parsechar(self, rest): + m = charRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + things = [] + for fr, to in m.regs[1:]: + things.append(rest[fr:to]) + charname = things[2] + del things[2] + charnum, width, l, b, r, t = (int(thing) for thing in things) + self._chars[charname] = charnum, width, (l, b, r, t) + + def parsekernpair(self, rest): + m = kernRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + things = [] + for fr, to in m.regs[1:]: + things.append(rest[fr:to]) + leftchar, rightchar, value = things + value = int(value) + self._kerning[(leftchar, rightchar)] = value + + def parseattr(self, word, rest): + if word == "FontBBox": + l, b, r, t = [int(thing) for thing in rest.split()] + self._attrs[word] = l, b, r, t + elif word == "Comment": + self._comments.append(rest) + else: + try: + value = int(rest) + except (ValueError, OverflowError): + self._attrs[word] = rest + else: + self._attrs[word] = value + + def parsecomposite(self, rest): + m = compositeRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + charname = m.group(1) + 
ncomponents = int(m.group(2)) + rest = rest[m.regs[0][1]:] + components = [] + while True: + m = componentRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + basechar = m.group(1) + xoffset = int(m.group(2)) + yoffset = int(m.group(3)) + components.append((basechar, xoffset, yoffset)) + rest = rest[m.regs[0][1]:] + if not rest: + break + assert len(components) == ncomponents + self._composites[charname] = components + + def write(self, path, sep='\r'): + """Writes out an AFM font to the given path.""" + import time + lines = [ "StartFontMetrics 2.0", + "Comment Generated by afmLib; at %s" % ( + time.strftime("%m/%d/%Y %H:%M:%S", + time.localtime(time.time())))] + + # write comments, assuming (possibly wrongly!) they should + # all appear at the top + for comment in self._comments: + lines.append("Comment " + comment) + + # write attributes, first the ones we know about, in + # a preferred order + attrs = self._attrs + for attr in preferredAttributeOrder: + if attr in attrs: + value = attrs[attr] + if attr == "FontBBox": + value = "%s %s %s %s" % value + lines.append(attr + " " + str(value)) + # then write the attributes we don't know about, + # in alphabetical order + items = sorted(attrs.items()) + for attr, value in items: + if attr in preferredAttributeOrder: + continue + lines.append(attr + " " + str(value)) + + # write char metrics + lines.append("StartCharMetrics " + repr(len(self._chars))) + items = [(charnum, (charname, width, box)) for charname, (charnum, width, box) in self._chars.items()] + + def myKey(a): + """Custom key function to make sure unencoded chars (-1) + end up at the end of the list after sorting.""" + if a[0] == -1: + a = (0xffff,) + a[1:] # 0xffff is an arbitrary large number + return a + items.sort(key=myKey) + + for charnum, (charname, width, (l, b, r, t)) in items: + lines.append("C %d ; WX %d ; N %s ; B %d %d %d %d ;" % + (charnum, width, charname, l, b, r, t)) + lines.append("EndCharMetrics") + + 
# write kerning info + lines.append("StartKernData") + lines.append("StartKernPairs " + repr(len(self._kerning))) + items = sorted(self._kerning.items()) + for (leftchar, rightchar), value in items: + lines.append("KPX %s %s %d" % (leftchar, rightchar, value)) + lines.append("EndKernPairs") + lines.append("EndKernData") + + if self._composites: + composites = sorted(self._composites.items()) + lines.append("StartComposites %s" % len(self._composites)) + for charname, components in composites: + line = "CC %s %s ;" % (charname, len(components)) + for basechar, xoffset, yoffset in components: + line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset) + lines.append(line) + lines.append("EndComposites") + + lines.append("EndFontMetrics") + + writelines(path, lines, sep) + + def has_kernpair(self, pair): + """Returns `True` if the given glyph pair (specified as a tuple) exists + in the kerning dictionary.""" + return pair in self._kerning + + def kernpairs(self): + """Returns a list of all kern pairs in the kerning dictionary.""" + return list(self._kerning.keys()) + + def has_char(self, char): + """Returns `True` if the given glyph exists in the font.""" + return char in self._chars + + def chars(self): + """Returns a list of all glyph names in the font.""" + return list(self._chars.keys()) + + def comments(self): + """Returns all comments from the file.""" + return self._comments + + def addComment(self, comment): + """Adds a new comment to the file.""" + self._comments.append(comment) + + def addComposite(self, glyphName, components): + """Specifies that the glyph `glyphName` is made up of the given components. + The components list should be of the following form:: + + [ + (glyphname, xOffset, yOffset), + ... 
+ ] + + """ + self._composites[glyphName] = components + + def __getattr__(self, attr): + if attr in self._attrs: + return self._attrs[attr] + else: + raise AttributeError(attr) + + def __setattr__(self, attr, value): + # all attrs *not* starting with "_" are consider to be AFM keywords + if attr[:1] == "_": + self.__dict__[attr] = value + else: + self._attrs[attr] = value + + def __delattr__(self, attr): + # all attrs *not* starting with "_" are consider to be AFM keywords + if attr[:1] == "_": + try: + del self.__dict__[attr] + except KeyError: + raise AttributeError(attr) + else: + try: + del self._attrs[attr] + except KeyError: + raise AttributeError(attr) + + def __getitem__(self, key): + if isinstance(key, tuple): + # key is a tuple, return the kernpair + return self._kerning[key] + else: + # return the metrics instead + return self._chars[key] + + def __setitem__(self, key, value): + if isinstance(key, tuple): + # key is a tuple, set kernpair + self._kerning[key] = value + else: + # set char metrics + self._chars[key] = value + + def __delitem__(self, key): + if isinstance(key, tuple): + # key is a tuple, del kernpair + del self._kerning[key] + else: + # del char metrics + del self._chars[key] + + def __repr__(self): + if hasattr(self, "FullName"): + return '' % self.FullName + else: + return '' % id(self) + + +def readlines(path): + with open(path, "r", encoding="ascii") as f: + data = f.read() + return data.splitlines() + +def writelines(path, lines, sep='\r'): + with open(path, "w", encoding="ascii", newline=sep) as f: + f.write("\n".join(lines) + "\n") + + +if __name__ == "__main__": + import EasyDialogs + path = EasyDialogs.AskFileForOpen() + if path: + afm = AFM(path) + char = 'A' + if afm.has_char(char): + print(afm[char]) # print charnum, width and boundingbox + pair = ('A', 'V') + if afm.has_kernpair(pair): + print(afm[pair]) # print kerning value for pair + print(afm.Version) # various other afm entries have become attributes + print(afm.Weight) + 
# afm.comments() returns a list of all Comment lines found in the AFM + print(afm.comments()) + #print afm.chars() + #print afm.kernpairs() + print(afm) + afm.write(path + ".muck") diff --git a/.venv/lib/python3.9/site-packages/fontTools/agl.py b/.venv/lib/python3.9/site-packages/fontTools/agl.py new file mode 100644 index 00000000..cc286e42 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/agl.py @@ -0,0 +1,5232 @@ +# -*- coding: utf-8 -*- +# The tables below are taken from +# https://github.com/adobe-type-tools/agl-aglfn/raw/4036a9ca80a62f64f9de4f7321a9a045ad0ecfd6/glyphlist.txt +# and +# https://github.com/adobe-type-tools/agl-aglfn/raw/4036a9ca80a62f64f9de4f7321a9a045ad0ecfd6/aglfn.txt +""" +Interface to the Adobe Glyph List + +This module exists to convert glyph names from the Adobe Glyph List +to their Unicode equivalents. Example usage: + + >>> from fontTools.agl import toUnicode + >>> toUnicode("nahiragana") + 'ãª' + +It also contains two dictionaries, ``UV2AGL`` and ``AGL2UV``, which map from +Unicode codepoints to AGL names and vice versa: + + >>> import fontTools + >>> fontTools.agl.UV2AGL[ord("?")] + 'question' + >>> fontTools.agl.AGL2UV["wcircumflex"] + 373 + +This is used by fontTools when it has to construct glyph names for a font which +doesn't include any (e.g. format 3.0 post tables). +""" + +from fontTools.misc.textTools import tostr +import re + + +_aglText = """\ +# ----------------------------------------------------------- +# Copyright 2002-2019 Adobe (http://www.adobe.com/). +# +# Redistribution and use in source and binary forms, with or +# without modification, are permitted provided that the +# following conditions are met: +# +# Redistributions of source code must retain the above +# copyright notice, this list of conditions and the following +# disclaimer. 
+# +# Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials +# provided with the distribution. +# +# Neither the name of Adobe nor the names of its contributors +# may be used to endorse or promote products derived from this +# software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# ----------------------------------------------------------- +# Name: Adobe Glyph List +# Table version: 2.0 +# Date: September 20, 2002 +# URL: https://github.com/adobe-type-tools/agl-aglfn +# +# Format: two semicolon-delimited fields: +# (1) glyph name--upper/lowercase letters and digits +# (2) Unicode scalar value--four uppercase hexadecimal digits +# +A;0041 +AE;00C6 +AEacute;01FC +AEmacron;01E2 +AEsmall;F7E6 +Aacute;00C1 +Aacutesmall;F7E1 +Abreve;0102 +Abreveacute;1EAE +Abrevecyrillic;04D0 +Abrevedotbelow;1EB6 +Abrevegrave;1EB0 +Abrevehookabove;1EB2 +Abrevetilde;1EB4 +Acaron;01CD +Acircle;24B6 +Acircumflex;00C2 +Acircumflexacute;1EA4 +Acircumflexdotbelow;1EAC +Acircumflexgrave;1EA6 +Acircumflexhookabove;1EA8 +Acircumflexsmall;F7E2 +Acircumflextilde;1EAA +Acute;F6C9 +Acutesmall;F7B4 +Acyrillic;0410 +Adblgrave;0200 +Adieresis;00C4 +Adieresiscyrillic;04D2 +Adieresismacron;01DE +Adieresissmall;F7E4 +Adotbelow;1EA0 +Adotmacron;01E0 +Agrave;00C0 +Agravesmall;F7E0 +Ahookabove;1EA2 +Aiecyrillic;04D4 +Ainvertedbreve;0202 +Alpha;0391 +Alphatonos;0386 +Amacron;0100 +Amonospace;FF21 +Aogonek;0104 +Aring;00C5 +Aringacute;01FA +Aringbelow;1E00 +Aringsmall;F7E5 +Asmall;F761 +Atilde;00C3 +Atildesmall;F7E3 +Aybarmenian;0531 +B;0042 +Bcircle;24B7 +Bdotaccent;1E02 +Bdotbelow;1E04 +Becyrillic;0411 +Benarmenian;0532 +Beta;0392 +Bhook;0181 +Blinebelow;1E06 +Bmonospace;FF22 +Brevesmall;F6F4 +Bsmall;F762 +Btopbar;0182 +C;0043 +Caarmenian;053E +Cacute;0106 +Caron;F6CA +Caronsmall;F6F5 +Ccaron;010C +Ccedilla;00C7 +Ccedillaacute;1E08 +Ccedillasmall;F7E7 +Ccircle;24B8 +Ccircumflex;0108 +Cdot;010A +Cdotaccent;010A +Cedillasmall;F7B8 +Chaarmenian;0549 +Cheabkhasiancyrillic;04BC +Checyrillic;0427 +Chedescenderabkhasiancyrillic;04BE +Chedescendercyrillic;04B6 +Chedieresiscyrillic;04F4 +Cheharmenian;0543 +Chekhakassiancyrillic;04CB +Cheverticalstrokecyrillic;04B8 +Chi;03A7 +Chook;0187 +Circumflexsmall;F6F6 +Cmonospace;FF23 +Coarmenian;0551 +Csmall;F763 +D;0044 +DZ;01F1 +DZcaron;01C4 
+Daarmenian;0534 +Dafrican;0189 +Dcaron;010E +Dcedilla;1E10 +Dcircle;24B9 +Dcircumflexbelow;1E12 +Dcroat;0110 +Ddotaccent;1E0A +Ddotbelow;1E0C +Decyrillic;0414 +Deicoptic;03EE +Delta;2206 +Deltagreek;0394 +Dhook;018A +Dieresis;F6CB +DieresisAcute;F6CC +DieresisGrave;F6CD +Dieresissmall;F7A8 +Digammagreek;03DC +Djecyrillic;0402 +Dlinebelow;1E0E +Dmonospace;FF24 +Dotaccentsmall;F6F7 +Dslash;0110 +Dsmall;F764 +Dtopbar;018B +Dz;01F2 +Dzcaron;01C5 +Dzeabkhasiancyrillic;04E0 +Dzecyrillic;0405 +Dzhecyrillic;040F +E;0045 +Eacute;00C9 +Eacutesmall;F7E9 +Ebreve;0114 +Ecaron;011A +Ecedillabreve;1E1C +Echarmenian;0535 +Ecircle;24BA +Ecircumflex;00CA +Ecircumflexacute;1EBE +Ecircumflexbelow;1E18 +Ecircumflexdotbelow;1EC6 +Ecircumflexgrave;1EC0 +Ecircumflexhookabove;1EC2 +Ecircumflexsmall;F7EA +Ecircumflextilde;1EC4 +Ecyrillic;0404 +Edblgrave;0204 +Edieresis;00CB +Edieresissmall;F7EB +Edot;0116 +Edotaccent;0116 +Edotbelow;1EB8 +Efcyrillic;0424 +Egrave;00C8 +Egravesmall;F7E8 +Eharmenian;0537 +Ehookabove;1EBA +Eightroman;2167 +Einvertedbreve;0206 +Eiotifiedcyrillic;0464 +Elcyrillic;041B +Elevenroman;216A +Emacron;0112 +Emacronacute;1E16 +Emacrongrave;1E14 +Emcyrillic;041C +Emonospace;FF25 +Encyrillic;041D +Endescendercyrillic;04A2 +Eng;014A +Enghecyrillic;04A4 +Enhookcyrillic;04C7 +Eogonek;0118 +Eopen;0190 +Epsilon;0395 +Epsilontonos;0388 +Ercyrillic;0420 +Ereversed;018E +Ereversedcyrillic;042D +Escyrillic;0421 +Esdescendercyrillic;04AA +Esh;01A9 +Esmall;F765 +Eta;0397 +Etarmenian;0538 +Etatonos;0389 +Eth;00D0 +Ethsmall;F7F0 +Etilde;1EBC +Etildebelow;1E1A +Euro;20AC +Ezh;01B7 +Ezhcaron;01EE +Ezhreversed;01B8 +F;0046 +Fcircle;24BB +Fdotaccent;1E1E +Feharmenian;0556 +Feicoptic;03E4 +Fhook;0191 +Fitacyrillic;0472 +Fiveroman;2164 +Fmonospace;FF26 +Fourroman;2163 +Fsmall;F766 +G;0047 +GBsquare;3387 +Gacute;01F4 +Gamma;0393 +Gammaafrican;0194 +Gangiacoptic;03EA +Gbreve;011E +Gcaron;01E6 +Gcedilla;0122 +Gcircle;24BC +Gcircumflex;011C +Gcommaaccent;0122 +Gdot;0120 +Gdotaccent;0120 
+Gecyrillic;0413 +Ghadarmenian;0542 +Ghemiddlehookcyrillic;0494 +Ghestrokecyrillic;0492 +Gheupturncyrillic;0490 +Ghook;0193 +Gimarmenian;0533 +Gjecyrillic;0403 +Gmacron;1E20 +Gmonospace;FF27 +Grave;F6CE +Gravesmall;F760 +Gsmall;F767 +Gsmallhook;029B +Gstroke;01E4 +H;0048 +H18533;25CF +H18543;25AA +H18551;25AB +H22073;25A1 +HPsquare;33CB +Haabkhasiancyrillic;04A8 +Hadescendercyrillic;04B2 +Hardsigncyrillic;042A +Hbar;0126 +Hbrevebelow;1E2A +Hcedilla;1E28 +Hcircle;24BD +Hcircumflex;0124 +Hdieresis;1E26 +Hdotaccent;1E22 +Hdotbelow;1E24 +Hmonospace;FF28 +Hoarmenian;0540 +Horicoptic;03E8 +Hsmall;F768 +Hungarumlaut;F6CF +Hungarumlautsmall;F6F8 +Hzsquare;3390 +I;0049 +IAcyrillic;042F +IJ;0132 +IUcyrillic;042E +Iacute;00CD +Iacutesmall;F7ED +Ibreve;012C +Icaron;01CF +Icircle;24BE +Icircumflex;00CE +Icircumflexsmall;F7EE +Icyrillic;0406 +Idblgrave;0208 +Idieresis;00CF +Idieresisacute;1E2E +Idieresiscyrillic;04E4 +Idieresissmall;F7EF +Idot;0130 +Idotaccent;0130 +Idotbelow;1ECA +Iebrevecyrillic;04D6 +Iecyrillic;0415 +Ifraktur;2111 +Igrave;00CC +Igravesmall;F7EC +Ihookabove;1EC8 +Iicyrillic;0418 +Iinvertedbreve;020A +Iishortcyrillic;0419 +Imacron;012A +Imacroncyrillic;04E2 +Imonospace;FF29 +Iniarmenian;053B +Iocyrillic;0401 +Iogonek;012E +Iota;0399 +Iotaafrican;0196 +Iotadieresis;03AA +Iotatonos;038A +Ismall;F769 +Istroke;0197 +Itilde;0128 +Itildebelow;1E2C +Izhitsacyrillic;0474 +Izhitsadblgravecyrillic;0476 +J;004A +Jaarmenian;0541 +Jcircle;24BF +Jcircumflex;0134 +Jecyrillic;0408 +Jheharmenian;054B +Jmonospace;FF2A +Jsmall;F76A +K;004B +KBsquare;3385 +KKsquare;33CD +Kabashkircyrillic;04A0 +Kacute;1E30 +Kacyrillic;041A +Kadescendercyrillic;049A +Kahookcyrillic;04C3 +Kappa;039A +Kastrokecyrillic;049E +Kaverticalstrokecyrillic;049C +Kcaron;01E8 +Kcedilla;0136 +Kcircle;24C0 +Kcommaaccent;0136 +Kdotbelow;1E32 +Keharmenian;0554 +Kenarmenian;053F +Khacyrillic;0425 +Kheicoptic;03E6 +Khook;0198 +Kjecyrillic;040C +Klinebelow;1E34 +Kmonospace;FF2B +Koppacyrillic;0480 +Koppagreek;03DE 
+Ksicyrillic;046E +Ksmall;F76B +L;004C +LJ;01C7 +LL;F6BF +Lacute;0139 +Lambda;039B +Lcaron;013D +Lcedilla;013B +Lcircle;24C1 +Lcircumflexbelow;1E3C +Lcommaaccent;013B +Ldot;013F +Ldotaccent;013F +Ldotbelow;1E36 +Ldotbelowmacron;1E38 +Liwnarmenian;053C +Lj;01C8 +Ljecyrillic;0409 +Llinebelow;1E3A +Lmonospace;FF2C +Lslash;0141 +Lslashsmall;F6F9 +Lsmall;F76C +M;004D +MBsquare;3386 +Macron;F6D0 +Macronsmall;F7AF +Macute;1E3E +Mcircle;24C2 +Mdotaccent;1E40 +Mdotbelow;1E42 +Menarmenian;0544 +Mmonospace;FF2D +Msmall;F76D +Mturned;019C +Mu;039C +N;004E +NJ;01CA +Nacute;0143 +Ncaron;0147 +Ncedilla;0145 +Ncircle;24C3 +Ncircumflexbelow;1E4A +Ncommaaccent;0145 +Ndotaccent;1E44 +Ndotbelow;1E46 +Nhookleft;019D +Nineroman;2168 +Nj;01CB +Njecyrillic;040A +Nlinebelow;1E48 +Nmonospace;FF2E +Nowarmenian;0546 +Nsmall;F76E +Ntilde;00D1 +Ntildesmall;F7F1 +Nu;039D +O;004F +OE;0152 +OEsmall;F6FA +Oacute;00D3 +Oacutesmall;F7F3 +Obarredcyrillic;04E8 +Obarreddieresiscyrillic;04EA +Obreve;014E +Ocaron;01D1 +Ocenteredtilde;019F +Ocircle;24C4 +Ocircumflex;00D4 +Ocircumflexacute;1ED0 +Ocircumflexdotbelow;1ED8 +Ocircumflexgrave;1ED2 +Ocircumflexhookabove;1ED4 +Ocircumflexsmall;F7F4 +Ocircumflextilde;1ED6 +Ocyrillic;041E +Odblacute;0150 +Odblgrave;020C +Odieresis;00D6 +Odieresiscyrillic;04E6 +Odieresissmall;F7F6 +Odotbelow;1ECC +Ogoneksmall;F6FB +Ograve;00D2 +Ogravesmall;F7F2 +Oharmenian;0555 +Ohm;2126 +Ohookabove;1ECE +Ohorn;01A0 +Ohornacute;1EDA +Ohorndotbelow;1EE2 +Ohorngrave;1EDC +Ohornhookabove;1EDE +Ohorntilde;1EE0 +Ohungarumlaut;0150 +Oi;01A2 +Oinvertedbreve;020E +Omacron;014C +Omacronacute;1E52 +Omacrongrave;1E50 +Omega;2126 +Omegacyrillic;0460 +Omegagreek;03A9 +Omegaroundcyrillic;047A +Omegatitlocyrillic;047C +Omegatonos;038F +Omicron;039F +Omicrontonos;038C +Omonospace;FF2F +Oneroman;2160 +Oogonek;01EA +Oogonekmacron;01EC +Oopen;0186 +Oslash;00D8 +Oslashacute;01FE +Oslashsmall;F7F8 +Osmall;F76F +Ostrokeacute;01FE +Otcyrillic;047E +Otilde;00D5 +Otildeacute;1E4C +Otildedieresis;1E4E 
+Otildesmall;F7F5 +P;0050 +Pacute;1E54 +Pcircle;24C5 +Pdotaccent;1E56 +Pecyrillic;041F +Peharmenian;054A +Pemiddlehookcyrillic;04A6 +Phi;03A6 +Phook;01A4 +Pi;03A0 +Piwrarmenian;0553 +Pmonospace;FF30 +Psi;03A8 +Psicyrillic;0470 +Psmall;F770 +Q;0051 +Qcircle;24C6 +Qmonospace;FF31 +Qsmall;F771 +R;0052 +Raarmenian;054C +Racute;0154 +Rcaron;0158 +Rcedilla;0156 +Rcircle;24C7 +Rcommaaccent;0156 +Rdblgrave;0210 +Rdotaccent;1E58 +Rdotbelow;1E5A +Rdotbelowmacron;1E5C +Reharmenian;0550 +Rfraktur;211C +Rho;03A1 +Ringsmall;F6FC +Rinvertedbreve;0212 +Rlinebelow;1E5E +Rmonospace;FF32 +Rsmall;F772 +Rsmallinverted;0281 +Rsmallinvertedsuperior;02B6 +S;0053 +SF010000;250C +SF020000;2514 +SF030000;2510 +SF040000;2518 +SF050000;253C +SF060000;252C +SF070000;2534 +SF080000;251C +SF090000;2524 +SF100000;2500 +SF110000;2502 +SF190000;2561 +SF200000;2562 +SF210000;2556 +SF220000;2555 +SF230000;2563 +SF240000;2551 +SF250000;2557 +SF260000;255D +SF270000;255C +SF280000;255B +SF360000;255E +SF370000;255F +SF380000;255A +SF390000;2554 +SF400000;2569 +SF410000;2566 +SF420000;2560 +SF430000;2550 +SF440000;256C +SF450000;2567 +SF460000;2568 +SF470000;2564 +SF480000;2565 +SF490000;2559 +SF500000;2558 +SF510000;2552 +SF520000;2553 +SF530000;256B +SF540000;256A +Sacute;015A +Sacutedotaccent;1E64 +Sampigreek;03E0 +Scaron;0160 +Scarondotaccent;1E66 +Scaronsmall;F6FD +Scedilla;015E +Schwa;018F +Schwacyrillic;04D8 +Schwadieresiscyrillic;04DA +Scircle;24C8 +Scircumflex;015C +Scommaaccent;0218 +Sdotaccent;1E60 +Sdotbelow;1E62 +Sdotbelowdotaccent;1E68 +Seharmenian;054D +Sevenroman;2166 +Shaarmenian;0547 +Shacyrillic;0428 +Shchacyrillic;0429 +Sheicoptic;03E2 +Shhacyrillic;04BA +Shimacoptic;03EC +Sigma;03A3 +Sixroman;2165 +Smonospace;FF33 +Softsigncyrillic;042C +Ssmall;F773 +Stigmagreek;03DA +T;0054 +Tau;03A4 +Tbar;0166 +Tcaron;0164 +Tcedilla;0162 +Tcircle;24C9 +Tcircumflexbelow;1E70 +Tcommaaccent;0162 +Tdotaccent;1E6A +Tdotbelow;1E6C +Tecyrillic;0422 +Tedescendercyrillic;04AC +Tenroman;2169 
+Tetsecyrillic;04B4 +Theta;0398 +Thook;01AC +Thorn;00DE +Thornsmall;F7FE +Threeroman;2162 +Tildesmall;F6FE +Tiwnarmenian;054F +Tlinebelow;1E6E +Tmonospace;FF34 +Toarmenian;0539 +Tonefive;01BC +Tonesix;0184 +Tonetwo;01A7 +Tretroflexhook;01AE +Tsecyrillic;0426 +Tshecyrillic;040B +Tsmall;F774 +Twelveroman;216B +Tworoman;2161 +U;0055 +Uacute;00DA +Uacutesmall;F7FA +Ubreve;016C +Ucaron;01D3 +Ucircle;24CA +Ucircumflex;00DB +Ucircumflexbelow;1E76 +Ucircumflexsmall;F7FB +Ucyrillic;0423 +Udblacute;0170 +Udblgrave;0214 +Udieresis;00DC +Udieresisacute;01D7 +Udieresisbelow;1E72 +Udieresiscaron;01D9 +Udieresiscyrillic;04F0 +Udieresisgrave;01DB +Udieresismacron;01D5 +Udieresissmall;F7FC +Udotbelow;1EE4 +Ugrave;00D9 +Ugravesmall;F7F9 +Uhookabove;1EE6 +Uhorn;01AF +Uhornacute;1EE8 +Uhorndotbelow;1EF0 +Uhorngrave;1EEA +Uhornhookabove;1EEC +Uhorntilde;1EEE +Uhungarumlaut;0170 +Uhungarumlautcyrillic;04F2 +Uinvertedbreve;0216 +Ukcyrillic;0478 +Umacron;016A +Umacroncyrillic;04EE +Umacrondieresis;1E7A +Umonospace;FF35 +Uogonek;0172 +Upsilon;03A5 +Upsilon1;03D2 +Upsilonacutehooksymbolgreek;03D3 +Upsilonafrican;01B1 +Upsilondieresis;03AB +Upsilondieresishooksymbolgreek;03D4 +Upsilonhooksymbol;03D2 +Upsilontonos;038E +Uring;016E +Ushortcyrillic;040E +Usmall;F775 +Ustraightcyrillic;04AE +Ustraightstrokecyrillic;04B0 +Utilde;0168 +Utildeacute;1E78 +Utildebelow;1E74 +V;0056 +Vcircle;24CB +Vdotbelow;1E7E +Vecyrillic;0412 +Vewarmenian;054E +Vhook;01B2 +Vmonospace;FF36 +Voarmenian;0548 +Vsmall;F776 +Vtilde;1E7C +W;0057 +Wacute;1E82 +Wcircle;24CC +Wcircumflex;0174 +Wdieresis;1E84 +Wdotaccent;1E86 +Wdotbelow;1E88 +Wgrave;1E80 +Wmonospace;FF37 +Wsmall;F777 +X;0058 +Xcircle;24CD +Xdieresis;1E8C +Xdotaccent;1E8A +Xeharmenian;053D +Xi;039E +Xmonospace;FF38 +Xsmall;F778 +Y;0059 +Yacute;00DD +Yacutesmall;F7FD +Yatcyrillic;0462 +Ycircle;24CE +Ycircumflex;0176 +Ydieresis;0178 +Ydieresissmall;F7FF +Ydotaccent;1E8E +Ydotbelow;1EF4 +Yericyrillic;042B +Yerudieresiscyrillic;04F8 +Ygrave;1EF2 +Yhook;01B3 
+Yhookabove;1EF6 +Yiarmenian;0545 +Yicyrillic;0407 +Yiwnarmenian;0552 +Ymonospace;FF39 +Ysmall;F779 +Ytilde;1EF8 +Yusbigcyrillic;046A +Yusbigiotifiedcyrillic;046C +Yuslittlecyrillic;0466 +Yuslittleiotifiedcyrillic;0468 +Z;005A +Zaarmenian;0536 +Zacute;0179 +Zcaron;017D +Zcaronsmall;F6FF +Zcircle;24CF +Zcircumflex;1E90 +Zdot;017B +Zdotaccent;017B +Zdotbelow;1E92 +Zecyrillic;0417 +Zedescendercyrillic;0498 +Zedieresiscyrillic;04DE +Zeta;0396 +Zhearmenian;053A +Zhebrevecyrillic;04C1 +Zhecyrillic;0416 +Zhedescendercyrillic;0496 +Zhedieresiscyrillic;04DC +Zlinebelow;1E94 +Zmonospace;FF3A +Zsmall;F77A +Zstroke;01B5 +a;0061 +aabengali;0986 +aacute;00E1 +aadeva;0906 +aagujarati;0A86 +aagurmukhi;0A06 +aamatragurmukhi;0A3E +aarusquare;3303 +aavowelsignbengali;09BE +aavowelsigndeva;093E +aavowelsigngujarati;0ABE +abbreviationmarkarmenian;055F +abbreviationsigndeva;0970 +abengali;0985 +abopomofo;311A +abreve;0103 +abreveacute;1EAF +abrevecyrillic;04D1 +abrevedotbelow;1EB7 +abrevegrave;1EB1 +abrevehookabove;1EB3 +abrevetilde;1EB5 +acaron;01CE +acircle;24D0 +acircumflex;00E2 +acircumflexacute;1EA5 +acircumflexdotbelow;1EAD +acircumflexgrave;1EA7 +acircumflexhookabove;1EA9 +acircumflextilde;1EAB +acute;00B4 +acutebelowcmb;0317 +acutecmb;0301 +acutecomb;0301 +acutedeva;0954 +acutelowmod;02CF +acutetonecmb;0341 +acyrillic;0430 +adblgrave;0201 +addakgurmukhi;0A71 +adeva;0905 +adieresis;00E4 +adieresiscyrillic;04D3 +adieresismacron;01DF +adotbelow;1EA1 +adotmacron;01E1 +ae;00E6 +aeacute;01FD +aekorean;3150 +aemacron;01E3 +afii00208;2015 +afii08941;20A4 +afii10017;0410 +afii10018;0411 +afii10019;0412 +afii10020;0413 +afii10021;0414 +afii10022;0415 +afii10023;0401 +afii10024;0416 +afii10025;0417 +afii10026;0418 +afii10027;0419 +afii10028;041A +afii10029;041B +afii10030;041C +afii10031;041D +afii10032;041E +afii10033;041F +afii10034;0420 +afii10035;0421 +afii10036;0422 +afii10037;0423 +afii10038;0424 +afii10039;0425 +afii10040;0426 +afii10041;0427 +afii10042;0428 +afii10043;0429 
+afii10044;042A +afii10045;042B +afii10046;042C +afii10047;042D +afii10048;042E +afii10049;042F +afii10050;0490 +afii10051;0402 +afii10052;0403 +afii10053;0404 +afii10054;0405 +afii10055;0406 +afii10056;0407 +afii10057;0408 +afii10058;0409 +afii10059;040A +afii10060;040B +afii10061;040C +afii10062;040E +afii10063;F6C4 +afii10064;F6C5 +afii10065;0430 +afii10066;0431 +afii10067;0432 +afii10068;0433 +afii10069;0434 +afii10070;0435 +afii10071;0451 +afii10072;0436 +afii10073;0437 +afii10074;0438 +afii10075;0439 +afii10076;043A +afii10077;043B +afii10078;043C +afii10079;043D +afii10080;043E +afii10081;043F +afii10082;0440 +afii10083;0441 +afii10084;0442 +afii10085;0443 +afii10086;0444 +afii10087;0445 +afii10088;0446 +afii10089;0447 +afii10090;0448 +afii10091;0449 +afii10092;044A +afii10093;044B +afii10094;044C +afii10095;044D +afii10096;044E +afii10097;044F +afii10098;0491 +afii10099;0452 +afii10100;0453 +afii10101;0454 +afii10102;0455 +afii10103;0456 +afii10104;0457 +afii10105;0458 +afii10106;0459 +afii10107;045A +afii10108;045B +afii10109;045C +afii10110;045E +afii10145;040F +afii10146;0462 +afii10147;0472 +afii10148;0474 +afii10192;F6C6 +afii10193;045F +afii10194;0463 +afii10195;0473 +afii10196;0475 +afii10831;F6C7 +afii10832;F6C8 +afii10846;04D9 +afii299;200E +afii300;200F +afii301;200D +afii57381;066A +afii57388;060C +afii57392;0660 +afii57393;0661 +afii57394;0662 +afii57395;0663 +afii57396;0664 +afii57397;0665 +afii57398;0666 +afii57399;0667 +afii57400;0668 +afii57401;0669 +afii57403;061B +afii57407;061F +afii57409;0621 +afii57410;0622 +afii57411;0623 +afii57412;0624 +afii57413;0625 +afii57414;0626 +afii57415;0627 +afii57416;0628 +afii57417;0629 +afii57418;062A +afii57419;062B +afii57420;062C +afii57421;062D +afii57422;062E +afii57423;062F +afii57424;0630 +afii57425;0631 +afii57426;0632 +afii57427;0633 +afii57428;0634 +afii57429;0635 +afii57430;0636 +afii57431;0637 +afii57432;0638 +afii57433;0639 +afii57434;063A +afii57440;0640 +afii57441;0641 +afii57442;0642 
+afii57443;0643 +afii57444;0644 +afii57445;0645 +afii57446;0646 +afii57448;0648 +afii57449;0649 +afii57450;064A +afii57451;064B +afii57452;064C +afii57453;064D +afii57454;064E +afii57455;064F +afii57456;0650 +afii57457;0651 +afii57458;0652 +afii57470;0647 +afii57505;06A4 +afii57506;067E +afii57507;0686 +afii57508;0698 +afii57509;06AF +afii57511;0679 +afii57512;0688 +afii57513;0691 +afii57514;06BA +afii57519;06D2 +afii57534;06D5 +afii57636;20AA +afii57645;05BE +afii57658;05C3 +afii57664;05D0 +afii57665;05D1 +afii57666;05D2 +afii57667;05D3 +afii57668;05D4 +afii57669;05D5 +afii57670;05D6 +afii57671;05D7 +afii57672;05D8 +afii57673;05D9 +afii57674;05DA +afii57675;05DB +afii57676;05DC +afii57677;05DD +afii57678;05DE +afii57679;05DF +afii57680;05E0 +afii57681;05E1 +afii57682;05E2 +afii57683;05E3 +afii57684;05E4 +afii57685;05E5 +afii57686;05E6 +afii57687;05E7 +afii57688;05E8 +afii57689;05E9 +afii57690;05EA +afii57694;FB2A +afii57695;FB2B +afii57700;FB4B +afii57705;FB1F +afii57716;05F0 +afii57717;05F1 +afii57718;05F2 +afii57723;FB35 +afii57793;05B4 +afii57794;05B5 +afii57795;05B6 +afii57796;05BB +afii57797;05B8 +afii57798;05B7 +afii57799;05B0 +afii57800;05B2 +afii57801;05B1 +afii57802;05B3 +afii57803;05C2 +afii57804;05C1 +afii57806;05B9 +afii57807;05BC +afii57839;05BD +afii57841;05BF +afii57842;05C0 +afii57929;02BC +afii61248;2105 +afii61289;2113 +afii61352;2116 +afii61573;202C +afii61574;202D +afii61575;202E +afii61664;200C +afii63167;066D +afii64937;02BD +agrave;00E0 +agujarati;0A85 +agurmukhi;0A05 +ahiragana;3042 +ahookabove;1EA3 +aibengali;0990 +aibopomofo;311E +aideva;0910 +aiecyrillic;04D5 +aigujarati;0A90 +aigurmukhi;0A10 +aimatragurmukhi;0A48 +ainarabic;0639 +ainfinalarabic;FECA +aininitialarabic;FECB +ainmedialarabic;FECC +ainvertedbreve;0203 +aivowelsignbengali;09C8 +aivowelsigndeva;0948 +aivowelsigngujarati;0AC8 +akatakana;30A2 +akatakanahalfwidth;FF71 +akorean;314F +alef;05D0 +alefarabic;0627 +alefdageshhebrew;FB30 +aleffinalarabic;FE8E 
+alefhamzaabovearabic;0623 +alefhamzaabovefinalarabic;FE84 +alefhamzabelowarabic;0625 +alefhamzabelowfinalarabic;FE88 +alefhebrew;05D0 +aleflamedhebrew;FB4F +alefmaddaabovearabic;0622 +alefmaddaabovefinalarabic;FE82 +alefmaksuraarabic;0649 +alefmaksurafinalarabic;FEF0 +alefmaksurainitialarabic;FEF3 +alefmaksuramedialarabic;FEF4 +alefpatahhebrew;FB2E +alefqamatshebrew;FB2F +aleph;2135 +allequal;224C +alpha;03B1 +alphatonos;03AC +amacron;0101 +amonospace;FF41 +ampersand;0026 +ampersandmonospace;FF06 +ampersandsmall;F726 +amsquare;33C2 +anbopomofo;3122 +angbopomofo;3124 +angkhankhuthai;0E5A +angle;2220 +anglebracketleft;3008 +anglebracketleftvertical;FE3F +anglebracketright;3009 +anglebracketrightvertical;FE40 +angleleft;2329 +angleright;232A +angstrom;212B +anoteleia;0387 +anudattadeva;0952 +anusvarabengali;0982 +anusvaradeva;0902 +anusvaragujarati;0A82 +aogonek;0105 +apaatosquare;3300 +aparen;249C +apostrophearmenian;055A +apostrophemod;02BC +apple;F8FF +approaches;2250 +approxequal;2248 +approxequalorimage;2252 +approximatelyequal;2245 +araeaekorean;318E +araeakorean;318D +arc;2312 +arighthalfring;1E9A +aring;00E5 +aringacute;01FB +aringbelow;1E01 +arrowboth;2194 +arrowdashdown;21E3 +arrowdashleft;21E0 +arrowdashright;21E2 +arrowdashup;21E1 +arrowdblboth;21D4 +arrowdbldown;21D3 +arrowdblleft;21D0 +arrowdblright;21D2 +arrowdblup;21D1 +arrowdown;2193 +arrowdownleft;2199 +arrowdownright;2198 +arrowdownwhite;21E9 +arrowheaddownmod;02C5 +arrowheadleftmod;02C2 +arrowheadrightmod;02C3 +arrowheadupmod;02C4 +arrowhorizex;F8E7 +arrowleft;2190 +arrowleftdbl;21D0 +arrowleftdblstroke;21CD +arrowleftoverright;21C6 +arrowleftwhite;21E6 +arrowright;2192 +arrowrightdblstroke;21CF +arrowrightheavy;279E +arrowrightoverleft;21C4 +arrowrightwhite;21E8 +arrowtableft;21E4 +arrowtabright;21E5 +arrowup;2191 +arrowupdn;2195 +arrowupdnbse;21A8 +arrowupdownbase;21A8 +arrowupleft;2196 +arrowupleftofdown;21C5 +arrowupright;2197 +arrowupwhite;21E7 +arrowvertex;F8E6 +asciicircum;005E 
+asciicircummonospace;FF3E +asciitilde;007E +asciitildemonospace;FF5E +ascript;0251 +ascriptturned;0252 +asmallhiragana;3041 +asmallkatakana;30A1 +asmallkatakanahalfwidth;FF67 +asterisk;002A +asteriskaltonearabic;066D +asteriskarabic;066D +asteriskmath;2217 +asteriskmonospace;FF0A +asterisksmall;FE61 +asterism;2042 +asuperior;F6E9 +asymptoticallyequal;2243 +at;0040 +atilde;00E3 +atmonospace;FF20 +atsmall;FE6B +aturned;0250 +aubengali;0994 +aubopomofo;3120 +audeva;0914 +augujarati;0A94 +augurmukhi;0A14 +aulengthmarkbengali;09D7 +aumatragurmukhi;0A4C +auvowelsignbengali;09CC +auvowelsigndeva;094C +auvowelsigngujarati;0ACC +avagrahadeva;093D +aybarmenian;0561 +ayin;05E2 +ayinaltonehebrew;FB20 +ayinhebrew;05E2 +b;0062 +babengali;09AC +backslash;005C +backslashmonospace;FF3C +badeva;092C +bagujarati;0AAC +bagurmukhi;0A2C +bahiragana;3070 +bahtthai;0E3F +bakatakana;30D0 +bar;007C +barmonospace;FF5C +bbopomofo;3105 +bcircle;24D1 +bdotaccent;1E03 +bdotbelow;1E05 +beamedsixteenthnotes;266C +because;2235 +becyrillic;0431 +beharabic;0628 +behfinalarabic;FE90 +behinitialarabic;FE91 +behiragana;3079 +behmedialarabic;FE92 +behmeeminitialarabic;FC9F +behmeemisolatedarabic;FC08 +behnoonfinalarabic;FC6D +bekatakana;30D9 +benarmenian;0562 +bet;05D1 +beta;03B2 +betasymbolgreek;03D0 +betdagesh;FB31 +betdageshhebrew;FB31 +bethebrew;05D1 +betrafehebrew;FB4C +bhabengali;09AD +bhadeva;092D +bhagujarati;0AAD +bhagurmukhi;0A2D +bhook;0253 +bihiragana;3073 +bikatakana;30D3 +bilabialclick;0298 +bindigurmukhi;0A02 +birusquare;3331 +blackcircle;25CF +blackdiamond;25C6 +blackdownpointingtriangle;25BC +blackleftpointingpointer;25C4 +blackleftpointingtriangle;25C0 +blacklenticularbracketleft;3010 +blacklenticularbracketleftvertical;FE3B +blacklenticularbracketright;3011 +blacklenticularbracketrightvertical;FE3C +blacklowerlefttriangle;25E3 +blacklowerrighttriangle;25E2 +blackrectangle;25AC +blackrightpointingpointer;25BA +blackrightpointingtriangle;25B6 +blacksmallsquare;25AA 
+blacksmilingface;263B +blacksquare;25A0 +blackstar;2605 +blackupperlefttriangle;25E4 +blackupperrighttriangle;25E5 +blackuppointingsmalltriangle;25B4 +blackuppointingtriangle;25B2 +blank;2423 +blinebelow;1E07 +block;2588 +bmonospace;FF42 +bobaimaithai;0E1A +bohiragana;307C +bokatakana;30DC +bparen;249D +bqsquare;33C3 +braceex;F8F4 +braceleft;007B +braceleftbt;F8F3 +braceleftmid;F8F2 +braceleftmonospace;FF5B +braceleftsmall;FE5B +bracelefttp;F8F1 +braceleftvertical;FE37 +braceright;007D +bracerightbt;F8FE +bracerightmid;F8FD +bracerightmonospace;FF5D +bracerightsmall;FE5C +bracerighttp;F8FC +bracerightvertical;FE38 +bracketleft;005B +bracketleftbt;F8F0 +bracketleftex;F8EF +bracketleftmonospace;FF3B +bracketlefttp;F8EE +bracketright;005D +bracketrightbt;F8FB +bracketrightex;F8FA +bracketrightmonospace;FF3D +bracketrighttp;F8F9 +breve;02D8 +brevebelowcmb;032E +brevecmb;0306 +breveinvertedbelowcmb;032F +breveinvertedcmb;0311 +breveinverteddoublecmb;0361 +bridgebelowcmb;032A +bridgeinvertedbelowcmb;033A +brokenbar;00A6 +bstroke;0180 +bsuperior;F6EA +btopbar;0183 +buhiragana;3076 +bukatakana;30D6 +bullet;2022 +bulletinverse;25D8 +bulletoperator;2219 +bullseye;25CE +c;0063 +caarmenian;056E +cabengali;099A +cacute;0107 +cadeva;091A +cagujarati;0A9A +cagurmukhi;0A1A +calsquare;3388 +candrabindubengali;0981 +candrabinducmb;0310 +candrabindudeva;0901 +candrabindugujarati;0A81 +capslock;21EA +careof;2105 +caron;02C7 +caronbelowcmb;032C +caroncmb;030C +carriagereturn;21B5 +cbopomofo;3118 +ccaron;010D +ccedilla;00E7 +ccedillaacute;1E09 +ccircle;24D2 +ccircumflex;0109 +ccurl;0255 +cdot;010B +cdotaccent;010B +cdsquare;33C5 +cedilla;00B8 +cedillacmb;0327 +cent;00A2 +centigrade;2103 +centinferior;F6DF +centmonospace;FFE0 +centoldstyle;F7A2 +centsuperior;F6E0 +chaarmenian;0579 +chabengali;099B +chadeva;091B +chagujarati;0A9B +chagurmukhi;0A1B +chbopomofo;3114 +cheabkhasiancyrillic;04BD +checkmark;2713 +checyrillic;0447 +chedescenderabkhasiancyrillic;04BF +chedescendercyrillic;04B7 
+chedieresiscyrillic;04F5 +cheharmenian;0573 +chekhakassiancyrillic;04CC +cheverticalstrokecyrillic;04B9 +chi;03C7 +chieuchacirclekorean;3277 +chieuchaparenkorean;3217 +chieuchcirclekorean;3269 +chieuchkorean;314A +chieuchparenkorean;3209 +chochangthai;0E0A +chochanthai;0E08 +chochingthai;0E09 +chochoethai;0E0C +chook;0188 +cieucacirclekorean;3276 +cieucaparenkorean;3216 +cieuccirclekorean;3268 +cieuckorean;3148 +cieucparenkorean;3208 +cieucuparenkorean;321C +circle;25CB +circlemultiply;2297 +circleot;2299 +circleplus;2295 +circlepostalmark;3036 +circlewithlefthalfblack;25D0 +circlewithrighthalfblack;25D1 +circumflex;02C6 +circumflexbelowcmb;032D +circumflexcmb;0302 +clear;2327 +clickalveolar;01C2 +clickdental;01C0 +clicklateral;01C1 +clickretroflex;01C3 +club;2663 +clubsuitblack;2663 +clubsuitwhite;2667 +cmcubedsquare;33A4 +cmonospace;FF43 +cmsquaredsquare;33A0 +coarmenian;0581 +colon;003A +colonmonetary;20A1 +colonmonospace;FF1A +colonsign;20A1 +colonsmall;FE55 +colontriangularhalfmod;02D1 +colontriangularmod;02D0 +comma;002C +commaabovecmb;0313 +commaaboverightcmb;0315 +commaaccent;F6C3 +commaarabic;060C +commaarmenian;055D +commainferior;F6E1 +commamonospace;FF0C +commareversedabovecmb;0314 +commareversedmod;02BD +commasmall;FE50 +commasuperior;F6E2 +commaturnedabovecmb;0312 +commaturnedmod;02BB +compass;263C +congruent;2245 +contourintegral;222E +control;2303 +controlACK;0006 +controlBEL;0007 +controlBS;0008 +controlCAN;0018 +controlCR;000D +controlDC1;0011 +controlDC2;0012 +controlDC3;0013 +controlDC4;0014 +controlDEL;007F +controlDLE;0010 +controlEM;0019 +controlENQ;0005 +controlEOT;0004 +controlESC;001B +controlETB;0017 +controlETX;0003 +controlFF;000C +controlFS;001C +controlGS;001D +controlHT;0009 +controlLF;000A +controlNAK;0015 +controlRS;001E +controlSI;000F +controlSO;000E +controlSOT;0002 +controlSTX;0001 +controlSUB;001A +controlSYN;0016 +controlUS;001F +controlVT;000B +copyright;00A9 +copyrightsans;F8E9 +copyrightserif;F6D9 +cornerbracketleft;300C 
+cornerbracketlefthalfwidth;FF62 +cornerbracketleftvertical;FE41 +cornerbracketright;300D +cornerbracketrighthalfwidth;FF63 +cornerbracketrightvertical;FE42 +corporationsquare;337F +cosquare;33C7 +coverkgsquare;33C6 +cparen;249E +cruzeiro;20A2 +cstretched;0297 +curlyand;22CF +curlyor;22CE +currency;00A4 +cyrBreve;F6D1 +cyrFlex;F6D2 +cyrbreve;F6D4 +cyrflex;F6D5 +d;0064 +daarmenian;0564 +dabengali;09A6 +dadarabic;0636 +dadeva;0926 +dadfinalarabic;FEBE +dadinitialarabic;FEBF +dadmedialarabic;FEC0 +dagesh;05BC +dageshhebrew;05BC +dagger;2020 +daggerdbl;2021 +dagujarati;0AA6 +dagurmukhi;0A26 +dahiragana;3060 +dakatakana;30C0 +dalarabic;062F +dalet;05D3 +daletdagesh;FB33 +daletdageshhebrew;FB33 +dalethatafpatah;05D3 05B2 +dalethatafpatahhebrew;05D3 05B2 +dalethatafsegol;05D3 05B1 +dalethatafsegolhebrew;05D3 05B1 +dalethebrew;05D3 +dalethiriq;05D3 05B4 +dalethiriqhebrew;05D3 05B4 +daletholam;05D3 05B9 +daletholamhebrew;05D3 05B9 +daletpatah;05D3 05B7 +daletpatahhebrew;05D3 05B7 +daletqamats;05D3 05B8 +daletqamatshebrew;05D3 05B8 +daletqubuts;05D3 05BB +daletqubutshebrew;05D3 05BB +daletsegol;05D3 05B6 +daletsegolhebrew;05D3 05B6 +daletsheva;05D3 05B0 +daletshevahebrew;05D3 05B0 +dalettsere;05D3 05B5 +dalettserehebrew;05D3 05B5 +dalfinalarabic;FEAA +dammaarabic;064F +dammalowarabic;064F +dammatanaltonearabic;064C +dammatanarabic;064C +danda;0964 +dargahebrew;05A7 +dargalefthebrew;05A7 +dasiapneumatacyrilliccmb;0485 +dblGrave;F6D3 +dblanglebracketleft;300A +dblanglebracketleftvertical;FE3D +dblanglebracketright;300B +dblanglebracketrightvertical;FE3E +dblarchinvertedbelowcmb;032B +dblarrowleft;21D4 +dblarrowright;21D2 +dbldanda;0965 +dblgrave;F6D6 +dblgravecmb;030F +dblintegral;222C +dbllowline;2017 +dbllowlinecmb;0333 +dbloverlinecmb;033F +dblprimemod;02BA +dblverticalbar;2016 +dblverticallineabovecmb;030E +dbopomofo;3109 +dbsquare;33C8 +dcaron;010F +dcedilla;1E11 +dcircle;24D3 +dcircumflexbelow;1E13 +dcroat;0111 +ddabengali;09A1 +ddadeva;0921 +ddagujarati;0AA1 
+ddagurmukhi;0A21 +ddalarabic;0688 +ddalfinalarabic;FB89 +dddhadeva;095C +ddhabengali;09A2 +ddhadeva;0922 +ddhagujarati;0AA2 +ddhagurmukhi;0A22 +ddotaccent;1E0B +ddotbelow;1E0D +decimalseparatorarabic;066B +decimalseparatorpersian;066B +decyrillic;0434 +degree;00B0 +dehihebrew;05AD +dehiragana;3067 +deicoptic;03EF +dekatakana;30C7 +deleteleft;232B +deleteright;2326 +delta;03B4 +deltaturned;018D +denominatorminusonenumeratorbengali;09F8 +dezh;02A4 +dhabengali;09A7 +dhadeva;0927 +dhagujarati;0AA7 +dhagurmukhi;0A27 +dhook;0257 +dialytikatonos;0385 +dialytikatonoscmb;0344 +diamond;2666 +diamondsuitwhite;2662 +dieresis;00A8 +dieresisacute;F6D7 +dieresisbelowcmb;0324 +dieresiscmb;0308 +dieresisgrave;F6D8 +dieresistonos;0385 +dihiragana;3062 +dikatakana;30C2 +dittomark;3003 +divide;00F7 +divides;2223 +divisionslash;2215 +djecyrillic;0452 +dkshade;2593 +dlinebelow;1E0F +dlsquare;3397 +dmacron;0111 +dmonospace;FF44 +dnblock;2584 +dochadathai;0E0E +dodekthai;0E14 +dohiragana;3069 +dokatakana;30C9 +dollar;0024 +dollarinferior;F6E3 +dollarmonospace;FF04 +dollaroldstyle;F724 +dollarsmall;FE69 +dollarsuperior;F6E4 +dong;20AB +dorusquare;3326 +dotaccent;02D9 +dotaccentcmb;0307 +dotbelowcmb;0323 +dotbelowcomb;0323 +dotkatakana;30FB +dotlessi;0131 +dotlessj;F6BE +dotlessjstrokehook;0284 +dotmath;22C5 +dottedcircle;25CC +doubleyodpatah;FB1F +doubleyodpatahhebrew;FB1F +downtackbelowcmb;031E +downtackmod;02D5 +dparen;249F +dsuperior;F6EB +dtail;0256 +dtopbar;018C +duhiragana;3065 +dukatakana;30C5 +dz;01F3 +dzaltone;02A3 +dzcaron;01C6 +dzcurl;02A5 +dzeabkhasiancyrillic;04E1 +dzecyrillic;0455 +dzhecyrillic;045F +e;0065 +eacute;00E9 +earth;2641 +ebengali;098F +ebopomofo;311C +ebreve;0115 +ecandradeva;090D +ecandragujarati;0A8D +ecandravowelsigndeva;0945 +ecandravowelsigngujarati;0AC5 +ecaron;011B +ecedillabreve;1E1D +echarmenian;0565 +echyiwnarmenian;0587 +ecircle;24D4 +ecircumflex;00EA +ecircumflexacute;1EBF +ecircumflexbelow;1E19 +ecircumflexdotbelow;1EC7 +ecircumflexgrave;1EC1 
+ecircumflexhookabove;1EC3 +ecircumflextilde;1EC5 +ecyrillic;0454 +edblgrave;0205 +edeva;090F +edieresis;00EB +edot;0117 +edotaccent;0117 +edotbelow;1EB9 +eegurmukhi;0A0F +eematragurmukhi;0A47 +efcyrillic;0444 +egrave;00E8 +egujarati;0A8F +eharmenian;0567 +ehbopomofo;311D +ehiragana;3048 +ehookabove;1EBB +eibopomofo;311F +eight;0038 +eightarabic;0668 +eightbengali;09EE +eightcircle;2467 +eightcircleinversesansserif;2791 +eightdeva;096E +eighteencircle;2471 +eighteenparen;2485 +eighteenperiod;2499 +eightgujarati;0AEE +eightgurmukhi;0A6E +eighthackarabic;0668 +eighthangzhou;3028 +eighthnotebeamed;266B +eightideographicparen;3227 +eightinferior;2088 +eightmonospace;FF18 +eightoldstyle;F738 +eightparen;247B +eightperiod;248F +eightpersian;06F8 +eightroman;2177 +eightsuperior;2078 +eightthai;0E58 +einvertedbreve;0207 +eiotifiedcyrillic;0465 +ekatakana;30A8 +ekatakanahalfwidth;FF74 +ekonkargurmukhi;0A74 +ekorean;3154 +elcyrillic;043B +element;2208 +elevencircle;246A +elevenparen;247E +elevenperiod;2492 +elevenroman;217A +ellipsis;2026 +ellipsisvertical;22EE +emacron;0113 +emacronacute;1E17 +emacrongrave;1E15 +emcyrillic;043C +emdash;2014 +emdashvertical;FE31 +emonospace;FF45 +emphasismarkarmenian;055B +emptyset;2205 +enbopomofo;3123 +encyrillic;043D +endash;2013 +endashvertical;FE32 +endescendercyrillic;04A3 +eng;014B +engbopomofo;3125 +enghecyrillic;04A5 +enhookcyrillic;04C8 +enspace;2002 +eogonek;0119 +eokorean;3153 +eopen;025B +eopenclosed;029A +eopenreversed;025C +eopenreversedclosed;025E +eopenreversedhook;025D +eparen;24A0 +epsilon;03B5 +epsilontonos;03AD +equal;003D +equalmonospace;FF1D +equalsmall;FE66 +equalsuperior;207C +equivalence;2261 +erbopomofo;3126 +ercyrillic;0440 +ereversed;0258 +ereversedcyrillic;044D +escyrillic;0441 +esdescendercyrillic;04AB +esh;0283 +eshcurl;0286 +eshortdeva;090E +eshortvowelsigndeva;0946 +eshreversedloop;01AA +eshsquatreversed;0285 +esmallhiragana;3047 +esmallkatakana;30A7 +esmallkatakanahalfwidth;FF6A +estimated;212E 
+esuperior;F6EC +eta;03B7 +etarmenian;0568 +etatonos;03AE +eth;00F0 +etilde;1EBD +etildebelow;1E1B +etnahtafoukhhebrew;0591 +etnahtafoukhlefthebrew;0591 +etnahtahebrew;0591 +etnahtalefthebrew;0591 +eturned;01DD +eukorean;3161 +euro;20AC +evowelsignbengali;09C7 +evowelsigndeva;0947 +evowelsigngujarati;0AC7 +exclam;0021 +exclamarmenian;055C +exclamdbl;203C +exclamdown;00A1 +exclamdownsmall;F7A1 +exclammonospace;FF01 +exclamsmall;F721 +existential;2203 +ezh;0292 +ezhcaron;01EF +ezhcurl;0293 +ezhreversed;01B9 +ezhtail;01BA +f;0066 +fadeva;095E +fagurmukhi;0A5E +fahrenheit;2109 +fathaarabic;064E +fathalowarabic;064E +fathatanarabic;064B +fbopomofo;3108 +fcircle;24D5 +fdotaccent;1E1F +feharabic;0641 +feharmenian;0586 +fehfinalarabic;FED2 +fehinitialarabic;FED3 +fehmedialarabic;FED4 +feicoptic;03E5 +female;2640 +ff;FB00 +ffi;FB03 +ffl;FB04 +fi;FB01 +fifteencircle;246E +fifteenparen;2482 +fifteenperiod;2496 +figuredash;2012 +filledbox;25A0 +filledrect;25AC +finalkaf;05DA +finalkafdagesh;FB3A +finalkafdageshhebrew;FB3A +finalkafhebrew;05DA +finalkafqamats;05DA 05B8 +finalkafqamatshebrew;05DA 05B8 +finalkafsheva;05DA 05B0 +finalkafshevahebrew;05DA 05B0 +finalmem;05DD +finalmemhebrew;05DD +finalnun;05DF +finalnunhebrew;05DF +finalpe;05E3 +finalpehebrew;05E3 +finaltsadi;05E5 +finaltsadihebrew;05E5 +firsttonechinese;02C9 +fisheye;25C9 +fitacyrillic;0473 +five;0035 +fivearabic;0665 +fivebengali;09EB +fivecircle;2464 +fivecircleinversesansserif;278E +fivedeva;096B +fiveeighths;215D +fivegujarati;0AEB +fivegurmukhi;0A6B +fivehackarabic;0665 +fivehangzhou;3025 +fiveideographicparen;3224 +fiveinferior;2085 +fivemonospace;FF15 +fiveoldstyle;F735 +fiveparen;2478 +fiveperiod;248C +fivepersian;06F5 +fiveroman;2174 +fivesuperior;2075 +fivethai;0E55 +fl;FB02 +florin;0192 +fmonospace;FF46 +fmsquare;3399 +fofanthai;0E1F +fofathai;0E1D +fongmanthai;0E4F +forall;2200 +four;0034 +fourarabic;0664 +fourbengali;09EA +fourcircle;2463 +fourcircleinversesansserif;278D +fourdeva;096A 
+fourgujarati;0AEA +fourgurmukhi;0A6A +fourhackarabic;0664 +fourhangzhou;3024 +fourideographicparen;3223 +fourinferior;2084 +fourmonospace;FF14 +fournumeratorbengali;09F7 +fouroldstyle;F734 +fourparen;2477 +fourperiod;248B +fourpersian;06F4 +fourroman;2173 +foursuperior;2074 +fourteencircle;246D +fourteenparen;2481 +fourteenperiod;2495 +fourthai;0E54 +fourthtonechinese;02CB +fparen;24A1 +fraction;2044 +franc;20A3 +g;0067 +gabengali;0997 +gacute;01F5 +gadeva;0917 +gafarabic;06AF +gaffinalarabic;FB93 +gafinitialarabic;FB94 +gafmedialarabic;FB95 +gagujarati;0A97 +gagurmukhi;0A17 +gahiragana;304C +gakatakana;30AC +gamma;03B3 +gammalatinsmall;0263 +gammasuperior;02E0 +gangiacoptic;03EB +gbopomofo;310D +gbreve;011F +gcaron;01E7 +gcedilla;0123 +gcircle;24D6 +gcircumflex;011D +gcommaaccent;0123 +gdot;0121 +gdotaccent;0121 +gecyrillic;0433 +gehiragana;3052 +gekatakana;30B2 +geometricallyequal;2251 +gereshaccenthebrew;059C +gereshhebrew;05F3 +gereshmuqdamhebrew;059D +germandbls;00DF +gershayimaccenthebrew;059E +gershayimhebrew;05F4 +getamark;3013 +ghabengali;0998 +ghadarmenian;0572 +ghadeva;0918 +ghagujarati;0A98 +ghagurmukhi;0A18 +ghainarabic;063A +ghainfinalarabic;FECE +ghaininitialarabic;FECF +ghainmedialarabic;FED0 +ghemiddlehookcyrillic;0495 +ghestrokecyrillic;0493 +gheupturncyrillic;0491 +ghhadeva;095A +ghhagurmukhi;0A5A +ghook;0260 +ghzsquare;3393 +gihiragana;304E +gikatakana;30AE +gimarmenian;0563 +gimel;05D2 +gimeldagesh;FB32 +gimeldageshhebrew;FB32 +gimelhebrew;05D2 +gjecyrillic;0453 +glottalinvertedstroke;01BE +glottalstop;0294 +glottalstopinverted;0296 +glottalstopmod;02C0 +glottalstopreversed;0295 +glottalstopreversedmod;02C1 +glottalstopreversedsuperior;02E4 +glottalstopstroke;02A1 +glottalstopstrokereversed;02A2 +gmacron;1E21 +gmonospace;FF47 +gohiragana;3054 +gokatakana;30B4 +gparen;24A2 +gpasquare;33AC +gradient;2207 +grave;0060 +gravebelowcmb;0316 +gravecmb;0300 +gravecomb;0300 +gravedeva;0953 +gravelowmod;02CE +gravemonospace;FF40 +gravetonecmb;0340 
+greater;003E +greaterequal;2265 +greaterequalorless;22DB +greatermonospace;FF1E +greaterorequivalent;2273 +greaterorless;2277 +greateroverequal;2267 +greatersmall;FE65 +gscript;0261 +gstroke;01E5 +guhiragana;3050 +guillemotleft;00AB +guillemotright;00BB +guilsinglleft;2039 +guilsinglright;203A +gukatakana;30B0 +guramusquare;3318 +gysquare;33C9 +h;0068 +haabkhasiancyrillic;04A9 +haaltonearabic;06C1 +habengali;09B9 +hadescendercyrillic;04B3 +hadeva;0939 +hagujarati;0AB9 +hagurmukhi;0A39 +haharabic;062D +hahfinalarabic;FEA2 +hahinitialarabic;FEA3 +hahiragana;306F +hahmedialarabic;FEA4 +haitusquare;332A +hakatakana;30CF +hakatakanahalfwidth;FF8A +halantgurmukhi;0A4D +hamzaarabic;0621 +hamzadammaarabic;0621 064F +hamzadammatanarabic;0621 064C +hamzafathaarabic;0621 064E +hamzafathatanarabic;0621 064B +hamzalowarabic;0621 +hamzalowkasraarabic;0621 0650 +hamzalowkasratanarabic;0621 064D +hamzasukunarabic;0621 0652 +hangulfiller;3164 +hardsigncyrillic;044A +harpoonleftbarbup;21BC +harpoonrightbarbup;21C0 +hasquare;33CA +hatafpatah;05B2 +hatafpatah16;05B2 +hatafpatah23;05B2 +hatafpatah2f;05B2 +hatafpatahhebrew;05B2 +hatafpatahnarrowhebrew;05B2 +hatafpatahquarterhebrew;05B2 +hatafpatahwidehebrew;05B2 +hatafqamats;05B3 +hatafqamats1b;05B3 +hatafqamats28;05B3 +hatafqamats34;05B3 +hatafqamatshebrew;05B3 +hatafqamatsnarrowhebrew;05B3 +hatafqamatsquarterhebrew;05B3 +hatafqamatswidehebrew;05B3 +hatafsegol;05B1 +hatafsegol17;05B1 +hatafsegol24;05B1 +hatafsegol30;05B1 +hatafsegolhebrew;05B1 +hatafsegolnarrowhebrew;05B1 +hatafsegolquarterhebrew;05B1 +hatafsegolwidehebrew;05B1 +hbar;0127 +hbopomofo;310F +hbrevebelow;1E2B +hcedilla;1E29 +hcircle;24D7 +hcircumflex;0125 +hdieresis;1E27 +hdotaccent;1E23 +hdotbelow;1E25 +he;05D4 +heart;2665 +heartsuitblack;2665 +heartsuitwhite;2661 +hedagesh;FB34 +hedageshhebrew;FB34 +hehaltonearabic;06C1 +heharabic;0647 +hehebrew;05D4 +hehfinalaltonearabic;FBA7 +hehfinalalttwoarabic;FEEA +hehfinalarabic;FEEA +hehhamzaabovefinalarabic;FBA5 
+hehhamzaaboveisolatedarabic;FBA4 +hehinitialaltonearabic;FBA8 +hehinitialarabic;FEEB +hehiragana;3078 +hehmedialaltonearabic;FBA9 +hehmedialarabic;FEEC +heiseierasquare;337B +hekatakana;30D8 +hekatakanahalfwidth;FF8D +hekutaarusquare;3336 +henghook;0267 +herutusquare;3339 +het;05D7 +hethebrew;05D7 +hhook;0266 +hhooksuperior;02B1 +hieuhacirclekorean;327B +hieuhaparenkorean;321B +hieuhcirclekorean;326D +hieuhkorean;314E +hieuhparenkorean;320D +hihiragana;3072 +hikatakana;30D2 +hikatakanahalfwidth;FF8B +hiriq;05B4 +hiriq14;05B4 +hiriq21;05B4 +hiriq2d;05B4 +hiriqhebrew;05B4 +hiriqnarrowhebrew;05B4 +hiriqquarterhebrew;05B4 +hiriqwidehebrew;05B4 +hlinebelow;1E96 +hmonospace;FF48 +hoarmenian;0570 +hohipthai;0E2B +hohiragana;307B +hokatakana;30DB +hokatakanahalfwidth;FF8E +holam;05B9 +holam19;05B9 +holam26;05B9 +holam32;05B9 +holamhebrew;05B9 +holamnarrowhebrew;05B9 +holamquarterhebrew;05B9 +holamwidehebrew;05B9 +honokhukthai;0E2E +hookabovecomb;0309 +hookcmb;0309 +hookpalatalizedbelowcmb;0321 +hookretroflexbelowcmb;0322 +hoonsquare;3342 +horicoptic;03E9 +horizontalbar;2015 +horncmb;031B +hotsprings;2668 +house;2302 +hparen;24A3 +hsuperior;02B0 +hturned;0265 +huhiragana;3075 +huiitosquare;3333 +hukatakana;30D5 +hukatakanahalfwidth;FF8C +hungarumlaut;02DD +hungarumlautcmb;030B +hv;0195 +hyphen;002D +hypheninferior;F6E5 +hyphenmonospace;FF0D +hyphensmall;FE63 +hyphensuperior;F6E6 +hyphentwo;2010 +i;0069 +iacute;00ED +iacyrillic;044F +ibengali;0987 +ibopomofo;3127 +ibreve;012D +icaron;01D0 +icircle;24D8 +icircumflex;00EE +icyrillic;0456 +idblgrave;0209 +ideographearthcircle;328F +ideographfirecircle;328B +ideographicallianceparen;323F +ideographiccallparen;323A +ideographiccentrecircle;32A5 +ideographicclose;3006 +ideographiccomma;3001 +ideographiccommaleft;FF64 +ideographiccongratulationparen;3237 +ideographiccorrectcircle;32A3 +ideographicearthparen;322F +ideographicenterpriseparen;323D +ideographicexcellentcircle;329D +ideographicfestivalparen;3240 
+ideographicfinancialcircle;3296 +ideographicfinancialparen;3236 +ideographicfireparen;322B +ideographichaveparen;3232 +ideographichighcircle;32A4 +ideographiciterationmark;3005 +ideographiclaborcircle;3298 +ideographiclaborparen;3238 +ideographicleftcircle;32A7 +ideographiclowcircle;32A6 +ideographicmedicinecircle;32A9 +ideographicmetalparen;322E +ideographicmoonparen;322A +ideographicnameparen;3234 +ideographicperiod;3002 +ideographicprintcircle;329E +ideographicreachparen;3243 +ideographicrepresentparen;3239 +ideographicresourceparen;323E +ideographicrightcircle;32A8 +ideographicsecretcircle;3299 +ideographicselfparen;3242 +ideographicsocietyparen;3233 +ideographicspace;3000 +ideographicspecialparen;3235 +ideographicstockparen;3231 +ideographicstudyparen;323B +ideographicsunparen;3230 +ideographicsuperviseparen;323C +ideographicwaterparen;322C +ideographicwoodparen;322D +ideographiczero;3007 +ideographmetalcircle;328E +ideographmooncircle;328A +ideographnamecircle;3294 +ideographsuncircle;3290 +ideographwatercircle;328C +ideographwoodcircle;328D +ideva;0907 +idieresis;00EF +idieresisacute;1E2F +idieresiscyrillic;04E5 +idotbelow;1ECB +iebrevecyrillic;04D7 +iecyrillic;0435 +ieungacirclekorean;3275 +ieungaparenkorean;3215 +ieungcirclekorean;3267 +ieungkorean;3147 +ieungparenkorean;3207 +igrave;00EC +igujarati;0A87 +igurmukhi;0A07 +ihiragana;3044 +ihookabove;1EC9 +iibengali;0988 +iicyrillic;0438 +iideva;0908 +iigujarati;0A88 +iigurmukhi;0A08 +iimatragurmukhi;0A40 +iinvertedbreve;020B +iishortcyrillic;0439 +iivowelsignbengali;09C0 +iivowelsigndeva;0940 +iivowelsigngujarati;0AC0 +ij;0133 +ikatakana;30A4 +ikatakanahalfwidth;FF72 +ikorean;3163 +ilde;02DC +iluyhebrew;05AC +imacron;012B +imacroncyrillic;04E3 +imageorapproximatelyequal;2253 +imatragurmukhi;0A3F +imonospace;FF49 +increment;2206 +infinity;221E +iniarmenian;056B +integral;222B +integralbottom;2321 +integralbt;2321 +integralex;F8F5 +integraltop;2320 +integraltp;2320 +intersection;2229 +intisquare;3305 
+invbullet;25D8 +invcircle;25D9 +invsmileface;263B +iocyrillic;0451 +iogonek;012F +iota;03B9 +iotadieresis;03CA +iotadieresistonos;0390 +iotalatin;0269 +iotatonos;03AF +iparen;24A4 +irigurmukhi;0A72 +ismallhiragana;3043 +ismallkatakana;30A3 +ismallkatakanahalfwidth;FF68 +issharbengali;09FA +istroke;0268 +isuperior;F6ED +iterationhiragana;309D +iterationkatakana;30FD +itilde;0129 +itildebelow;1E2D +iubopomofo;3129 +iucyrillic;044E +ivowelsignbengali;09BF +ivowelsigndeva;093F +ivowelsigngujarati;0ABF +izhitsacyrillic;0475 +izhitsadblgravecyrillic;0477 +j;006A +jaarmenian;0571 +jabengali;099C +jadeva;091C +jagujarati;0A9C +jagurmukhi;0A1C +jbopomofo;3110 +jcaron;01F0 +jcircle;24D9 +jcircumflex;0135 +jcrossedtail;029D +jdotlessstroke;025F +jecyrillic;0458 +jeemarabic;062C +jeemfinalarabic;FE9E +jeeminitialarabic;FE9F +jeemmedialarabic;FEA0 +jeharabic;0698 +jehfinalarabic;FB8B +jhabengali;099D +jhadeva;091D +jhagujarati;0A9D +jhagurmukhi;0A1D +jheharmenian;057B +jis;3004 +jmonospace;FF4A +jparen;24A5 +jsuperior;02B2 +k;006B +kabashkircyrillic;04A1 +kabengali;0995 +kacute;1E31 +kacyrillic;043A +kadescendercyrillic;049B +kadeva;0915 +kaf;05DB +kafarabic;0643 +kafdagesh;FB3B +kafdageshhebrew;FB3B +kaffinalarabic;FEDA +kafhebrew;05DB +kafinitialarabic;FEDB +kafmedialarabic;FEDC +kafrafehebrew;FB4D +kagujarati;0A95 +kagurmukhi;0A15 +kahiragana;304B +kahookcyrillic;04C4 +kakatakana;30AB +kakatakanahalfwidth;FF76 +kappa;03BA +kappasymbolgreek;03F0 +kapyeounmieumkorean;3171 +kapyeounphieuphkorean;3184 +kapyeounpieupkorean;3178 +kapyeounssangpieupkorean;3179 +karoriisquare;330D +kashidaautoarabic;0640 +kashidaautonosidebearingarabic;0640 +kasmallkatakana;30F5 +kasquare;3384 +kasraarabic;0650 +kasratanarabic;064D +kastrokecyrillic;049F +katahiraprolongmarkhalfwidth;FF70 +kaverticalstrokecyrillic;049D +kbopomofo;310E +kcalsquare;3389 +kcaron;01E9 +kcedilla;0137 +kcircle;24DA +kcommaaccent;0137 +kdotbelow;1E33 +keharmenian;0584 +kehiragana;3051 +kekatakana;30B1 
+kekatakanahalfwidth;FF79 +kenarmenian;056F +kesmallkatakana;30F6 +kgreenlandic;0138 +khabengali;0996 +khacyrillic;0445 +khadeva;0916 +khagujarati;0A96 +khagurmukhi;0A16 +khaharabic;062E +khahfinalarabic;FEA6 +khahinitialarabic;FEA7 +khahmedialarabic;FEA8 +kheicoptic;03E7 +khhadeva;0959 +khhagurmukhi;0A59 +khieukhacirclekorean;3278 +khieukhaparenkorean;3218 +khieukhcirclekorean;326A +khieukhkorean;314B +khieukhparenkorean;320A +khokhaithai;0E02 +khokhonthai;0E05 +khokhuatthai;0E03 +khokhwaithai;0E04 +khomutthai;0E5B +khook;0199 +khorakhangthai;0E06 +khzsquare;3391 +kihiragana;304D +kikatakana;30AD +kikatakanahalfwidth;FF77 +kiroguramusquare;3315 +kiromeetorusquare;3316 +kirosquare;3314 +kiyeokacirclekorean;326E +kiyeokaparenkorean;320E +kiyeokcirclekorean;3260 +kiyeokkorean;3131 +kiyeokparenkorean;3200 +kiyeoksioskorean;3133 +kjecyrillic;045C +klinebelow;1E35 +klsquare;3398 +kmcubedsquare;33A6 +kmonospace;FF4B +kmsquaredsquare;33A2 +kohiragana;3053 +kohmsquare;33C0 +kokaithai;0E01 +kokatakana;30B3 +kokatakanahalfwidth;FF7A +kooposquare;331E +koppacyrillic;0481 +koreanstandardsymbol;327F +koroniscmb;0343 +kparen;24A6 +kpasquare;33AA +ksicyrillic;046F +ktsquare;33CF +kturned;029E +kuhiragana;304F +kukatakana;30AF +kukatakanahalfwidth;FF78 +kvsquare;33B8 +kwsquare;33BE +l;006C +labengali;09B2 +lacute;013A +ladeva;0932 +lagujarati;0AB2 +lagurmukhi;0A32 +lakkhangyaothai;0E45 +lamaleffinalarabic;FEFC +lamalefhamzaabovefinalarabic;FEF8 +lamalefhamzaaboveisolatedarabic;FEF7 +lamalefhamzabelowfinalarabic;FEFA +lamalefhamzabelowisolatedarabic;FEF9 +lamalefisolatedarabic;FEFB +lamalefmaddaabovefinalarabic;FEF6 +lamalefmaddaaboveisolatedarabic;FEF5 +lamarabic;0644 +lambda;03BB +lambdastroke;019B +lamed;05DC +lameddagesh;FB3C +lameddageshhebrew;FB3C +lamedhebrew;05DC +lamedholam;05DC 05B9 +lamedholamdagesh;05DC 05B9 05BC +lamedholamdageshhebrew;05DC 05B9 05BC +lamedholamhebrew;05DC 05B9 +lamfinalarabic;FEDE +lamhahinitialarabic;FCCA +laminitialarabic;FEDF 
+lamjeeminitialarabic;FCC9 +lamkhahinitialarabic;FCCB +lamlamhehisolatedarabic;FDF2 +lammedialarabic;FEE0 +lammeemhahinitialarabic;FD88 +lammeeminitialarabic;FCCC +lammeemjeeminitialarabic;FEDF FEE4 FEA0 +lammeemkhahinitialarabic;FEDF FEE4 FEA8 +largecircle;25EF +lbar;019A +lbelt;026C +lbopomofo;310C +lcaron;013E +lcedilla;013C +lcircle;24DB +lcircumflexbelow;1E3D +lcommaaccent;013C +ldot;0140 +ldotaccent;0140 +ldotbelow;1E37 +ldotbelowmacron;1E39 +leftangleabovecmb;031A +lefttackbelowcmb;0318 +less;003C +lessequal;2264 +lessequalorgreater;22DA +lessmonospace;FF1C +lessorequivalent;2272 +lessorgreater;2276 +lessoverequal;2266 +lesssmall;FE64 +lezh;026E +lfblock;258C +lhookretroflex;026D +lira;20A4 +liwnarmenian;056C +lj;01C9 +ljecyrillic;0459 +ll;F6C0 +lladeva;0933 +llagujarati;0AB3 +llinebelow;1E3B +llladeva;0934 +llvocalicbengali;09E1 +llvocalicdeva;0961 +llvocalicvowelsignbengali;09E3 +llvocalicvowelsigndeva;0963 +lmiddletilde;026B +lmonospace;FF4C +lmsquare;33D0 +lochulathai;0E2C +logicaland;2227 +logicalnot;00AC +logicalnotreversed;2310 +logicalor;2228 +lolingthai;0E25 +longs;017F +lowlinecenterline;FE4E +lowlinecmb;0332 +lowlinedashed;FE4D +lozenge;25CA +lparen;24A7 +lslash;0142 +lsquare;2113 +lsuperior;F6EE +ltshade;2591 +luthai;0E26 +lvocalicbengali;098C +lvocalicdeva;090C +lvocalicvowelsignbengali;09E2 +lvocalicvowelsigndeva;0962 +lxsquare;33D3 +m;006D +mabengali;09AE +macron;00AF +macronbelowcmb;0331 +macroncmb;0304 +macronlowmod;02CD +macronmonospace;FFE3 +macute;1E3F +madeva;092E +magujarati;0AAE +magurmukhi;0A2E +mahapakhhebrew;05A4 +mahapakhlefthebrew;05A4 +mahiragana;307E +maichattawalowleftthai;F895 +maichattawalowrightthai;F894 +maichattawathai;0E4B +maichattawaupperleftthai;F893 +maieklowleftthai;F88C +maieklowrightthai;F88B +maiekthai;0E48 +maiekupperleftthai;F88A +maihanakatleftthai;F884 +maihanakatthai;0E31 +maitaikhuleftthai;F889 +maitaikhuthai;0E47 +maitholowleftthai;F88F +maitholowrightthai;F88E +maithothai;0E49 +maithoupperleftthai;F88D 
+maitrilowleftthai;F892 +maitrilowrightthai;F891 +maitrithai;0E4A +maitriupperleftthai;F890 +maiyamokthai;0E46 +makatakana;30DE +makatakanahalfwidth;FF8F +male;2642 +mansyonsquare;3347 +maqafhebrew;05BE +mars;2642 +masoracirclehebrew;05AF +masquare;3383 +mbopomofo;3107 +mbsquare;33D4 +mcircle;24DC +mcubedsquare;33A5 +mdotaccent;1E41 +mdotbelow;1E43 +meemarabic;0645 +meemfinalarabic;FEE2 +meeminitialarabic;FEE3 +meemmedialarabic;FEE4 +meemmeeminitialarabic;FCD1 +meemmeemisolatedarabic;FC48 +meetorusquare;334D +mehiragana;3081 +meizierasquare;337E +mekatakana;30E1 +mekatakanahalfwidth;FF92 +mem;05DE +memdagesh;FB3E +memdageshhebrew;FB3E +memhebrew;05DE +menarmenian;0574 +merkhahebrew;05A5 +merkhakefulahebrew;05A6 +merkhakefulalefthebrew;05A6 +merkhalefthebrew;05A5 +mhook;0271 +mhzsquare;3392 +middledotkatakanahalfwidth;FF65 +middot;00B7 +mieumacirclekorean;3272 +mieumaparenkorean;3212 +mieumcirclekorean;3264 +mieumkorean;3141 +mieumpansioskorean;3170 +mieumparenkorean;3204 +mieumpieupkorean;316E +mieumsioskorean;316F +mihiragana;307F +mikatakana;30DF +mikatakanahalfwidth;FF90 +minus;2212 +minusbelowcmb;0320 +minuscircle;2296 +minusmod;02D7 +minusplus;2213 +minute;2032 +miribaarusquare;334A +mirisquare;3349 +mlonglegturned;0270 +mlsquare;3396 +mmcubedsquare;33A3 +mmonospace;FF4D +mmsquaredsquare;339F +mohiragana;3082 +mohmsquare;33C1 +mokatakana;30E2 +mokatakanahalfwidth;FF93 +molsquare;33D6 +momathai;0E21 +moverssquare;33A7 +moverssquaredsquare;33A8 +mparen;24A8 +mpasquare;33AB +mssquare;33B3 +msuperior;F6EF +mturned;026F +mu;00B5 +mu1;00B5 +muasquare;3382 +muchgreater;226B +muchless;226A +mufsquare;338C +mugreek;03BC +mugsquare;338D +muhiragana;3080 +mukatakana;30E0 +mukatakanahalfwidth;FF91 +mulsquare;3395 +multiply;00D7 +mumsquare;339B +munahhebrew;05A3 +munahlefthebrew;05A3 +musicalnote;266A +musicalnotedbl;266B +musicflatsign;266D +musicsharpsign;266F +mussquare;33B2 +muvsquare;33B6 +muwsquare;33BC +mvmegasquare;33B9 +mvsquare;33B7 +mwmegasquare;33BF 
+mwsquare;33BD +n;006E +nabengali;09A8 +nabla;2207 +nacute;0144 +nadeva;0928 +nagujarati;0AA8 +nagurmukhi;0A28 +nahiragana;306A +nakatakana;30CA +nakatakanahalfwidth;FF85 +napostrophe;0149 +nasquare;3381 +nbopomofo;310B +nbspace;00A0 +ncaron;0148 +ncedilla;0146 +ncircle;24DD +ncircumflexbelow;1E4B +ncommaaccent;0146 +ndotaccent;1E45 +ndotbelow;1E47 +nehiragana;306D +nekatakana;30CD +nekatakanahalfwidth;FF88 +newsheqelsign;20AA +nfsquare;338B +ngabengali;0999 +ngadeva;0919 +ngagujarati;0A99 +ngagurmukhi;0A19 +ngonguthai;0E07 +nhiragana;3093 +nhookleft;0272 +nhookretroflex;0273 +nieunacirclekorean;326F +nieunaparenkorean;320F +nieuncieuckorean;3135 +nieuncirclekorean;3261 +nieunhieuhkorean;3136 +nieunkorean;3134 +nieunpansioskorean;3168 +nieunparenkorean;3201 +nieunsioskorean;3167 +nieuntikeutkorean;3166 +nihiragana;306B +nikatakana;30CB +nikatakanahalfwidth;FF86 +nikhahitleftthai;F899 +nikhahitthai;0E4D +nine;0039 +ninearabic;0669 +ninebengali;09EF +ninecircle;2468 +ninecircleinversesansserif;2792 +ninedeva;096F +ninegujarati;0AEF +ninegurmukhi;0A6F +ninehackarabic;0669 +ninehangzhou;3029 +nineideographicparen;3228 +nineinferior;2089 +ninemonospace;FF19 +nineoldstyle;F739 +nineparen;247C +nineperiod;2490 +ninepersian;06F9 +nineroman;2178 +ninesuperior;2079 +nineteencircle;2472 +nineteenparen;2486 +nineteenperiod;249A +ninethai;0E59 +nj;01CC +njecyrillic;045A +nkatakana;30F3 +nkatakanahalfwidth;FF9D +nlegrightlong;019E +nlinebelow;1E49 +nmonospace;FF4E +nmsquare;339A +nnabengali;09A3 +nnadeva;0923 +nnagujarati;0AA3 +nnagurmukhi;0A23 +nnnadeva;0929 +nohiragana;306E +nokatakana;30CE +nokatakanahalfwidth;FF89 +nonbreakingspace;00A0 +nonenthai;0E13 +nonuthai;0E19 +noonarabic;0646 +noonfinalarabic;FEE6 +noonghunnaarabic;06BA +noonghunnafinalarabic;FB9F +noonhehinitialarabic;FEE7 FEEC +nooninitialarabic;FEE7 +noonjeeminitialarabic;FCD2 +noonjeemisolatedarabic;FC4B +noonmedialarabic;FEE8 +noonmeeminitialarabic;FCD5 +noonmeemisolatedarabic;FC4E +noonnoonfinalarabic;FC8D 
+notcontains;220C +notelement;2209 +notelementof;2209 +notequal;2260 +notgreater;226F +notgreaternorequal;2271 +notgreaternorless;2279 +notidentical;2262 +notless;226E +notlessnorequal;2270 +notparallel;2226 +notprecedes;2280 +notsubset;2284 +notsucceeds;2281 +notsuperset;2285 +nowarmenian;0576 +nparen;24A9 +nssquare;33B1 +nsuperior;207F +ntilde;00F1 +nu;03BD +nuhiragana;306C +nukatakana;30CC +nukatakanahalfwidth;FF87 +nuktabengali;09BC +nuktadeva;093C +nuktagujarati;0ABC +nuktagurmukhi;0A3C +numbersign;0023 +numbersignmonospace;FF03 +numbersignsmall;FE5F +numeralsigngreek;0374 +numeralsignlowergreek;0375 +numero;2116 +nun;05E0 +nundagesh;FB40 +nundageshhebrew;FB40 +nunhebrew;05E0 +nvsquare;33B5 +nwsquare;33BB +nyabengali;099E +nyadeva;091E +nyagujarati;0A9E +nyagurmukhi;0A1E +o;006F +oacute;00F3 +oangthai;0E2D +obarred;0275 +obarredcyrillic;04E9 +obarreddieresiscyrillic;04EB +obengali;0993 +obopomofo;311B +obreve;014F +ocandradeva;0911 +ocandragujarati;0A91 +ocandravowelsigndeva;0949 +ocandravowelsigngujarati;0AC9 +ocaron;01D2 +ocircle;24DE +ocircumflex;00F4 +ocircumflexacute;1ED1 +ocircumflexdotbelow;1ED9 +ocircumflexgrave;1ED3 +ocircumflexhookabove;1ED5 +ocircumflextilde;1ED7 +ocyrillic;043E +odblacute;0151 +odblgrave;020D +odeva;0913 +odieresis;00F6 +odieresiscyrillic;04E7 +odotbelow;1ECD +oe;0153 +oekorean;315A +ogonek;02DB +ogonekcmb;0328 +ograve;00F2 +ogujarati;0A93 +oharmenian;0585 +ohiragana;304A +ohookabove;1ECF +ohorn;01A1 +ohornacute;1EDB +ohorndotbelow;1EE3 +ohorngrave;1EDD +ohornhookabove;1EDF +ohorntilde;1EE1 +ohungarumlaut;0151 +oi;01A3 +oinvertedbreve;020F +okatakana;30AA +okatakanahalfwidth;FF75 +okorean;3157 +olehebrew;05AB +omacron;014D +omacronacute;1E53 +omacrongrave;1E51 +omdeva;0950 +omega;03C9 +omega1;03D6 +omegacyrillic;0461 +omegalatinclosed;0277 +omegaroundcyrillic;047B +omegatitlocyrillic;047D +omegatonos;03CE +omgujarati;0AD0 +omicron;03BF +omicrontonos;03CC +omonospace;FF4F +one;0031 +onearabic;0661 +onebengali;09E7 +onecircle;2460 
+onecircleinversesansserif;278A +onedeva;0967 +onedotenleader;2024 +oneeighth;215B +onefitted;F6DC +onegujarati;0AE7 +onegurmukhi;0A67 +onehackarabic;0661 +onehalf;00BD +onehangzhou;3021 +oneideographicparen;3220 +oneinferior;2081 +onemonospace;FF11 +onenumeratorbengali;09F4 +oneoldstyle;F731 +oneparen;2474 +oneperiod;2488 +onepersian;06F1 +onequarter;00BC +oneroman;2170 +onesuperior;00B9 +onethai;0E51 +onethird;2153 +oogonek;01EB +oogonekmacron;01ED +oogurmukhi;0A13 +oomatragurmukhi;0A4B +oopen;0254 +oparen;24AA +openbullet;25E6 +option;2325 +ordfeminine;00AA +ordmasculine;00BA +orthogonal;221F +oshortdeva;0912 +oshortvowelsigndeva;094A +oslash;00F8 +oslashacute;01FF +osmallhiragana;3049 +osmallkatakana;30A9 +osmallkatakanahalfwidth;FF6B +ostrokeacute;01FF +osuperior;F6F0 +otcyrillic;047F +otilde;00F5 +otildeacute;1E4D +otildedieresis;1E4F +oubopomofo;3121 +overline;203E +overlinecenterline;FE4A +overlinecmb;0305 +overlinedashed;FE49 +overlinedblwavy;FE4C +overlinewavy;FE4B +overscore;00AF +ovowelsignbengali;09CB +ovowelsigndeva;094B +ovowelsigngujarati;0ACB +p;0070 +paampssquare;3380 +paasentosquare;332B +pabengali;09AA +pacute;1E55 +padeva;092A +pagedown;21DF +pageup;21DE +pagujarati;0AAA +pagurmukhi;0A2A +pahiragana;3071 +paiyannoithai;0E2F +pakatakana;30D1 +palatalizationcyrilliccmb;0484 +palochkacyrillic;04C0 +pansioskorean;317F +paragraph;00B6 +parallel;2225 +parenleft;0028 +parenleftaltonearabic;FD3E +parenleftbt;F8ED +parenleftex;F8EC +parenleftinferior;208D +parenleftmonospace;FF08 +parenleftsmall;FE59 +parenleftsuperior;207D +parenlefttp;F8EB +parenleftvertical;FE35 +parenright;0029 +parenrightaltonearabic;FD3F +parenrightbt;F8F8 +parenrightex;F8F7 +parenrightinferior;208E +parenrightmonospace;FF09 +parenrightsmall;FE5A +parenrightsuperior;207E +parenrighttp;F8F6 +parenrightvertical;FE36 +partialdiff;2202 +paseqhebrew;05C0 +pashtahebrew;0599 +pasquare;33A9 +patah;05B7 +patah11;05B7 +patah1d;05B7 +patah2a;05B7 +patahhebrew;05B7 +patahnarrowhebrew;05B7 
+patahquarterhebrew;05B7 +patahwidehebrew;05B7 +pazerhebrew;05A1 +pbopomofo;3106 +pcircle;24DF +pdotaccent;1E57 +pe;05E4 +pecyrillic;043F +pedagesh;FB44 +pedageshhebrew;FB44 +peezisquare;333B +pefinaldageshhebrew;FB43 +peharabic;067E +peharmenian;057A +pehebrew;05E4 +pehfinalarabic;FB57 +pehinitialarabic;FB58 +pehiragana;307A +pehmedialarabic;FB59 +pekatakana;30DA +pemiddlehookcyrillic;04A7 +perafehebrew;FB4E +percent;0025 +percentarabic;066A +percentmonospace;FF05 +percentsmall;FE6A +period;002E +periodarmenian;0589 +periodcentered;00B7 +periodhalfwidth;FF61 +periodinferior;F6E7 +periodmonospace;FF0E +periodsmall;FE52 +periodsuperior;F6E8 +perispomenigreekcmb;0342 +perpendicular;22A5 +perthousand;2030 +peseta;20A7 +pfsquare;338A +phabengali;09AB +phadeva;092B +phagujarati;0AAB +phagurmukhi;0A2B +phi;03C6 +phi1;03D5 +phieuphacirclekorean;327A +phieuphaparenkorean;321A +phieuphcirclekorean;326C +phieuphkorean;314D +phieuphparenkorean;320C +philatin;0278 +phinthuthai;0E3A +phisymbolgreek;03D5 +phook;01A5 +phophanthai;0E1E +phophungthai;0E1C +phosamphaothai;0E20 +pi;03C0 +pieupacirclekorean;3273 +pieupaparenkorean;3213 +pieupcieuckorean;3176 +pieupcirclekorean;3265 +pieupkiyeokkorean;3172 +pieupkorean;3142 +pieupparenkorean;3205 +pieupsioskiyeokkorean;3174 +pieupsioskorean;3144 +pieupsiostikeutkorean;3175 +pieupthieuthkorean;3177 +pieuptikeutkorean;3173 +pihiragana;3074 +pikatakana;30D4 +pisymbolgreek;03D6 +piwrarmenian;0583 +plus;002B +plusbelowcmb;031F +pluscircle;2295 +plusminus;00B1 +plusmod;02D6 +plusmonospace;FF0B +plussmall;FE62 +plussuperior;207A +pmonospace;FF50 +pmsquare;33D8 +pohiragana;307D +pointingindexdownwhite;261F +pointingindexleftwhite;261C +pointingindexrightwhite;261E +pointingindexupwhite;261D +pokatakana;30DD +poplathai;0E1B +postalmark;3012 +postalmarkface;3020 +pparen;24AB +precedes;227A +prescription;211E +primemod;02B9 +primereversed;2035 +product;220F +projective;2305 +prolongedkana;30FC +propellor;2318 +propersubset;2282 
+propersuperset;2283 +proportion;2237 +proportional;221D +psi;03C8 +psicyrillic;0471 +psilipneumatacyrilliccmb;0486 +pssquare;33B0 +puhiragana;3077 +pukatakana;30D7 +pvsquare;33B4 +pwsquare;33BA +q;0071 +qadeva;0958 +qadmahebrew;05A8 +qafarabic;0642 +qaffinalarabic;FED6 +qafinitialarabic;FED7 +qafmedialarabic;FED8 +qamats;05B8 +qamats10;05B8 +qamats1a;05B8 +qamats1c;05B8 +qamats27;05B8 +qamats29;05B8 +qamats33;05B8 +qamatsde;05B8 +qamatshebrew;05B8 +qamatsnarrowhebrew;05B8 +qamatsqatanhebrew;05B8 +qamatsqatannarrowhebrew;05B8 +qamatsqatanquarterhebrew;05B8 +qamatsqatanwidehebrew;05B8 +qamatsquarterhebrew;05B8 +qamatswidehebrew;05B8 +qarneyparahebrew;059F +qbopomofo;3111 +qcircle;24E0 +qhook;02A0 +qmonospace;FF51 +qof;05E7 +qofdagesh;FB47 +qofdageshhebrew;FB47 +qofhatafpatah;05E7 05B2 +qofhatafpatahhebrew;05E7 05B2 +qofhatafsegol;05E7 05B1 +qofhatafsegolhebrew;05E7 05B1 +qofhebrew;05E7 +qofhiriq;05E7 05B4 +qofhiriqhebrew;05E7 05B4 +qofholam;05E7 05B9 +qofholamhebrew;05E7 05B9 +qofpatah;05E7 05B7 +qofpatahhebrew;05E7 05B7 +qofqamats;05E7 05B8 +qofqamatshebrew;05E7 05B8 +qofqubuts;05E7 05BB +qofqubutshebrew;05E7 05BB +qofsegol;05E7 05B6 +qofsegolhebrew;05E7 05B6 +qofsheva;05E7 05B0 +qofshevahebrew;05E7 05B0 +qoftsere;05E7 05B5 +qoftserehebrew;05E7 05B5 +qparen;24AC +quarternote;2669 +qubuts;05BB +qubuts18;05BB +qubuts25;05BB +qubuts31;05BB +qubutshebrew;05BB +qubutsnarrowhebrew;05BB +qubutsquarterhebrew;05BB +qubutswidehebrew;05BB +question;003F +questionarabic;061F +questionarmenian;055E +questiondown;00BF +questiondownsmall;F7BF +questiongreek;037E +questionmonospace;FF1F +questionsmall;F73F +quotedbl;0022 +quotedblbase;201E +quotedblleft;201C +quotedblmonospace;FF02 +quotedblprime;301E +quotedblprimereversed;301D +quotedblright;201D +quoteleft;2018 +quoteleftreversed;201B +quotereversed;201B +quoteright;2019 +quoterightn;0149 +quotesinglbase;201A +quotesingle;0027 +quotesinglemonospace;FF07 +r;0072 +raarmenian;057C +rabengali;09B0 +racute;0155 +radeva;0930 
+radical;221A +radicalex;F8E5 +radoverssquare;33AE +radoverssquaredsquare;33AF +radsquare;33AD +rafe;05BF +rafehebrew;05BF +ragujarati;0AB0 +ragurmukhi;0A30 +rahiragana;3089 +rakatakana;30E9 +rakatakanahalfwidth;FF97 +ralowerdiagonalbengali;09F1 +ramiddlediagonalbengali;09F0 +ramshorn;0264 +ratio;2236 +rbopomofo;3116 +rcaron;0159 +rcedilla;0157 +rcircle;24E1 +rcommaaccent;0157 +rdblgrave;0211 +rdotaccent;1E59 +rdotbelow;1E5B +rdotbelowmacron;1E5D +referencemark;203B +reflexsubset;2286 +reflexsuperset;2287 +registered;00AE +registersans;F8E8 +registerserif;F6DA +reharabic;0631 +reharmenian;0580 +rehfinalarabic;FEAE +rehiragana;308C +rehyehaleflamarabic;0631 FEF3 FE8E 0644 +rekatakana;30EC +rekatakanahalfwidth;FF9A +resh;05E8 +reshdageshhebrew;FB48 +reshhatafpatah;05E8 05B2 +reshhatafpatahhebrew;05E8 05B2 +reshhatafsegol;05E8 05B1 +reshhatafsegolhebrew;05E8 05B1 +reshhebrew;05E8 +reshhiriq;05E8 05B4 +reshhiriqhebrew;05E8 05B4 +reshholam;05E8 05B9 +reshholamhebrew;05E8 05B9 +reshpatah;05E8 05B7 +reshpatahhebrew;05E8 05B7 +reshqamats;05E8 05B8 +reshqamatshebrew;05E8 05B8 +reshqubuts;05E8 05BB +reshqubutshebrew;05E8 05BB +reshsegol;05E8 05B6 +reshsegolhebrew;05E8 05B6 +reshsheva;05E8 05B0 +reshshevahebrew;05E8 05B0 +reshtsere;05E8 05B5 +reshtserehebrew;05E8 05B5 +reversedtilde;223D +reviahebrew;0597 +reviamugrashhebrew;0597 +revlogicalnot;2310 +rfishhook;027E +rfishhookreversed;027F +rhabengali;09DD +rhadeva;095D +rho;03C1 +rhook;027D +rhookturned;027B +rhookturnedsuperior;02B5 +rhosymbolgreek;03F1 +rhotichookmod;02DE +rieulacirclekorean;3271 +rieulaparenkorean;3211 +rieulcirclekorean;3263 +rieulhieuhkorean;3140 +rieulkiyeokkorean;313A +rieulkiyeoksioskorean;3169 +rieulkorean;3139 +rieulmieumkorean;313B +rieulpansioskorean;316C +rieulparenkorean;3203 +rieulphieuphkorean;313F +rieulpieupkorean;313C +rieulpieupsioskorean;316B +rieulsioskorean;313D +rieulthieuthkorean;313E +rieultikeutkorean;316A +rieulyeorinhieuhkorean;316D +rightangle;221F +righttackbelowcmb;0319 
+righttriangle;22BF +rihiragana;308A +rikatakana;30EA +rikatakanahalfwidth;FF98 +ring;02DA +ringbelowcmb;0325 +ringcmb;030A +ringhalfleft;02BF +ringhalfleftarmenian;0559 +ringhalfleftbelowcmb;031C +ringhalfleftcentered;02D3 +ringhalfright;02BE +ringhalfrightbelowcmb;0339 +ringhalfrightcentered;02D2 +rinvertedbreve;0213 +rittorusquare;3351 +rlinebelow;1E5F +rlongleg;027C +rlonglegturned;027A +rmonospace;FF52 +rohiragana;308D +rokatakana;30ED +rokatakanahalfwidth;FF9B +roruathai;0E23 +rparen;24AD +rrabengali;09DC +rradeva;0931 +rragurmukhi;0A5C +rreharabic;0691 +rrehfinalarabic;FB8D +rrvocalicbengali;09E0 +rrvocalicdeva;0960 +rrvocalicgujarati;0AE0 +rrvocalicvowelsignbengali;09C4 +rrvocalicvowelsigndeva;0944 +rrvocalicvowelsigngujarati;0AC4 +rsuperior;F6F1 +rtblock;2590 +rturned;0279 +rturnedsuperior;02B4 +ruhiragana;308B +rukatakana;30EB +rukatakanahalfwidth;FF99 +rupeemarkbengali;09F2 +rupeesignbengali;09F3 +rupiah;F6DD +ruthai;0E24 +rvocalicbengali;098B +rvocalicdeva;090B +rvocalicgujarati;0A8B +rvocalicvowelsignbengali;09C3 +rvocalicvowelsigndeva;0943 +rvocalicvowelsigngujarati;0AC3 +s;0073 +sabengali;09B8 +sacute;015B +sacutedotaccent;1E65 +sadarabic;0635 +sadeva;0938 +sadfinalarabic;FEBA +sadinitialarabic;FEBB +sadmedialarabic;FEBC +sagujarati;0AB8 +sagurmukhi;0A38 +sahiragana;3055 +sakatakana;30B5 +sakatakanahalfwidth;FF7B +sallallahoualayhewasallamarabic;FDFA +samekh;05E1 +samekhdagesh;FB41 +samekhdageshhebrew;FB41 +samekhhebrew;05E1 +saraaathai;0E32 +saraaethai;0E41 +saraaimaimalaithai;0E44 +saraaimaimuanthai;0E43 +saraamthai;0E33 +saraathai;0E30 +saraethai;0E40 +saraiileftthai;F886 +saraiithai;0E35 +saraileftthai;F885 +saraithai;0E34 +saraothai;0E42 +saraueeleftthai;F888 +saraueethai;0E37 +saraueleftthai;F887 +sarauethai;0E36 +sarauthai;0E38 +sarauuthai;0E39 +sbopomofo;3119 +scaron;0161 +scarondotaccent;1E67 +scedilla;015F +schwa;0259 +schwacyrillic;04D9 +schwadieresiscyrillic;04DB +schwahook;025A +scircle;24E2 +scircumflex;015D +scommaaccent;0219 
+sdotaccent;1E61 +sdotbelow;1E63 +sdotbelowdotaccent;1E69 +seagullbelowcmb;033C +second;2033 +secondtonechinese;02CA +section;00A7 +seenarabic;0633 +seenfinalarabic;FEB2 +seeninitialarabic;FEB3 +seenmedialarabic;FEB4 +segol;05B6 +segol13;05B6 +segol1f;05B6 +segol2c;05B6 +segolhebrew;05B6 +segolnarrowhebrew;05B6 +segolquarterhebrew;05B6 +segoltahebrew;0592 +segolwidehebrew;05B6 +seharmenian;057D +sehiragana;305B +sekatakana;30BB +sekatakanahalfwidth;FF7E +semicolon;003B +semicolonarabic;061B +semicolonmonospace;FF1B +semicolonsmall;FE54 +semivoicedmarkkana;309C +semivoicedmarkkanahalfwidth;FF9F +sentisquare;3322 +sentosquare;3323 +seven;0037 +sevenarabic;0667 +sevenbengali;09ED +sevencircle;2466 +sevencircleinversesansserif;2790 +sevendeva;096D +seveneighths;215E +sevengujarati;0AED +sevengurmukhi;0A6D +sevenhackarabic;0667 +sevenhangzhou;3027 +sevenideographicparen;3226 +seveninferior;2087 +sevenmonospace;FF17 +sevenoldstyle;F737 +sevenparen;247A +sevenperiod;248E +sevenpersian;06F7 +sevenroman;2176 +sevensuperior;2077 +seventeencircle;2470 +seventeenparen;2484 +seventeenperiod;2498 +seventhai;0E57 +sfthyphen;00AD +shaarmenian;0577 +shabengali;09B6 +shacyrillic;0448 +shaddaarabic;0651 +shaddadammaarabic;FC61 +shaddadammatanarabic;FC5E +shaddafathaarabic;FC60 +shaddafathatanarabic;0651 064B +shaddakasraarabic;FC62 +shaddakasratanarabic;FC5F +shade;2592 +shadedark;2593 +shadelight;2591 +shademedium;2592 +shadeva;0936 +shagujarati;0AB6 +shagurmukhi;0A36 +shalshelethebrew;0593 +shbopomofo;3115 +shchacyrillic;0449 +sheenarabic;0634 +sheenfinalarabic;FEB6 +sheeninitialarabic;FEB7 +sheenmedialarabic;FEB8 +sheicoptic;03E3 +sheqel;20AA +sheqelhebrew;20AA +sheva;05B0 +sheva115;05B0 +sheva15;05B0 +sheva22;05B0 +sheva2e;05B0 +shevahebrew;05B0 +shevanarrowhebrew;05B0 +shevaquarterhebrew;05B0 +shevawidehebrew;05B0 +shhacyrillic;04BB +shimacoptic;03ED +shin;05E9 +shindagesh;FB49 +shindageshhebrew;FB49 +shindageshshindot;FB2C +shindageshshindothebrew;FB2C +shindageshsindot;FB2D 
+shindageshsindothebrew;FB2D +shindothebrew;05C1 +shinhebrew;05E9 +shinshindot;FB2A +shinshindothebrew;FB2A +shinsindot;FB2B +shinsindothebrew;FB2B +shook;0282 +sigma;03C3 +sigma1;03C2 +sigmafinal;03C2 +sigmalunatesymbolgreek;03F2 +sihiragana;3057 +sikatakana;30B7 +sikatakanahalfwidth;FF7C +siluqhebrew;05BD +siluqlefthebrew;05BD +similar;223C +sindothebrew;05C2 +siosacirclekorean;3274 +siosaparenkorean;3214 +sioscieuckorean;317E +sioscirclekorean;3266 +sioskiyeokkorean;317A +sioskorean;3145 +siosnieunkorean;317B +siosparenkorean;3206 +siospieupkorean;317D +siostikeutkorean;317C +six;0036 +sixarabic;0666 +sixbengali;09EC +sixcircle;2465 +sixcircleinversesansserif;278F +sixdeva;096C +sixgujarati;0AEC +sixgurmukhi;0A6C +sixhackarabic;0666 +sixhangzhou;3026 +sixideographicparen;3225 +sixinferior;2086 +sixmonospace;FF16 +sixoldstyle;F736 +sixparen;2479 +sixperiod;248D +sixpersian;06F6 +sixroman;2175 +sixsuperior;2076 +sixteencircle;246F +sixteencurrencydenominatorbengali;09F9 +sixteenparen;2483 +sixteenperiod;2497 +sixthai;0E56 +slash;002F +slashmonospace;FF0F +slong;017F +slongdotaccent;1E9B +smileface;263A +smonospace;FF53 +sofpasuqhebrew;05C3 +softhyphen;00AD +softsigncyrillic;044C +sohiragana;305D +sokatakana;30BD +sokatakanahalfwidth;FF7F +soliduslongoverlaycmb;0338 +solidusshortoverlaycmb;0337 +sorusithai;0E29 +sosalathai;0E28 +sosothai;0E0B +sosuathai;0E2A +space;0020 +spacehackarabic;0020 +spade;2660 +spadesuitblack;2660 +spadesuitwhite;2664 +sparen;24AE +squarebelowcmb;033B +squarecc;33C4 +squarecm;339D +squarediagonalcrosshatchfill;25A9 +squarehorizontalfill;25A4 +squarekg;338F +squarekm;339E +squarekmcapital;33CE +squareln;33D1 +squarelog;33D2 +squaremg;338E +squaremil;33D5 +squaremm;339C +squaremsquared;33A1 +squareorthogonalcrosshatchfill;25A6 +squareupperlefttolowerrightfill;25A7 +squareupperrighttolowerleftfill;25A8 +squareverticalfill;25A5 +squarewhitewithsmallblack;25A3 +srsquare;33DB +ssabengali;09B7 +ssadeva;0937 +ssagujarati;0AB7 
+ssangcieuckorean;3149 +ssanghieuhkorean;3185 +ssangieungkorean;3180 +ssangkiyeokkorean;3132 +ssangnieunkorean;3165 +ssangpieupkorean;3143 +ssangsioskorean;3146 +ssangtikeutkorean;3138 +ssuperior;F6F2 +sterling;00A3 +sterlingmonospace;FFE1 +strokelongoverlaycmb;0336 +strokeshortoverlaycmb;0335 +subset;2282 +subsetnotequal;228A +subsetorequal;2286 +succeeds;227B +suchthat;220B +suhiragana;3059 +sukatakana;30B9 +sukatakanahalfwidth;FF7D +sukunarabic;0652 +summation;2211 +sun;263C +superset;2283 +supersetnotequal;228B +supersetorequal;2287 +svsquare;33DC +syouwaerasquare;337C +t;0074 +tabengali;09A4 +tackdown;22A4 +tackleft;22A3 +tadeva;0924 +tagujarati;0AA4 +tagurmukhi;0A24 +taharabic;0637 +tahfinalarabic;FEC2 +tahinitialarabic;FEC3 +tahiragana;305F +tahmedialarabic;FEC4 +taisyouerasquare;337D +takatakana;30BF +takatakanahalfwidth;FF80 +tatweelarabic;0640 +tau;03C4 +tav;05EA +tavdages;FB4A +tavdagesh;FB4A +tavdageshhebrew;FB4A +tavhebrew;05EA +tbar;0167 +tbopomofo;310A +tcaron;0165 +tccurl;02A8 +tcedilla;0163 +tcheharabic;0686 +tchehfinalarabic;FB7B +tchehinitialarabic;FB7C +tchehmedialarabic;FB7D +tchehmeeminitialarabic;FB7C FEE4 +tcircle;24E3 +tcircumflexbelow;1E71 +tcommaaccent;0163 +tdieresis;1E97 +tdotaccent;1E6B +tdotbelow;1E6D +tecyrillic;0442 +tedescendercyrillic;04AD +teharabic;062A +tehfinalarabic;FE96 +tehhahinitialarabic;FCA2 +tehhahisolatedarabic;FC0C +tehinitialarabic;FE97 +tehiragana;3066 +tehjeeminitialarabic;FCA1 +tehjeemisolatedarabic;FC0B +tehmarbutaarabic;0629 +tehmarbutafinalarabic;FE94 +tehmedialarabic;FE98 +tehmeeminitialarabic;FCA4 +tehmeemisolatedarabic;FC0E +tehnoonfinalarabic;FC73 +tekatakana;30C6 +tekatakanahalfwidth;FF83 +telephone;2121 +telephoneblack;260E +telishagedolahebrew;05A0 +telishaqetanahebrew;05A9 +tencircle;2469 +tenideographicparen;3229 +tenparen;247D +tenperiod;2491 +tenroman;2179 +tesh;02A7 +tet;05D8 +tetdagesh;FB38 +tetdageshhebrew;FB38 +tethebrew;05D8 +tetsecyrillic;04B5 +tevirhebrew;059B +tevirlefthebrew;059B 
+thabengali;09A5 +thadeva;0925 +thagujarati;0AA5 +thagurmukhi;0A25 +thalarabic;0630 +thalfinalarabic;FEAC +thanthakhatlowleftthai;F898 +thanthakhatlowrightthai;F897 +thanthakhatthai;0E4C +thanthakhatupperleftthai;F896 +theharabic;062B +thehfinalarabic;FE9A +thehinitialarabic;FE9B +thehmedialarabic;FE9C +thereexists;2203 +therefore;2234 +theta;03B8 +theta1;03D1 +thetasymbolgreek;03D1 +thieuthacirclekorean;3279 +thieuthaparenkorean;3219 +thieuthcirclekorean;326B +thieuthkorean;314C +thieuthparenkorean;320B +thirteencircle;246C +thirteenparen;2480 +thirteenperiod;2494 +thonangmonthothai;0E11 +thook;01AD +thophuthaothai;0E12 +thorn;00FE +thothahanthai;0E17 +thothanthai;0E10 +thothongthai;0E18 +thothungthai;0E16 +thousandcyrillic;0482 +thousandsseparatorarabic;066C +thousandsseparatorpersian;066C +three;0033 +threearabic;0663 +threebengali;09E9 +threecircle;2462 +threecircleinversesansserif;278C +threedeva;0969 +threeeighths;215C +threegujarati;0AE9 +threegurmukhi;0A69 +threehackarabic;0663 +threehangzhou;3023 +threeideographicparen;3222 +threeinferior;2083 +threemonospace;FF13 +threenumeratorbengali;09F6 +threeoldstyle;F733 +threeparen;2476 +threeperiod;248A +threepersian;06F3 +threequarters;00BE +threequartersemdash;F6DE +threeroman;2172 +threesuperior;00B3 +threethai;0E53 +thzsquare;3394 +tihiragana;3061 +tikatakana;30C1 +tikatakanahalfwidth;FF81 +tikeutacirclekorean;3270 +tikeutaparenkorean;3210 +tikeutcirclekorean;3262 +tikeutkorean;3137 +tikeutparenkorean;3202 +tilde;02DC +tildebelowcmb;0330 +tildecmb;0303 +tildecomb;0303 +tildedoublecmb;0360 +tildeoperator;223C +tildeoverlaycmb;0334 +tildeverticalcmb;033E +timescircle;2297 +tipehahebrew;0596 +tipehalefthebrew;0596 +tippigurmukhi;0A70 +titlocyrilliccmb;0483 +tiwnarmenian;057F +tlinebelow;1E6F +tmonospace;FF54 +toarmenian;0569 +tohiragana;3068 +tokatakana;30C8 +tokatakanahalfwidth;FF84 +tonebarextrahighmod;02E5 +tonebarextralowmod;02E9 +tonebarhighmod;02E6 +tonebarlowmod;02E8 +tonebarmidmod;02E7 +tonefive;01BD 
+tonesix;0185 +tonetwo;01A8 +tonos;0384 +tonsquare;3327 +topatakthai;0E0F +tortoiseshellbracketleft;3014 +tortoiseshellbracketleftsmall;FE5D +tortoiseshellbracketleftvertical;FE39 +tortoiseshellbracketright;3015 +tortoiseshellbracketrightsmall;FE5E +tortoiseshellbracketrightvertical;FE3A +totaothai;0E15 +tpalatalhook;01AB +tparen;24AF +trademark;2122 +trademarksans;F8EA +trademarkserif;F6DB +tretroflexhook;0288 +triagdn;25BC +triaglf;25C4 +triagrt;25BA +triagup;25B2 +ts;02A6 +tsadi;05E6 +tsadidagesh;FB46 +tsadidageshhebrew;FB46 +tsadihebrew;05E6 +tsecyrillic;0446 +tsere;05B5 +tsere12;05B5 +tsere1e;05B5 +tsere2b;05B5 +tserehebrew;05B5 +tserenarrowhebrew;05B5 +tserequarterhebrew;05B5 +tserewidehebrew;05B5 +tshecyrillic;045B +tsuperior;F6F3 +ttabengali;099F +ttadeva;091F +ttagujarati;0A9F +ttagurmukhi;0A1F +tteharabic;0679 +ttehfinalarabic;FB67 +ttehinitialarabic;FB68 +ttehmedialarabic;FB69 +tthabengali;09A0 +tthadeva;0920 +tthagujarati;0AA0 +tthagurmukhi;0A20 +tturned;0287 +tuhiragana;3064 +tukatakana;30C4 +tukatakanahalfwidth;FF82 +tusmallhiragana;3063 +tusmallkatakana;30C3 +tusmallkatakanahalfwidth;FF6F +twelvecircle;246B +twelveparen;247F +twelveperiod;2493 +twelveroman;217B +twentycircle;2473 +twentyhangzhou;5344 +twentyparen;2487 +twentyperiod;249B +two;0032 +twoarabic;0662 +twobengali;09E8 +twocircle;2461 +twocircleinversesansserif;278B +twodeva;0968 +twodotenleader;2025 +twodotleader;2025 +twodotleadervertical;FE30 +twogujarati;0AE8 +twogurmukhi;0A68 +twohackarabic;0662 +twohangzhou;3022 +twoideographicparen;3221 +twoinferior;2082 +twomonospace;FF12 +twonumeratorbengali;09F5 +twooldstyle;F732 +twoparen;2475 +twoperiod;2489 +twopersian;06F2 +tworoman;2171 +twostroke;01BB +twosuperior;00B2 +twothai;0E52 +twothirds;2154 +u;0075 +uacute;00FA +ubar;0289 +ubengali;0989 +ubopomofo;3128 +ubreve;016D +ucaron;01D4 +ucircle;24E4 +ucircumflex;00FB +ucircumflexbelow;1E77 +ucyrillic;0443 +udattadeva;0951 +udblacute;0171 +udblgrave;0215 +udeva;0909 +udieresis;00FC 
+udieresisacute;01D8 +udieresisbelow;1E73 +udieresiscaron;01DA +udieresiscyrillic;04F1 +udieresisgrave;01DC +udieresismacron;01D6 +udotbelow;1EE5 +ugrave;00F9 +ugujarati;0A89 +ugurmukhi;0A09 +uhiragana;3046 +uhookabove;1EE7 +uhorn;01B0 +uhornacute;1EE9 +uhorndotbelow;1EF1 +uhorngrave;1EEB +uhornhookabove;1EED +uhorntilde;1EEF +uhungarumlaut;0171 +uhungarumlautcyrillic;04F3 +uinvertedbreve;0217 +ukatakana;30A6 +ukatakanahalfwidth;FF73 +ukcyrillic;0479 +ukorean;315C +umacron;016B +umacroncyrillic;04EF +umacrondieresis;1E7B +umatragurmukhi;0A41 +umonospace;FF55 +underscore;005F +underscoredbl;2017 +underscoremonospace;FF3F +underscorevertical;FE33 +underscorewavy;FE4F +union;222A +universal;2200 +uogonek;0173 +uparen;24B0 +upblock;2580 +upperdothebrew;05C4 +upsilon;03C5 +upsilondieresis;03CB +upsilondieresistonos;03B0 +upsilonlatin;028A +upsilontonos;03CD +uptackbelowcmb;031D +uptackmod;02D4 +uragurmukhi;0A73 +uring;016F +ushortcyrillic;045E +usmallhiragana;3045 +usmallkatakana;30A5 +usmallkatakanahalfwidth;FF69 +ustraightcyrillic;04AF +ustraightstrokecyrillic;04B1 +utilde;0169 +utildeacute;1E79 +utildebelow;1E75 +uubengali;098A +uudeva;090A +uugujarati;0A8A +uugurmukhi;0A0A +uumatragurmukhi;0A42 +uuvowelsignbengali;09C2 +uuvowelsigndeva;0942 +uuvowelsigngujarati;0AC2 +uvowelsignbengali;09C1 +uvowelsigndeva;0941 +uvowelsigngujarati;0AC1 +v;0076 +vadeva;0935 +vagujarati;0AB5 +vagurmukhi;0A35 +vakatakana;30F7 +vav;05D5 +vavdagesh;FB35 +vavdagesh65;FB35 +vavdageshhebrew;FB35 +vavhebrew;05D5 +vavholam;FB4B +vavholamhebrew;FB4B +vavvavhebrew;05F0 +vavyodhebrew;05F1 +vcircle;24E5 +vdotbelow;1E7F +vecyrillic;0432 +veharabic;06A4 +vehfinalarabic;FB6B +vehinitialarabic;FB6C +vehmedialarabic;FB6D +vekatakana;30F9 +venus;2640 +verticalbar;007C +verticallineabovecmb;030D +verticallinebelowcmb;0329 +verticallinelowmod;02CC +verticallinemod;02C8 +vewarmenian;057E +vhook;028B +vikatakana;30F8 +viramabengali;09CD +viramadeva;094D +viramagujarati;0ACD +visargabengali;0983 
+visargadeva;0903 +visargagujarati;0A83 +vmonospace;FF56 +voarmenian;0578 +voicediterationhiragana;309E +voicediterationkatakana;30FE +voicedmarkkana;309B +voicedmarkkanahalfwidth;FF9E +vokatakana;30FA +vparen;24B1 +vtilde;1E7D +vturned;028C +vuhiragana;3094 +vukatakana;30F4 +w;0077 +wacute;1E83 +waekorean;3159 +wahiragana;308F +wakatakana;30EF +wakatakanahalfwidth;FF9C +wakorean;3158 +wasmallhiragana;308E +wasmallkatakana;30EE +wattosquare;3357 +wavedash;301C +wavyunderscorevertical;FE34 +wawarabic;0648 +wawfinalarabic;FEEE +wawhamzaabovearabic;0624 +wawhamzaabovefinalarabic;FE86 +wbsquare;33DD +wcircle;24E6 +wcircumflex;0175 +wdieresis;1E85 +wdotaccent;1E87 +wdotbelow;1E89 +wehiragana;3091 +weierstrass;2118 +wekatakana;30F1 +wekorean;315E +weokorean;315D +wgrave;1E81 +whitebullet;25E6 +whitecircle;25CB +whitecircleinverse;25D9 +whitecornerbracketleft;300E +whitecornerbracketleftvertical;FE43 +whitecornerbracketright;300F +whitecornerbracketrightvertical;FE44 +whitediamond;25C7 +whitediamondcontainingblacksmalldiamond;25C8 +whitedownpointingsmalltriangle;25BF +whitedownpointingtriangle;25BD +whiteleftpointingsmalltriangle;25C3 +whiteleftpointingtriangle;25C1 +whitelenticularbracketleft;3016 +whitelenticularbracketright;3017 +whiterightpointingsmalltriangle;25B9 +whiterightpointingtriangle;25B7 +whitesmallsquare;25AB +whitesmilingface;263A +whitesquare;25A1 +whitestar;2606 +whitetelephone;260F +whitetortoiseshellbracketleft;3018 +whitetortoiseshellbracketright;3019 +whiteuppointingsmalltriangle;25B5 +whiteuppointingtriangle;25B3 +wihiragana;3090 +wikatakana;30F0 +wikorean;315F +wmonospace;FF57 +wohiragana;3092 +wokatakana;30F2 +wokatakanahalfwidth;FF66 +won;20A9 +wonmonospace;FFE6 +wowaenthai;0E27 +wparen;24B2 +wring;1E98 +wsuperior;02B7 +wturned;028D +wynn;01BF +x;0078 +xabovecmb;033D +xbopomofo;3112 +xcircle;24E7 +xdieresis;1E8D +xdotaccent;1E8B +xeharmenian;056D +xi;03BE +xmonospace;FF58 +xparen;24B3 +xsuperior;02E3 +y;0079 +yaadosquare;334E +yabengali;09AF 
+yacute;00FD +yadeva;092F +yaekorean;3152 +yagujarati;0AAF +yagurmukhi;0A2F +yahiragana;3084 +yakatakana;30E4 +yakatakanahalfwidth;FF94 +yakorean;3151 +yamakkanthai;0E4E +yasmallhiragana;3083 +yasmallkatakana;30E3 +yasmallkatakanahalfwidth;FF6C +yatcyrillic;0463 +ycircle;24E8 +ycircumflex;0177 +ydieresis;00FF +ydotaccent;1E8F +ydotbelow;1EF5 +yeharabic;064A +yehbarreearabic;06D2 +yehbarreefinalarabic;FBAF +yehfinalarabic;FEF2 +yehhamzaabovearabic;0626 +yehhamzaabovefinalarabic;FE8A +yehhamzaaboveinitialarabic;FE8B +yehhamzaabovemedialarabic;FE8C +yehinitialarabic;FEF3 +yehmedialarabic;FEF4 +yehmeeminitialarabic;FCDD +yehmeemisolatedarabic;FC58 +yehnoonfinalarabic;FC94 +yehthreedotsbelowarabic;06D1 +yekorean;3156 +yen;00A5 +yenmonospace;FFE5 +yeokorean;3155 +yeorinhieuhkorean;3186 +yerahbenyomohebrew;05AA +yerahbenyomolefthebrew;05AA +yericyrillic;044B +yerudieresiscyrillic;04F9 +yesieungkorean;3181 +yesieungpansioskorean;3183 +yesieungsioskorean;3182 +yetivhebrew;059A +ygrave;1EF3 +yhook;01B4 +yhookabove;1EF7 +yiarmenian;0575 +yicyrillic;0457 +yikorean;3162 +yinyang;262F +yiwnarmenian;0582 +ymonospace;FF59 +yod;05D9 +yoddagesh;FB39 +yoddageshhebrew;FB39 +yodhebrew;05D9 +yodyodhebrew;05F2 +yodyodpatahhebrew;FB1F +yohiragana;3088 +yoikorean;3189 +yokatakana;30E8 +yokatakanahalfwidth;FF96 +yokorean;315B +yosmallhiragana;3087 +yosmallkatakana;30E7 +yosmallkatakanahalfwidth;FF6E +yotgreek;03F3 +yoyaekorean;3188 +yoyakorean;3187 +yoyakthai;0E22 +yoyingthai;0E0D +yparen;24B4 +ypogegrammeni;037A +ypogegrammenigreekcmb;0345 +yr;01A6 +yring;1E99 +ysuperior;02B8 +ytilde;1EF9 +yturned;028E +yuhiragana;3086 +yuikorean;318C +yukatakana;30E6 +yukatakanahalfwidth;FF95 +yukorean;3160 +yusbigcyrillic;046B +yusbigiotifiedcyrillic;046D +yuslittlecyrillic;0467 +yuslittleiotifiedcyrillic;0469 +yusmallhiragana;3085 +yusmallkatakana;30E5 +yusmallkatakanahalfwidth;FF6D +yuyekorean;318B +yuyeokorean;318A +yyabengali;09DF +yyadeva;095F +z;007A +zaarmenian;0566 +zacute;017A +zadeva;095B 
+zagurmukhi;0A5B +zaharabic;0638 +zahfinalarabic;FEC6 +zahinitialarabic;FEC7 +zahiragana;3056 +zahmedialarabic;FEC8 +zainarabic;0632 +zainfinalarabic;FEB0 +zakatakana;30B6 +zaqefgadolhebrew;0595 +zaqefqatanhebrew;0594 +zarqahebrew;0598 +zayin;05D6 +zayindagesh;FB36 +zayindageshhebrew;FB36 +zayinhebrew;05D6 +zbopomofo;3117 +zcaron;017E +zcircle;24E9 +zcircumflex;1E91 +zcurl;0291 +zdot;017C +zdotaccent;017C +zdotbelow;1E93 +zecyrillic;0437 +zedescendercyrillic;0499 +zedieresiscyrillic;04DF +zehiragana;305C +zekatakana;30BC +zero;0030 +zeroarabic;0660 +zerobengali;09E6 +zerodeva;0966 +zerogujarati;0AE6 +zerogurmukhi;0A66 +zerohackarabic;0660 +zeroinferior;2080 +zeromonospace;FF10 +zerooldstyle;F730 +zeropersian;06F0 +zerosuperior;2070 +zerothai;0E50 +zerowidthjoiner;FEFF +zerowidthnonjoiner;200C +zerowidthspace;200B +zeta;03B6 +zhbopomofo;3113 +zhearmenian;056A +zhebrevecyrillic;04C2 +zhecyrillic;0436 +zhedescendercyrillic;0497 +zhedieresiscyrillic;04DD +zihiragana;3058 +zikatakana;30B8 +zinorhebrew;05AE +zlinebelow;1E95 +zmonospace;FF5A +zohiragana;305E +zokatakana;30BE +zparen;24B5 +zretroflexhook;0290 +zstroke;01B6 +zuhiragana;305A +zukatakana;30BA +# END +""" + + +_aglfnText = """\ +# ----------------------------------------------------------- +# Copyright 2002-2019 Adobe (http://www.adobe.com/). +# +# Redistribution and use in source and binary forms, with or +# without modification, are permitted provided that the +# following conditions are met: +# +# Redistributions of source code must retain the above +# copyright notice, this list of conditions and the following +# disclaimer. +# +# Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials +# provided with the distribution. 
+# +# Neither the name of Adobe nor the names of its contributors +# may be used to endorse or promote products derived from this +# software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# ----------------------------------------------------------- +# Name: Adobe Glyph List For New Fonts +# Table version: 1.7 +# Date: November 6, 2008 +# URL: https://github.com/adobe-type-tools/agl-aglfn +# +# Description: +# +# AGLFN (Adobe Glyph List For New Fonts) provides a list of base glyph +# names that are recommended for new fonts, which are compatible with +# the AGL (Adobe Glyph List) Specification, and which should be used +# as described in Section 6 of that document. AGLFN comprises the set +# of glyph names from AGL that map via the AGL Specification rules to +# the semantically correct UV (Unicode Value). For example, "Asmall" +# is omitted because AGL maps this glyph name to the PUA (Private Use +# Area) value U+F761, rather than to the UV that maps from the glyph +# name "A." Also omitted is "ffi," because AGL maps this to the +# Alphabetic Presentation Forms value U+FB03, rather than decomposing +# it into the following sequence of three UVs: U+0066, U+0066, and +# U+0069. 
The name "arrowvertex" has been omitted because this glyph +# now has a real UV, and AGL is now incorrect in mapping it to the PUA +# value U+F8E6. If you do not find an appropriate name for your glyph +# in this list, then please refer to Section 6 of the AGL +# Specification. +# +# Format: three semicolon-delimited fields: +# (1) Standard UV or CUS UV--four uppercase hexadecimal digits +# (2) Glyph name--upper/lowercase letters and digits +# (3) Character names: Unicode character names for standard UVs, and +# descriptive names for CUS UVs--uppercase letters, hyphen, and +# space +# +# The records are sorted by glyph name in increasing ASCII order, +# entries with the same glyph name are sorted in decreasing priority +# order, the UVs and Unicode character names are provided for +# convenience, lines starting with "#" are comments, and blank lines +# should be ignored. +# +# Revision History: +# +# 1.7 [6 November 2008] +# - Reverted to the original 1.4 and earlier mappings for Delta, +# Omega, and mu. +# - Removed mappings for "afii" names. These should now be assigned +# "uni" names. +# - Removed mappings for "commaaccent" names. These should now be +# assigned "uni" names. +# +# 1.6 [30 January 2006] +# - Completed work intended in 1.5. +# +# 1.5 [23 November 2005] +# - Removed duplicated block at end of file. +# - Changed mappings: +# 2206;Delta;INCREMENT changed to 0394;Delta;GREEK CAPITAL LETTER DELTA +# 2126;Omega;OHM SIGN changed to 03A9;Omega;GREEK CAPITAL LETTER OMEGA +# 03BC;mu;MICRO SIGN changed to 03BC;mu;GREEK SMALL LETTER MU +# - Corrected statement above about why "ffi" is omitted. +# +# 1.4 [24 September 2003] +# - Changed version to 1.4, to avoid confusion with the AGL 1.3. +# - Fixed spelling errors in the header. +# - Fully removed "arrowvertex," as it is mapped only to a PUA Unicode +# value in some fonts. +# +# 1.1 [17 April 2003] +# - Renamed [Tt]cedilla back to [Tt]commaaccent. +# +# 1.0 [31 January 2003] +# - Original version. 
+# - Derived from the AGLv1.2 by: +# removing the PUA area codes; +# removing duplicate Unicode mappings; and +# renaming "tcommaaccent" to "tcedilla" and "Tcommaaccent" to "Tcedilla" +# +0041;A;LATIN CAPITAL LETTER A +00C6;AE;LATIN CAPITAL LETTER AE +01FC;AEacute;LATIN CAPITAL LETTER AE WITH ACUTE +00C1;Aacute;LATIN CAPITAL LETTER A WITH ACUTE +0102;Abreve;LATIN CAPITAL LETTER A WITH BREVE +00C2;Acircumflex;LATIN CAPITAL LETTER A WITH CIRCUMFLEX +00C4;Adieresis;LATIN CAPITAL LETTER A WITH DIAERESIS +00C0;Agrave;LATIN CAPITAL LETTER A WITH GRAVE +0391;Alpha;GREEK CAPITAL LETTER ALPHA +0386;Alphatonos;GREEK CAPITAL LETTER ALPHA WITH TONOS +0100;Amacron;LATIN CAPITAL LETTER A WITH MACRON +0104;Aogonek;LATIN CAPITAL LETTER A WITH OGONEK +00C5;Aring;LATIN CAPITAL LETTER A WITH RING ABOVE +01FA;Aringacute;LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE +00C3;Atilde;LATIN CAPITAL LETTER A WITH TILDE +0042;B;LATIN CAPITAL LETTER B +0392;Beta;GREEK CAPITAL LETTER BETA +0043;C;LATIN CAPITAL LETTER C +0106;Cacute;LATIN CAPITAL LETTER C WITH ACUTE +010C;Ccaron;LATIN CAPITAL LETTER C WITH CARON +00C7;Ccedilla;LATIN CAPITAL LETTER C WITH CEDILLA +0108;Ccircumflex;LATIN CAPITAL LETTER C WITH CIRCUMFLEX +010A;Cdotaccent;LATIN CAPITAL LETTER C WITH DOT ABOVE +03A7;Chi;GREEK CAPITAL LETTER CHI +0044;D;LATIN CAPITAL LETTER D +010E;Dcaron;LATIN CAPITAL LETTER D WITH CARON +0110;Dcroat;LATIN CAPITAL LETTER D WITH STROKE +2206;Delta;INCREMENT +0045;E;LATIN CAPITAL LETTER E +00C9;Eacute;LATIN CAPITAL LETTER E WITH ACUTE +0114;Ebreve;LATIN CAPITAL LETTER E WITH BREVE +011A;Ecaron;LATIN CAPITAL LETTER E WITH CARON +00CA;Ecircumflex;LATIN CAPITAL LETTER E WITH CIRCUMFLEX +00CB;Edieresis;LATIN CAPITAL LETTER E WITH DIAERESIS +0116;Edotaccent;LATIN CAPITAL LETTER E WITH DOT ABOVE +00C8;Egrave;LATIN CAPITAL LETTER E WITH GRAVE +0112;Emacron;LATIN CAPITAL LETTER E WITH MACRON +014A;Eng;LATIN CAPITAL LETTER ENG +0118;Eogonek;LATIN CAPITAL LETTER E WITH OGONEK +0395;Epsilon;GREEK CAPITAL LETTER 
EPSILON +0388;Epsilontonos;GREEK CAPITAL LETTER EPSILON WITH TONOS +0397;Eta;GREEK CAPITAL LETTER ETA +0389;Etatonos;GREEK CAPITAL LETTER ETA WITH TONOS +00D0;Eth;LATIN CAPITAL LETTER ETH +20AC;Euro;EURO SIGN +0046;F;LATIN CAPITAL LETTER F +0047;G;LATIN CAPITAL LETTER G +0393;Gamma;GREEK CAPITAL LETTER GAMMA +011E;Gbreve;LATIN CAPITAL LETTER G WITH BREVE +01E6;Gcaron;LATIN CAPITAL LETTER G WITH CARON +011C;Gcircumflex;LATIN CAPITAL LETTER G WITH CIRCUMFLEX +0120;Gdotaccent;LATIN CAPITAL LETTER G WITH DOT ABOVE +0048;H;LATIN CAPITAL LETTER H +25CF;H18533;BLACK CIRCLE +25AA;H18543;BLACK SMALL SQUARE +25AB;H18551;WHITE SMALL SQUARE +25A1;H22073;WHITE SQUARE +0126;Hbar;LATIN CAPITAL LETTER H WITH STROKE +0124;Hcircumflex;LATIN CAPITAL LETTER H WITH CIRCUMFLEX +0049;I;LATIN CAPITAL LETTER I +0132;IJ;LATIN CAPITAL LIGATURE IJ +00CD;Iacute;LATIN CAPITAL LETTER I WITH ACUTE +012C;Ibreve;LATIN CAPITAL LETTER I WITH BREVE +00CE;Icircumflex;LATIN CAPITAL LETTER I WITH CIRCUMFLEX +00CF;Idieresis;LATIN CAPITAL LETTER I WITH DIAERESIS +0130;Idotaccent;LATIN CAPITAL LETTER I WITH DOT ABOVE +2111;Ifraktur;BLACK-LETTER CAPITAL I +00CC;Igrave;LATIN CAPITAL LETTER I WITH GRAVE +012A;Imacron;LATIN CAPITAL LETTER I WITH MACRON +012E;Iogonek;LATIN CAPITAL LETTER I WITH OGONEK +0399;Iota;GREEK CAPITAL LETTER IOTA +03AA;Iotadieresis;GREEK CAPITAL LETTER IOTA WITH DIALYTIKA +038A;Iotatonos;GREEK CAPITAL LETTER IOTA WITH TONOS +0128;Itilde;LATIN CAPITAL LETTER I WITH TILDE +004A;J;LATIN CAPITAL LETTER J +0134;Jcircumflex;LATIN CAPITAL LETTER J WITH CIRCUMFLEX +004B;K;LATIN CAPITAL LETTER K +039A;Kappa;GREEK CAPITAL LETTER KAPPA +004C;L;LATIN CAPITAL LETTER L +0139;Lacute;LATIN CAPITAL LETTER L WITH ACUTE +039B;Lambda;GREEK CAPITAL LETTER LAMDA +013D;Lcaron;LATIN CAPITAL LETTER L WITH CARON +013F;Ldot;LATIN CAPITAL LETTER L WITH MIDDLE DOT +0141;Lslash;LATIN CAPITAL LETTER L WITH STROKE +004D;M;LATIN CAPITAL LETTER M +039C;Mu;GREEK CAPITAL LETTER MU +004E;N;LATIN CAPITAL LETTER N 
+0143;Nacute;LATIN CAPITAL LETTER N WITH ACUTE +0147;Ncaron;LATIN CAPITAL LETTER N WITH CARON +00D1;Ntilde;LATIN CAPITAL LETTER N WITH TILDE +039D;Nu;GREEK CAPITAL LETTER NU +004F;O;LATIN CAPITAL LETTER O +0152;OE;LATIN CAPITAL LIGATURE OE +00D3;Oacute;LATIN CAPITAL LETTER O WITH ACUTE +014E;Obreve;LATIN CAPITAL LETTER O WITH BREVE +00D4;Ocircumflex;LATIN CAPITAL LETTER O WITH CIRCUMFLEX +00D6;Odieresis;LATIN CAPITAL LETTER O WITH DIAERESIS +00D2;Ograve;LATIN CAPITAL LETTER O WITH GRAVE +01A0;Ohorn;LATIN CAPITAL LETTER O WITH HORN +0150;Ohungarumlaut;LATIN CAPITAL LETTER O WITH DOUBLE ACUTE +014C;Omacron;LATIN CAPITAL LETTER O WITH MACRON +2126;Omega;OHM SIGN +038F;Omegatonos;GREEK CAPITAL LETTER OMEGA WITH TONOS +039F;Omicron;GREEK CAPITAL LETTER OMICRON +038C;Omicrontonos;GREEK CAPITAL LETTER OMICRON WITH TONOS +00D8;Oslash;LATIN CAPITAL LETTER O WITH STROKE +01FE;Oslashacute;LATIN CAPITAL LETTER O WITH STROKE AND ACUTE +00D5;Otilde;LATIN CAPITAL LETTER O WITH TILDE +0050;P;LATIN CAPITAL LETTER P +03A6;Phi;GREEK CAPITAL LETTER PHI +03A0;Pi;GREEK CAPITAL LETTER PI +03A8;Psi;GREEK CAPITAL LETTER PSI +0051;Q;LATIN CAPITAL LETTER Q +0052;R;LATIN CAPITAL LETTER R +0154;Racute;LATIN CAPITAL LETTER R WITH ACUTE +0158;Rcaron;LATIN CAPITAL LETTER R WITH CARON +211C;Rfraktur;BLACK-LETTER CAPITAL R +03A1;Rho;GREEK CAPITAL LETTER RHO +0053;S;LATIN CAPITAL LETTER S +250C;SF010000;BOX DRAWINGS LIGHT DOWN AND RIGHT +2514;SF020000;BOX DRAWINGS LIGHT UP AND RIGHT +2510;SF030000;BOX DRAWINGS LIGHT DOWN AND LEFT +2518;SF040000;BOX DRAWINGS LIGHT UP AND LEFT +253C;SF050000;BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL +252C;SF060000;BOX DRAWINGS LIGHT DOWN AND HORIZONTAL +2534;SF070000;BOX DRAWINGS LIGHT UP AND HORIZONTAL +251C;SF080000;BOX DRAWINGS LIGHT VERTICAL AND RIGHT +2524;SF090000;BOX DRAWINGS LIGHT VERTICAL AND LEFT +2500;SF100000;BOX DRAWINGS LIGHT HORIZONTAL +2502;SF110000;BOX DRAWINGS LIGHT VERTICAL +2561;SF190000;BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE 
+2562;SF200000;BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE +2556;SF210000;BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE +2555;SF220000;BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE +2563;SF230000;BOX DRAWINGS DOUBLE VERTICAL AND LEFT +2551;SF240000;BOX DRAWINGS DOUBLE VERTICAL +2557;SF250000;BOX DRAWINGS DOUBLE DOWN AND LEFT +255D;SF260000;BOX DRAWINGS DOUBLE UP AND LEFT +255C;SF270000;BOX DRAWINGS UP DOUBLE AND LEFT SINGLE +255B;SF280000;BOX DRAWINGS UP SINGLE AND LEFT DOUBLE +255E;SF360000;BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE +255F;SF370000;BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE +255A;SF380000;BOX DRAWINGS DOUBLE UP AND RIGHT +2554;SF390000;BOX DRAWINGS DOUBLE DOWN AND RIGHT +2569;SF400000;BOX DRAWINGS DOUBLE UP AND HORIZONTAL +2566;SF410000;BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL +2560;SF420000;BOX DRAWINGS DOUBLE VERTICAL AND RIGHT +2550;SF430000;BOX DRAWINGS DOUBLE HORIZONTAL +256C;SF440000;BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL +2567;SF450000;BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE +2568;SF460000;BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE +2564;SF470000;BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE +2565;SF480000;BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE +2559;SF490000;BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE +2558;SF500000;BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE +2552;SF510000;BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE +2553;SF520000;BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE +256B;SF530000;BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE +256A;SF540000;BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE +015A;Sacute;LATIN CAPITAL LETTER S WITH ACUTE +0160;Scaron;LATIN CAPITAL LETTER S WITH CARON +015E;Scedilla;LATIN CAPITAL LETTER S WITH CEDILLA +015C;Scircumflex;LATIN CAPITAL LETTER S WITH CIRCUMFLEX +03A3;Sigma;GREEK CAPITAL LETTER SIGMA +0054;T;LATIN CAPITAL LETTER T +03A4;Tau;GREEK CAPITAL LETTER TAU +0166;Tbar;LATIN CAPITAL LETTER T WITH STROKE +0164;Tcaron;LATIN CAPITAL LETTER T WITH CARON +0398;Theta;GREEK CAPITAL LETTER THETA 
+00DE;Thorn;LATIN CAPITAL LETTER THORN +0055;U;LATIN CAPITAL LETTER U +00DA;Uacute;LATIN CAPITAL LETTER U WITH ACUTE +016C;Ubreve;LATIN CAPITAL LETTER U WITH BREVE +00DB;Ucircumflex;LATIN CAPITAL LETTER U WITH CIRCUMFLEX +00DC;Udieresis;LATIN CAPITAL LETTER U WITH DIAERESIS +00D9;Ugrave;LATIN CAPITAL LETTER U WITH GRAVE +01AF;Uhorn;LATIN CAPITAL LETTER U WITH HORN +0170;Uhungarumlaut;LATIN CAPITAL LETTER U WITH DOUBLE ACUTE +016A;Umacron;LATIN CAPITAL LETTER U WITH MACRON +0172;Uogonek;LATIN CAPITAL LETTER U WITH OGONEK +03A5;Upsilon;GREEK CAPITAL LETTER UPSILON +03D2;Upsilon1;GREEK UPSILON WITH HOOK SYMBOL +03AB;Upsilondieresis;GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA +038E;Upsilontonos;GREEK CAPITAL LETTER UPSILON WITH TONOS +016E;Uring;LATIN CAPITAL LETTER U WITH RING ABOVE +0168;Utilde;LATIN CAPITAL LETTER U WITH TILDE +0056;V;LATIN CAPITAL LETTER V +0057;W;LATIN CAPITAL LETTER W +1E82;Wacute;LATIN CAPITAL LETTER W WITH ACUTE +0174;Wcircumflex;LATIN CAPITAL LETTER W WITH CIRCUMFLEX +1E84;Wdieresis;LATIN CAPITAL LETTER W WITH DIAERESIS +1E80;Wgrave;LATIN CAPITAL LETTER W WITH GRAVE +0058;X;LATIN CAPITAL LETTER X +039E;Xi;GREEK CAPITAL LETTER XI +0059;Y;LATIN CAPITAL LETTER Y +00DD;Yacute;LATIN CAPITAL LETTER Y WITH ACUTE +0176;Ycircumflex;LATIN CAPITAL LETTER Y WITH CIRCUMFLEX +0178;Ydieresis;LATIN CAPITAL LETTER Y WITH DIAERESIS +1EF2;Ygrave;LATIN CAPITAL LETTER Y WITH GRAVE +005A;Z;LATIN CAPITAL LETTER Z +0179;Zacute;LATIN CAPITAL LETTER Z WITH ACUTE +017D;Zcaron;LATIN CAPITAL LETTER Z WITH CARON +017B;Zdotaccent;LATIN CAPITAL LETTER Z WITH DOT ABOVE +0396;Zeta;GREEK CAPITAL LETTER ZETA +0061;a;LATIN SMALL LETTER A +00E1;aacute;LATIN SMALL LETTER A WITH ACUTE +0103;abreve;LATIN SMALL LETTER A WITH BREVE +00E2;acircumflex;LATIN SMALL LETTER A WITH CIRCUMFLEX +00B4;acute;ACUTE ACCENT +0301;acutecomb;COMBINING ACUTE ACCENT +00E4;adieresis;LATIN SMALL LETTER A WITH DIAERESIS +00E6;ae;LATIN SMALL LETTER AE +01FD;aeacute;LATIN SMALL LETTER AE WITH ACUTE 
+00E0;agrave;LATIN SMALL LETTER A WITH GRAVE +2135;aleph;ALEF SYMBOL +03B1;alpha;GREEK SMALL LETTER ALPHA +03AC;alphatonos;GREEK SMALL LETTER ALPHA WITH TONOS +0101;amacron;LATIN SMALL LETTER A WITH MACRON +0026;ampersand;AMPERSAND +2220;angle;ANGLE +2329;angleleft;LEFT-POINTING ANGLE BRACKET +232A;angleright;RIGHT-POINTING ANGLE BRACKET +0387;anoteleia;GREEK ANO TELEIA +0105;aogonek;LATIN SMALL LETTER A WITH OGONEK +2248;approxequal;ALMOST EQUAL TO +00E5;aring;LATIN SMALL LETTER A WITH RING ABOVE +01FB;aringacute;LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE +2194;arrowboth;LEFT RIGHT ARROW +21D4;arrowdblboth;LEFT RIGHT DOUBLE ARROW +21D3;arrowdbldown;DOWNWARDS DOUBLE ARROW +21D0;arrowdblleft;LEFTWARDS DOUBLE ARROW +21D2;arrowdblright;RIGHTWARDS DOUBLE ARROW +21D1;arrowdblup;UPWARDS DOUBLE ARROW +2193;arrowdown;DOWNWARDS ARROW +2190;arrowleft;LEFTWARDS ARROW +2192;arrowright;RIGHTWARDS ARROW +2191;arrowup;UPWARDS ARROW +2195;arrowupdn;UP DOWN ARROW +21A8;arrowupdnbse;UP DOWN ARROW WITH BASE +005E;asciicircum;CIRCUMFLEX ACCENT +007E;asciitilde;TILDE +002A;asterisk;ASTERISK +2217;asteriskmath;ASTERISK OPERATOR +0040;at;COMMERCIAL AT +00E3;atilde;LATIN SMALL LETTER A WITH TILDE +0062;b;LATIN SMALL LETTER B +005C;backslash;REVERSE SOLIDUS +007C;bar;VERTICAL LINE +03B2;beta;GREEK SMALL LETTER BETA +2588;block;FULL BLOCK +007B;braceleft;LEFT CURLY BRACKET +007D;braceright;RIGHT CURLY BRACKET +005B;bracketleft;LEFT SQUARE BRACKET +005D;bracketright;RIGHT SQUARE BRACKET +02D8;breve;BREVE +00A6;brokenbar;BROKEN BAR +2022;bullet;BULLET +0063;c;LATIN SMALL LETTER C +0107;cacute;LATIN SMALL LETTER C WITH ACUTE +02C7;caron;CARON +21B5;carriagereturn;DOWNWARDS ARROW WITH CORNER LEFTWARDS +010D;ccaron;LATIN SMALL LETTER C WITH CARON +00E7;ccedilla;LATIN SMALL LETTER C WITH CEDILLA +0109;ccircumflex;LATIN SMALL LETTER C WITH CIRCUMFLEX +010B;cdotaccent;LATIN SMALL LETTER C WITH DOT ABOVE +00B8;cedilla;CEDILLA +00A2;cent;CENT SIGN +03C7;chi;GREEK SMALL LETTER CHI 
+25CB;circle;WHITE CIRCLE +2297;circlemultiply;CIRCLED TIMES +2295;circleplus;CIRCLED PLUS +02C6;circumflex;MODIFIER LETTER CIRCUMFLEX ACCENT +2663;club;BLACK CLUB SUIT +003A;colon;COLON +20A1;colonmonetary;COLON SIGN +002C;comma;COMMA +2245;congruent;APPROXIMATELY EQUAL TO +00A9;copyright;COPYRIGHT SIGN +00A4;currency;CURRENCY SIGN +0064;d;LATIN SMALL LETTER D +2020;dagger;DAGGER +2021;daggerdbl;DOUBLE DAGGER +010F;dcaron;LATIN SMALL LETTER D WITH CARON +0111;dcroat;LATIN SMALL LETTER D WITH STROKE +00B0;degree;DEGREE SIGN +03B4;delta;GREEK SMALL LETTER DELTA +2666;diamond;BLACK DIAMOND SUIT +00A8;dieresis;DIAERESIS +0385;dieresistonos;GREEK DIALYTIKA TONOS +00F7;divide;DIVISION SIGN +2593;dkshade;DARK SHADE +2584;dnblock;LOWER HALF BLOCK +0024;dollar;DOLLAR SIGN +20AB;dong;DONG SIGN +02D9;dotaccent;DOT ABOVE +0323;dotbelowcomb;COMBINING DOT BELOW +0131;dotlessi;LATIN SMALL LETTER DOTLESS I +22C5;dotmath;DOT OPERATOR +0065;e;LATIN SMALL LETTER E +00E9;eacute;LATIN SMALL LETTER E WITH ACUTE +0115;ebreve;LATIN SMALL LETTER E WITH BREVE +011B;ecaron;LATIN SMALL LETTER E WITH CARON +00EA;ecircumflex;LATIN SMALL LETTER E WITH CIRCUMFLEX +00EB;edieresis;LATIN SMALL LETTER E WITH DIAERESIS +0117;edotaccent;LATIN SMALL LETTER E WITH DOT ABOVE +00E8;egrave;LATIN SMALL LETTER E WITH GRAVE +0038;eight;DIGIT EIGHT +2208;element;ELEMENT OF +2026;ellipsis;HORIZONTAL ELLIPSIS +0113;emacron;LATIN SMALL LETTER E WITH MACRON +2014;emdash;EM DASH +2205;emptyset;EMPTY SET +2013;endash;EN DASH +014B;eng;LATIN SMALL LETTER ENG +0119;eogonek;LATIN SMALL LETTER E WITH OGONEK +03B5;epsilon;GREEK SMALL LETTER EPSILON +03AD;epsilontonos;GREEK SMALL LETTER EPSILON WITH TONOS +003D;equal;EQUALS SIGN +2261;equivalence;IDENTICAL TO +212E;estimated;ESTIMATED SYMBOL +03B7;eta;GREEK SMALL LETTER ETA +03AE;etatonos;GREEK SMALL LETTER ETA WITH TONOS +00F0;eth;LATIN SMALL LETTER ETH +0021;exclam;EXCLAMATION MARK +203C;exclamdbl;DOUBLE EXCLAMATION MARK +00A1;exclamdown;INVERTED EXCLAMATION MARK 
+2203;existential;THERE EXISTS +0066;f;LATIN SMALL LETTER F +2640;female;FEMALE SIGN +2012;figuredash;FIGURE DASH +25A0;filledbox;BLACK SQUARE +25AC;filledrect;BLACK RECTANGLE +0035;five;DIGIT FIVE +215D;fiveeighths;VULGAR FRACTION FIVE EIGHTHS +0192;florin;LATIN SMALL LETTER F WITH HOOK +0034;four;DIGIT FOUR +2044;fraction;FRACTION SLASH +20A3;franc;FRENCH FRANC SIGN +0067;g;LATIN SMALL LETTER G +03B3;gamma;GREEK SMALL LETTER GAMMA +011F;gbreve;LATIN SMALL LETTER G WITH BREVE +01E7;gcaron;LATIN SMALL LETTER G WITH CARON +011D;gcircumflex;LATIN SMALL LETTER G WITH CIRCUMFLEX +0121;gdotaccent;LATIN SMALL LETTER G WITH DOT ABOVE +00DF;germandbls;LATIN SMALL LETTER SHARP S +2207;gradient;NABLA +0060;grave;GRAVE ACCENT +0300;gravecomb;COMBINING GRAVE ACCENT +003E;greater;GREATER-THAN SIGN +2265;greaterequal;GREATER-THAN OR EQUAL TO +00AB;guillemotleft;LEFT-POINTING DOUBLE ANGLE QUOTATION MARK +00BB;guillemotright;RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK +2039;guilsinglleft;SINGLE LEFT-POINTING ANGLE QUOTATION MARK +203A;guilsinglright;SINGLE RIGHT-POINTING ANGLE QUOTATION MARK +0068;h;LATIN SMALL LETTER H +0127;hbar;LATIN SMALL LETTER H WITH STROKE +0125;hcircumflex;LATIN SMALL LETTER H WITH CIRCUMFLEX +2665;heart;BLACK HEART SUIT +0309;hookabovecomb;COMBINING HOOK ABOVE +2302;house;HOUSE +02DD;hungarumlaut;DOUBLE ACUTE ACCENT +002D;hyphen;HYPHEN-MINUS +0069;i;LATIN SMALL LETTER I +00ED;iacute;LATIN SMALL LETTER I WITH ACUTE +012D;ibreve;LATIN SMALL LETTER I WITH BREVE +00EE;icircumflex;LATIN SMALL LETTER I WITH CIRCUMFLEX +00EF;idieresis;LATIN SMALL LETTER I WITH DIAERESIS +00EC;igrave;LATIN SMALL LETTER I WITH GRAVE +0133;ij;LATIN SMALL LIGATURE IJ +012B;imacron;LATIN SMALL LETTER I WITH MACRON +221E;infinity;INFINITY +222B;integral;INTEGRAL +2321;integralbt;BOTTOM HALF INTEGRAL +2320;integraltp;TOP HALF INTEGRAL +2229;intersection;INTERSECTION +25D8;invbullet;INVERSE BULLET +25D9;invcircle;INVERSE WHITE CIRCLE +263B;invsmileface;BLACK SMILING FACE 
+012F;iogonek;LATIN SMALL LETTER I WITH OGONEK +03B9;iota;GREEK SMALL LETTER IOTA +03CA;iotadieresis;GREEK SMALL LETTER IOTA WITH DIALYTIKA +0390;iotadieresistonos;GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS +03AF;iotatonos;GREEK SMALL LETTER IOTA WITH TONOS +0129;itilde;LATIN SMALL LETTER I WITH TILDE +006A;j;LATIN SMALL LETTER J +0135;jcircumflex;LATIN SMALL LETTER J WITH CIRCUMFLEX +006B;k;LATIN SMALL LETTER K +03BA;kappa;GREEK SMALL LETTER KAPPA +0138;kgreenlandic;LATIN SMALL LETTER KRA +006C;l;LATIN SMALL LETTER L +013A;lacute;LATIN SMALL LETTER L WITH ACUTE +03BB;lambda;GREEK SMALL LETTER LAMDA +013E;lcaron;LATIN SMALL LETTER L WITH CARON +0140;ldot;LATIN SMALL LETTER L WITH MIDDLE DOT +003C;less;LESS-THAN SIGN +2264;lessequal;LESS-THAN OR EQUAL TO +258C;lfblock;LEFT HALF BLOCK +20A4;lira;LIRA SIGN +2227;logicaland;LOGICAL AND +00AC;logicalnot;NOT SIGN +2228;logicalor;LOGICAL OR +017F;longs;LATIN SMALL LETTER LONG S +25CA;lozenge;LOZENGE +0142;lslash;LATIN SMALL LETTER L WITH STROKE +2591;ltshade;LIGHT SHADE +006D;m;LATIN SMALL LETTER M +00AF;macron;MACRON +2642;male;MALE SIGN +2212;minus;MINUS SIGN +2032;minute;PRIME +00B5;mu;MICRO SIGN +00D7;multiply;MULTIPLICATION SIGN +266A;musicalnote;EIGHTH NOTE +266B;musicalnotedbl;BEAMED EIGHTH NOTES +006E;n;LATIN SMALL LETTER N +0144;nacute;LATIN SMALL LETTER N WITH ACUTE +0149;napostrophe;LATIN SMALL LETTER N PRECEDED BY APOSTROPHE +0148;ncaron;LATIN SMALL LETTER N WITH CARON +0039;nine;DIGIT NINE +2209;notelement;NOT AN ELEMENT OF +2260;notequal;NOT EQUAL TO +2284;notsubset;NOT A SUBSET OF +00F1;ntilde;LATIN SMALL LETTER N WITH TILDE +03BD;nu;GREEK SMALL LETTER NU +0023;numbersign;NUMBER SIGN +006F;o;LATIN SMALL LETTER O +00F3;oacute;LATIN SMALL LETTER O WITH ACUTE +014F;obreve;LATIN SMALL LETTER O WITH BREVE +00F4;ocircumflex;LATIN SMALL LETTER O WITH CIRCUMFLEX +00F6;odieresis;LATIN SMALL LETTER O WITH DIAERESIS +0153;oe;LATIN SMALL LIGATURE OE +02DB;ogonek;OGONEK +00F2;ograve;LATIN SMALL LETTER O WITH GRAVE 
+01A1;ohorn;LATIN SMALL LETTER O WITH HORN +0151;ohungarumlaut;LATIN SMALL LETTER O WITH DOUBLE ACUTE +014D;omacron;LATIN SMALL LETTER O WITH MACRON +03C9;omega;GREEK SMALL LETTER OMEGA +03D6;omega1;GREEK PI SYMBOL +03CE;omegatonos;GREEK SMALL LETTER OMEGA WITH TONOS +03BF;omicron;GREEK SMALL LETTER OMICRON +03CC;omicrontonos;GREEK SMALL LETTER OMICRON WITH TONOS +0031;one;DIGIT ONE +2024;onedotenleader;ONE DOT LEADER +215B;oneeighth;VULGAR FRACTION ONE EIGHTH +00BD;onehalf;VULGAR FRACTION ONE HALF +00BC;onequarter;VULGAR FRACTION ONE QUARTER +2153;onethird;VULGAR FRACTION ONE THIRD +25E6;openbullet;WHITE BULLET +00AA;ordfeminine;FEMININE ORDINAL INDICATOR +00BA;ordmasculine;MASCULINE ORDINAL INDICATOR +221F;orthogonal;RIGHT ANGLE +00F8;oslash;LATIN SMALL LETTER O WITH STROKE +01FF;oslashacute;LATIN SMALL LETTER O WITH STROKE AND ACUTE +00F5;otilde;LATIN SMALL LETTER O WITH TILDE +0070;p;LATIN SMALL LETTER P +00B6;paragraph;PILCROW SIGN +0028;parenleft;LEFT PARENTHESIS +0029;parenright;RIGHT PARENTHESIS +2202;partialdiff;PARTIAL DIFFERENTIAL +0025;percent;PERCENT SIGN +002E;period;FULL STOP +00B7;periodcentered;MIDDLE DOT +22A5;perpendicular;UP TACK +2030;perthousand;PER MILLE SIGN +20A7;peseta;PESETA SIGN +03C6;phi;GREEK SMALL LETTER PHI +03D5;phi1;GREEK PHI SYMBOL +03C0;pi;GREEK SMALL LETTER PI +002B;plus;PLUS SIGN +00B1;plusminus;PLUS-MINUS SIGN +211E;prescription;PRESCRIPTION TAKE +220F;product;N-ARY PRODUCT +2282;propersubset;SUBSET OF +2283;propersuperset;SUPERSET OF +221D;proportional;PROPORTIONAL TO +03C8;psi;GREEK SMALL LETTER PSI +0071;q;LATIN SMALL LETTER Q +003F;question;QUESTION MARK +00BF;questiondown;INVERTED QUESTION MARK +0022;quotedbl;QUOTATION MARK +201E;quotedblbase;DOUBLE LOW-9 QUOTATION MARK +201C;quotedblleft;LEFT DOUBLE QUOTATION MARK +201D;quotedblright;RIGHT DOUBLE QUOTATION MARK +2018;quoteleft;LEFT SINGLE QUOTATION MARK +201B;quotereversed;SINGLE HIGH-REVERSED-9 QUOTATION MARK +2019;quoteright;RIGHT SINGLE QUOTATION MARK 
+201A;quotesinglbase;SINGLE LOW-9 QUOTATION MARK +0027;quotesingle;APOSTROPHE +0072;r;LATIN SMALL LETTER R +0155;racute;LATIN SMALL LETTER R WITH ACUTE +221A;radical;SQUARE ROOT +0159;rcaron;LATIN SMALL LETTER R WITH CARON +2286;reflexsubset;SUBSET OF OR EQUAL TO +2287;reflexsuperset;SUPERSET OF OR EQUAL TO +00AE;registered;REGISTERED SIGN +2310;revlogicalnot;REVERSED NOT SIGN +03C1;rho;GREEK SMALL LETTER RHO +02DA;ring;RING ABOVE +2590;rtblock;RIGHT HALF BLOCK +0073;s;LATIN SMALL LETTER S +015B;sacute;LATIN SMALL LETTER S WITH ACUTE +0161;scaron;LATIN SMALL LETTER S WITH CARON +015F;scedilla;LATIN SMALL LETTER S WITH CEDILLA +015D;scircumflex;LATIN SMALL LETTER S WITH CIRCUMFLEX +2033;second;DOUBLE PRIME +00A7;section;SECTION SIGN +003B;semicolon;SEMICOLON +0037;seven;DIGIT SEVEN +215E;seveneighths;VULGAR FRACTION SEVEN EIGHTHS +2592;shade;MEDIUM SHADE +03C3;sigma;GREEK SMALL LETTER SIGMA +03C2;sigma1;GREEK SMALL LETTER FINAL SIGMA +223C;similar;TILDE OPERATOR +0036;six;DIGIT SIX +002F;slash;SOLIDUS +263A;smileface;WHITE SMILING FACE +0020;space;SPACE +2660;spade;BLACK SPADE SUIT +00A3;sterling;POUND SIGN +220B;suchthat;CONTAINS AS MEMBER +2211;summation;N-ARY SUMMATION +263C;sun;WHITE SUN WITH RAYS +0074;t;LATIN SMALL LETTER T +03C4;tau;GREEK SMALL LETTER TAU +0167;tbar;LATIN SMALL LETTER T WITH STROKE +0165;tcaron;LATIN SMALL LETTER T WITH CARON +2234;therefore;THEREFORE +03B8;theta;GREEK SMALL LETTER THETA +03D1;theta1;GREEK THETA SYMBOL +00FE;thorn;LATIN SMALL LETTER THORN +0033;three;DIGIT THREE +215C;threeeighths;VULGAR FRACTION THREE EIGHTHS +00BE;threequarters;VULGAR FRACTION THREE QUARTERS +02DC;tilde;SMALL TILDE +0303;tildecomb;COMBINING TILDE +0384;tonos;GREEK TONOS +2122;trademark;TRADE MARK SIGN +25BC;triagdn;BLACK DOWN-POINTING TRIANGLE +25C4;triaglf;BLACK LEFT-POINTING POINTER +25BA;triagrt;BLACK RIGHT-POINTING POINTER +25B2;triagup;BLACK UP-POINTING TRIANGLE +0032;two;DIGIT TWO +2025;twodotenleader;TWO DOT LEADER +2154;twothirds;VULGAR FRACTION TWO 
THIRDS +0075;u;LATIN SMALL LETTER U +00FA;uacute;LATIN SMALL LETTER U WITH ACUTE +016D;ubreve;LATIN SMALL LETTER U WITH BREVE +00FB;ucircumflex;LATIN SMALL LETTER U WITH CIRCUMFLEX +00FC;udieresis;LATIN SMALL LETTER U WITH DIAERESIS +00F9;ugrave;LATIN SMALL LETTER U WITH GRAVE +01B0;uhorn;LATIN SMALL LETTER U WITH HORN +0171;uhungarumlaut;LATIN SMALL LETTER U WITH DOUBLE ACUTE +016B;umacron;LATIN SMALL LETTER U WITH MACRON +005F;underscore;LOW LINE +2017;underscoredbl;DOUBLE LOW LINE +222A;union;UNION +2200;universal;FOR ALL +0173;uogonek;LATIN SMALL LETTER U WITH OGONEK +2580;upblock;UPPER HALF BLOCK +03C5;upsilon;GREEK SMALL LETTER UPSILON +03CB;upsilondieresis;GREEK SMALL LETTER UPSILON WITH DIALYTIKA +03B0;upsilondieresistonos;GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS +03CD;upsilontonos;GREEK SMALL LETTER UPSILON WITH TONOS +016F;uring;LATIN SMALL LETTER U WITH RING ABOVE +0169;utilde;LATIN SMALL LETTER U WITH TILDE +0076;v;LATIN SMALL LETTER V +0077;w;LATIN SMALL LETTER W +1E83;wacute;LATIN SMALL LETTER W WITH ACUTE +0175;wcircumflex;LATIN SMALL LETTER W WITH CIRCUMFLEX +1E85;wdieresis;LATIN SMALL LETTER W WITH DIAERESIS +2118;weierstrass;SCRIPT CAPITAL P +1E81;wgrave;LATIN SMALL LETTER W WITH GRAVE +0078;x;LATIN SMALL LETTER X +03BE;xi;GREEK SMALL LETTER XI +0079;y;LATIN SMALL LETTER Y +00FD;yacute;LATIN SMALL LETTER Y WITH ACUTE +0177;ycircumflex;LATIN SMALL LETTER Y WITH CIRCUMFLEX +00FF;ydieresis;LATIN SMALL LETTER Y WITH DIAERESIS +00A5;yen;YEN SIGN +1EF3;ygrave;LATIN SMALL LETTER Y WITH GRAVE +007A;z;LATIN SMALL LETTER Z +017A;zacute;LATIN SMALL LETTER Z WITH ACUTE +017E;zcaron;LATIN SMALL LETTER Z WITH CARON +017C;zdotaccent;LATIN SMALL LETTER Z WITH DOT ABOVE +0030;zero;DIGIT ZERO +03B6;zeta;GREEK SMALL LETTER ZETA +# END +""" + + +class AGLError(Exception): + pass + +LEGACY_AGL2UV = {} +AGL2UV = {} +UV2AGL = {} + +def _builddicts(): + import re + + lines = _aglText.splitlines() + + parseAGL_RE = re.compile("([A-Za-z0-9]+);((?:[0-9A-F]{4})(?: 
(?:[0-9A-F]{4}))*)$") + + for line in lines: + if not line or line[:1] == '#': + continue + m = parseAGL_RE.match(line) + if not m: + raise AGLError("syntax error in glyphlist.txt: %s" % repr(line[:20])) + unicodes = m.group(2) + assert len(unicodes) % 5 == 4 + unicodes = [int(unicode, 16) for unicode in unicodes.split()] + glyphName = tostr(m.group(1)) + LEGACY_AGL2UV[glyphName] = unicodes + + lines = _aglfnText.splitlines() + + parseAGLFN_RE = re.compile("([0-9A-F]{4});([A-Za-z0-9]+);.*?$") + + for line in lines: + if not line or line[:1] == '#': + continue + m = parseAGLFN_RE.match(line) + if not m: + raise AGLError("syntax error in aglfn.txt: %s" % repr(line[:20])) + unicode = m.group(1) + assert len(unicode) == 4 + unicode = int(unicode, 16) + glyphName = tostr(m.group(2)) + AGL2UV[glyphName] = unicode + UV2AGL[unicode] = glyphName + +_builddicts() + + +def toUnicode(glyph, isZapfDingbats=False): + """Convert glyph names to Unicode, such as ``'longs_t.oldstyle'`` --> ``u'Å¿t'`` + + If ``isZapfDingbats`` is ``True``, the implementation recognizes additional + glyph names (as required by the AGL specification). + """ + # https://github.com/adobe-type-tools/agl-specification#2-the-mapping + # + # 1. Drop all the characters from the glyph name starting with + # the first occurrence of a period (U+002E; FULL STOP), if any. + glyph = glyph.split(".", 1)[0] + + # 2. Split the remaining string into a sequence of components, + # using underscore (U+005F; LOW LINE) as the delimiter. + components = glyph.split("_") + + # 3. Map each component to a character string according to the + # procedure below, and concatenate those strings; the result + # is the character string to which the glyph name is mapped. 
+ result = [_glyphComponentToUnicode(c, isZapfDingbats) + for c in components] + return "".join(result) + + +def _glyphComponentToUnicode(component, isZapfDingbats): + # If the font is Zapf Dingbats (PostScript FontName: ZapfDingbats), + # and the component is in the ITC Zapf Dingbats Glyph List, then + # map it to the corresponding character in that list. + dingbat = _zapfDingbatsToUnicode(component) if isZapfDingbats else None + if dingbat: + return dingbat + + # Otherwise, if the component is in AGL, then map it + # to the corresponding character in that list. + uchars = LEGACY_AGL2UV.get(component) + if uchars: + return "".join(map(chr, uchars)) + + # Otherwise, if the component is of the form "uni" (U+0075, + # U+006E, and U+0069) followed by a sequence of uppercase + # hexadecimal digits (0–9 and A–F, meaning U+0030 through + # U+0039 and U+0041 through U+0046), if the length of that + # sequence is a multiple of four, and if each group of four + # digits represents a value in the ranges 0000 through D7FF + # or E000 through FFFF, then interpret each as a Unicode scalar + # value and map the component to the string made of those + # scalar values. Note that the range and digit-length + # restrictions mean that the "uni" glyph name prefix can be + # used only with UVs in the Basic Multilingual Plane (BMP). + uni = _uniToUnicode(component) + if uni: + return uni + + # Otherwise, if the component is of the form "u" (U+0075) + # followed by a sequence of four to six uppercase hexadecimal + # digits (0–9 and A–F, meaning U+0030 through U+0039 and + # U+0041 through U+0046), and those digits represents a value + # in the ranges 0000 through D7FF or E000 through 10FFFF, then + # interpret it as a Unicode scalar value and map the component + # to the string made of this scalar value. + uni = _uToUnicode(component) + if uni: + return uni + + # Otherwise, map the component to an empty string. 
+ return '' + + +# https://github.com/adobe-type-tools/agl-aglfn/blob/master/zapfdingbats.txt +_AGL_ZAPF_DINGBATS = ( + " âœâœ‚✄☎✆âœâœžâœŸâœ âœ¡â˜›â˜žâœŒâœâœŽâœâœ‘✒✓✔✕✖✗✘✙✚✛✜✢✣✤✥✦✧★✩✪✫✬✭✮✯✰✱✲✳✴✵✶✷✸✹✺✻✼✽✾✿â€" + "ââ‚âƒâ„â…â†â‡âˆâ‰âŠâ‹â—ââ– ââ‘▲▼◆■◗â˜â™âšâ¯â±â²â³â¨â©â¬â­âªâ«â´âµâ›âœââžâ¡â¢â£â¤âœâ¥â¦â§â™ â™¥â™¦â™£ ✉✈✇" + "①②③④⑤⑥⑦⑧⑨⑩â¶â·â¸â¹âºâ»â¼â½â¾â¿âž€âžâž‚➃➄➅➆➇➈➉➊➋➌âžâžŽâžâžâž‘➒➓➔→➣↔" + "↕➙➛➜âžâžžâžŸâž âž¡âž¢âž¤âž¥âž¦âž§âž¨âž©âž«âž­âž¯âž²âž³âžµâž¸âžºâž»âž¼âž½âž¾âžšâžªâž¶âž¹âž˜âž´âž·âž¬âž®âž±âœƒââ’â®â°") + + +def _zapfDingbatsToUnicode(glyph): + """Helper for toUnicode().""" + if len(glyph) < 2 or glyph[0] != 'a': + return None + try: + gid = int(glyph[1:]) + except ValueError: + return None + if gid < 0 or gid >= len(_AGL_ZAPF_DINGBATS): + return None + uchar = _AGL_ZAPF_DINGBATS[gid] + return uchar if uchar != ' ' else None + + +_re_uni = re.compile("^uni([0-9A-F]+)$") + + +def _uniToUnicode(component): + """Helper for toUnicode() to handle "uniABCD" components.""" + match = _re_uni.match(component) + if match is None: + return None + digits = match.group(1) + if len(digits) % 4 != 0: + return None + chars = [int(digits[i : i + 4], 16) + for i in range(0, len(digits), 4)] + if any(c >= 0xD800 and c <= 0xDFFF for c in chars): + # The AGL specification explicitly excluded surrogate pairs. 
+ return None + return ''.join([chr(c) for c in chars]) + + +_re_u = re.compile("^u([0-9A-F]{4,6})$") + + +def _uToUnicode(component): + """Helper for toUnicode() to handle "u1ABCD" components.""" + match = _re_u.match(component) + if match is None: + return None + digits = match.group(1) + try: + value = int(digits, 16) + except ValueError: + return None + if ((value >= 0x0000 and value <= 0xD7FF) or + (value >= 0xE000 and value <= 0x10FFFF)): + return chr(value) + return None diff --git a/.venv/lib/python3.9/site-packages/fontTools/cffLib/__init__.py b/.venv/lib/python3.9/site-packages/fontTools/cffLib/__init__.py new file mode 100644 index 00000000..07d0d513 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/cffLib/__init__.py @@ -0,0 +1,3023 @@ +"""cffLib: read/write Adobe CFF fonts + +OpenType fonts with PostScript outlines contain a completely independent +font file, Adobe's *Compact Font Format*. So dealing with OpenType fonts +requires also dealing with CFF. This module allows you to read and write +fonts written in the CFF format. + +In 2016, OpenType 1.8 introduced the `CFF2 `_ +format which, along with other changes, extended the CFF format to deal with +the demands of variable fonts. This module parses both original CFF and CFF2. 
+ +""" + +from fontTools.misc import sstruct +from fontTools.misc import psCharStrings +from fontTools.misc.arrayTools import unionRect, intRect +from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes, tostr, safeEval +from fontTools.ttLib import TTFont +from fontTools.ttLib.tables.otBase import OTTableWriter +from fontTools.ttLib.tables.otBase import OTTableReader +from fontTools.ttLib.tables import otTables as ot +from io import BytesIO +import struct +import logging +import re + +# mute cffLib debug messages when running ttx in verbose mode +DEBUG = logging.DEBUG - 1 +log = logging.getLogger(__name__) + +cffHeaderFormat = """ + major: B + minor: B + hdrSize: B +""" + +maxStackLimit = 513 +# maxstack operator has been deprecated. max stack is now always 513. + + +class StopHintCountEvent(Exception): + pass + + +class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler): + stop_hintcount_ops = ("op_hintmask", "op_cntrmask", "op_rmoveto", "op_hmoveto", + "op_vmoveto") + + def __init__(self, localSubrs, globalSubrs, private=None): + psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, + private) + + def execute(self, charString): + self.need_hintcount = True # until proven otherwise + for op_name in self.stop_hintcount_ops: + setattr(self, op_name, self.stop_hint_count) + + if hasattr(charString, '_desubroutinized'): + # If a charstring has already been desubroutinized, we will still + # need to execute it if we need to count hints in order to + # compute the byte length for mask arguments, and haven't finished + # counting hints pairs. 
+ if self.need_hintcount and self.callingStack: + try: + psCharStrings.SimpleT2Decompiler.execute(self, charString) + except StopHintCountEvent: + del self.callingStack[-1] + return + + charString._patches = [] + psCharStrings.SimpleT2Decompiler.execute(self, charString) + desubroutinized = charString.program[:] + for idx, expansion in reversed(charString._patches): + assert idx >= 2 + assert desubroutinized[idx - 1] in ['callsubr', 'callgsubr'], desubroutinized[idx - 1] + assert type(desubroutinized[idx - 2]) == int + if expansion[-1] == 'return': + expansion = expansion[:-1] + desubroutinized[idx-2:idx] = expansion + if not self.private.in_cff2: + if 'endchar' in desubroutinized: + # Cut off after first endchar + desubroutinized = desubroutinized[:desubroutinized.index('endchar') + 1] + else: + if not len(desubroutinized) or desubroutinized[-1] != 'return': + desubroutinized.append('return') + + charString._desubroutinized = desubroutinized + del charString._patches + + def op_callsubr(self, index): + subr = self.localSubrs[self.operandStack[-1]+self.localBias] + psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) + self.processSubr(index, subr) + + def op_callgsubr(self, index): + subr = self.globalSubrs[self.operandStack[-1]+self.globalBias] + psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) + self.processSubr(index, subr) + + def stop_hint_count(self, *args): + self.need_hintcount = False + for op_name in self.stop_hintcount_ops: + setattr(self, op_name, None) + cs = self.callingStack[-1] + if hasattr(cs, '_desubroutinized'): + raise StopHintCountEvent() + + def op_hintmask(self, index): + psCharStrings.SimpleT2Decompiler.op_hintmask(self, index) + if self.need_hintcount: + self.stop_hint_count() + + def processSubr(self, index, subr): + cs = self.callingStack[-1] + if not hasattr(cs, '_desubroutinized'): + cs._patches.append((index, subr._desubroutinized)) + + +class CFFFontSet(object): + """A CFF font "file" can contain more than one font, 
although this is + extremely rare (and not allowed within OpenType fonts). + + This class is the entry point for parsing a CFF table. To actually + manipulate the data inside the CFF font, you will want to access the + ``CFFFontSet``'s :class:`TopDict` object. To do this, a ``CFFFontSet`` + object can either be treated as a dictionary (with appropriate + ``keys()`` and ``values()`` methods) mapping font names to :class:`TopDict` + objects, or as a list. + + .. code:: python + + from fontTools import ttLib + tt = ttLib.TTFont("Tests/cffLib/data/LinLibertine_RBI.otf") + tt["CFF "].cff + # + tt["CFF "].cff[0] # Here's your actual font data + # + + """ + + def decompile(self, file, otFont, isCFF2=None): + """Parse a binary CFF file into an internal representation. ``file`` + should be a file handle object. ``otFont`` is the top-level + :py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file. + + If ``isCFF2`` is passed and set to ``True`` or ``False``, then the + library makes an assertion that the CFF header is of the appropriate + version. 
+ """ + + self.otFont = otFont + sstruct.unpack(cffHeaderFormat, file.read(3), self) + if isCFF2 is not None: + # called from ttLib: assert 'major' as read from file matches the + # expected version + expected_major = (2 if isCFF2 else 1) + if self.major != expected_major: + raise ValueError( + "Invalid CFF 'major' version: expected %d, found %d" % + (expected_major, self.major)) + else: + # use 'major' version from file to determine if isCFF2 + assert self.major in (1, 2), "Unknown CFF format" + isCFF2 = self.major == 2 + if not isCFF2: + self.offSize = struct.unpack("B", file.read(1))[0] + file.seek(self.hdrSize) + self.fontNames = list(tostr(s) for s in Index(file, isCFF2=isCFF2)) + self.topDictIndex = TopDictIndex(file, isCFF2=isCFF2) + self.strings = IndexedStrings(file) + else: # isCFF2 + self.topDictSize = struct.unpack(">H", file.read(2))[0] + file.seek(self.hdrSize) + self.fontNames = ["CFF2Font"] + cff2GetGlyphOrder = otFont.getGlyphOrder + # in CFF2, offsetSize is the size of the TopDict data. + self.topDictIndex = TopDictIndex( + file, cff2GetGlyphOrder, self.topDictSize, isCFF2=isCFF2) + self.strings = None + self.GlobalSubrs = GlobalSubrsIndex(file, isCFF2=isCFF2) + self.topDictIndex.strings = self.strings + self.topDictIndex.GlobalSubrs = self.GlobalSubrs + + def __len__(self): + return len(self.fontNames) + + def keys(self): + return list(self.fontNames) + + def values(self): + return self.topDictIndex + + def __getitem__(self, nameOrIndex): + """ Return TopDict instance identified by name (str) or index (int + or any object that implements `__index__`). 
+ """ + if hasattr(nameOrIndex, "__index__"): + index = nameOrIndex.__index__() + elif isinstance(nameOrIndex, str): + name = nameOrIndex + try: + index = self.fontNames.index(name) + except ValueError: + raise KeyError(nameOrIndex) + else: + raise TypeError(nameOrIndex) + return self.topDictIndex[index] + + def compile(self, file, otFont, isCFF2=None): + """Write the object back into binary representation onto the given file. + ``file`` should be a file handle object. ``otFont`` is the top-level + :py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file. + + If ``isCFF2`` is passed and set to ``True`` or ``False``, then the + library makes an assertion that the CFF header is of the appropriate + version. + """ + self.otFont = otFont + if isCFF2 is not None: + # called from ttLib: assert 'major' value matches expected version + expected_major = (2 if isCFF2 else 1) + if self.major != expected_major: + raise ValueError( + "Invalid CFF 'major' version: expected %d, found %d" % + (expected_major, self.major)) + else: + # use current 'major' value to determine output format + assert self.major in (1, 2), "Unknown CFF format" + isCFF2 = self.major == 2 + + if otFont.recalcBBoxes and not isCFF2: + for topDict in self.topDictIndex: + topDict.recalcFontBBox() + + if not isCFF2: + strings = IndexedStrings() + else: + strings = None + writer = CFFWriter(isCFF2) + topCompiler = self.topDictIndex.getCompiler(strings, self, isCFF2=isCFF2) + if isCFF2: + self.hdrSize = 5 + writer.add(sstruct.pack(cffHeaderFormat, self)) + # Note: topDictSize will most likely change in CFFWriter.toFile(). + self.topDictSize = topCompiler.getDataLength() + writer.add(struct.pack(">H", self.topDictSize)) + else: + self.hdrSize = 4 + self.offSize = 4 # will most likely change in CFFWriter.toFile(). 
+ writer.add(sstruct.pack(cffHeaderFormat, self)) + writer.add(struct.pack("B", self.offSize)) + if not isCFF2: + fontNames = Index() + for name in self.fontNames: + fontNames.append(name) + writer.add(fontNames.getCompiler(strings, self, isCFF2=isCFF2)) + writer.add(topCompiler) + if not isCFF2: + writer.add(strings.getCompiler()) + writer.add(self.GlobalSubrs.getCompiler(strings, self, isCFF2=isCFF2)) + + for topDict in self.topDictIndex: + if not hasattr(topDict, "charset") or topDict.charset is None: + charset = otFont.getGlyphOrder() + topDict.charset = charset + children = topCompiler.getChildren(strings) + for child in children: + writer.add(child) + + writer.toFile(file) + + def toXML(self, xmlWriter): + """Write the object into XML representation onto the given + :class:`fontTools.misc.xmlWriter.XMLWriter`. + + .. code:: python + + writer = xmlWriter.XMLWriter(sys.stdout) + tt["CFF "].cff.toXML(writer) + + """ + + xmlWriter.simpletag("major", value=self.major) + xmlWriter.newline() + xmlWriter.simpletag("minor", value=self.minor) + xmlWriter.newline() + for fontName in self.fontNames: + xmlWriter.begintag("CFFFont", name=tostr(fontName)) + xmlWriter.newline() + font = self[fontName] + font.toXML(xmlWriter) + xmlWriter.endtag("CFFFont") + xmlWriter.newline() + xmlWriter.newline() + xmlWriter.begintag("GlobalSubrs") + xmlWriter.newline() + self.GlobalSubrs.toXML(xmlWriter) + xmlWriter.endtag("GlobalSubrs") + xmlWriter.newline() + + def fromXML(self, name, attrs, content, otFont=None): + """Reads data from the XML element into the ``CFFFontSet`` object.""" + self.otFont = otFont + + # set defaults. These will be replaced if there are entries for them + # in the XML file. + if not hasattr(self, "major"): + self.major = 1 + if not hasattr(self, "minor"): + self.minor = 0 + + if name == "CFFFont": + if self.major == 1: + if not hasattr(self, "offSize"): + # this will be recalculated when the cff is compiled. 
+ self.offSize = 4 + if not hasattr(self, "hdrSize"): + self.hdrSize = 4 + if not hasattr(self, "GlobalSubrs"): + self.GlobalSubrs = GlobalSubrsIndex() + if not hasattr(self, "fontNames"): + self.fontNames = [] + self.topDictIndex = TopDictIndex() + fontName = attrs["name"] + self.fontNames.append(fontName) + topDict = TopDict(GlobalSubrs=self.GlobalSubrs) + topDict.charset = None # gets filled in later + elif self.major == 2: + if not hasattr(self, "hdrSize"): + self.hdrSize = 5 + if not hasattr(self, "GlobalSubrs"): + self.GlobalSubrs = GlobalSubrsIndex() + if not hasattr(self, "fontNames"): + self.fontNames = ["CFF2Font"] + cff2GetGlyphOrder = self.otFont.getGlyphOrder + topDict = TopDict( + GlobalSubrs=self.GlobalSubrs, + cff2GetGlyphOrder=cff2GetGlyphOrder) + self.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder, None) + self.topDictIndex.append(topDict) + for element in content: + if isinstance(element, str): + continue + name, attrs, content = element + topDict.fromXML(name, attrs, content) + + if hasattr(topDict, "VarStore") and topDict.FDArray[0].vstore is None: + fdArray = topDict.FDArray + for fontDict in fdArray: + if hasattr(fontDict, "Private"): + fontDict.Private.vstore = topDict.VarStore + + elif name == "GlobalSubrs": + subrCharStringClass = psCharStrings.T2CharString + if not hasattr(self, "GlobalSubrs"): + self.GlobalSubrs = GlobalSubrsIndex() + for element in content: + if isinstance(element, str): + continue + name, attrs, content = element + subr = subrCharStringClass() + subr.fromXML(name, attrs, content) + self.GlobalSubrs.append(subr) + elif name == "major": + self.major = int(attrs['value']) + elif name == "minor": + self.minor = int(attrs['value']) + + def convertCFFToCFF2(self, otFont): + """Converts this object from CFF format to CFF2 format. This conversion + is done 'in-place'. The conversion cannot be reversed. + + This assumes a decompiled CFF table. (i.e. 
that the object has been + filled via :meth:`decompile`.)""" + self.major = 2 + cff2GetGlyphOrder = self.otFont.getGlyphOrder + topDictData = TopDictIndex(None, cff2GetGlyphOrder, None) + topDictData.items = self.topDictIndex.items + self.topDictIndex = topDictData + topDict = topDictData[0] + if hasattr(topDict, 'Private'): + privateDict = topDict.Private + else: + privateDict = None + opOrder = buildOrder(topDictOperators2) + topDict.order = opOrder + topDict.cff2GetGlyphOrder = cff2GetGlyphOrder + for entry in topDictOperators: + key = entry[1] + if key not in opOrder: + if key in topDict.rawDict: + del topDict.rawDict[key] + if hasattr(topDict, key): + delattr(topDict, key) + + if not hasattr(topDict, "FDArray"): + fdArray = topDict.FDArray = FDArrayIndex() + fdArray.strings = None + fdArray.GlobalSubrs = topDict.GlobalSubrs + topDict.GlobalSubrs.fdArray = fdArray + charStrings = topDict.CharStrings + if charStrings.charStringsAreIndexed: + charStrings.charStringsIndex.fdArray = fdArray + else: + charStrings.fdArray = fdArray + fontDict = FontDict() + fontDict.setCFF2(True) + fdArray.append(fontDict) + fontDict.Private = privateDict + privateOpOrder = buildOrder(privateDictOperators2) + for entry in privateDictOperators: + key = entry[1] + if key not in privateOpOrder: + if key in privateDict.rawDict: + # print "Removing private dict", key + del privateDict.rawDict[key] + if hasattr(privateDict, key): + delattr(privateDict, key) + # print "Removing privateDict attr", key + else: + # clean up the PrivateDicts in the fdArray + fdArray = topDict.FDArray + privateOpOrder = buildOrder(privateDictOperators2) + for fontDict in fdArray: + fontDict.setCFF2(True) + for key in fontDict.rawDict.keys(): + if key not in fontDict.order: + del fontDict.rawDict[key] + if hasattr(fontDict, key): + delattr(fontDict, key) + + privateDict = fontDict.Private + for entry in privateDictOperators: + key = entry[1] + if key not in privateOpOrder: + if key in privateDict.rawDict: + # 
print "Removing private dict", key + del privateDict.rawDict[key] + if hasattr(privateDict, key): + delattr(privateDict, key) + # print "Removing privateDict attr", key + # At this point, the Subrs and Charstrings are all still T2Charstring class + # easiest to fix this by compiling, then decompiling again + file = BytesIO() + self.compile(file, otFont, isCFF2=True) + file.seek(0) + self.decompile(file, otFont, isCFF2=True) + + def desubroutinize(self): + for fontName in self.fontNames: + font = self[fontName] + cs = font.CharStrings + for g in font.charset: + c, _ = cs.getItemAndSelector(g) + c.decompile() + subrs = getattr(c.private, "Subrs", []) + decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs, c.private) + decompiler.execute(c) + c.program = c._desubroutinized + del c._desubroutinized + # Delete all the local subrs + if hasattr(font, 'FDArray'): + for fd in font.FDArray: + pd = fd.Private + if hasattr(pd, 'Subrs'): + del pd.Subrs + if 'Subrs' in pd.rawDict: + del pd.rawDict['Subrs'] + else: + pd = font.Private + if hasattr(pd, 'Subrs'): + del pd.Subrs + if 'Subrs' in pd.rawDict: + del pd.rawDict['Subrs'] + # as well as the global subrs + self.GlobalSubrs.clear() + + +class CFFWriter(object): + """Helper class for serializing CFF data to binary. 
Used by + :meth:`CFFFontSet.compile`.""" + def __init__(self, isCFF2): + self.data = [] + self.isCFF2 = isCFF2 + + def add(self, table): + self.data.append(table) + + def toFile(self, file): + lastPosList = None + count = 1 + while True: + log.log(DEBUG, "CFFWriter.toFile() iteration: %d", count) + count = count + 1 + pos = 0 + posList = [pos] + for item in self.data: + if hasattr(item, "getDataLength"): + endPos = pos + item.getDataLength() + if isinstance(item, TopDictIndexCompiler) and item.isCFF2: + self.topDictSize = item.getDataLength() + else: + endPos = pos + len(item) + if hasattr(item, "setPos"): + item.setPos(pos, endPos) + pos = endPos + posList.append(pos) + if posList == lastPosList: + break + lastPosList = posList + log.log(DEBUG, "CFFWriter.toFile() writing to file.") + begin = file.tell() + if self.isCFF2: + self.data[1] = struct.pack(">H", self.topDictSize) + else: + self.offSize = calcOffSize(lastPosList[-1]) + self.data[1] = struct.pack("B", self.offSize) + posList = [0] + for item in self.data: + if hasattr(item, "toFile"): + item.toFile(file) + else: + file.write(item) + posList.append(file.tell() - begin) + assert posList == lastPosList + + +def calcOffSize(largestOffset): + if largestOffset < 0x100: + offSize = 1 + elif largestOffset < 0x10000: + offSize = 2 + elif largestOffset < 0x1000000: + offSize = 3 + else: + offSize = 4 + return offSize + + +class IndexCompiler(object): + """Base class for writing CFF `INDEX data `_ + to binary.""" + + def __init__(self, items, strings, parent, isCFF2=None): + if isCFF2 is None and hasattr(parent, "isCFF2"): + isCFF2 = parent.isCFF2 + assert isCFF2 is not None + self.isCFF2 = isCFF2 + self.items = self.getItems(items, strings) + self.parent = parent + + def getItems(self, items, strings): + return items + + def getOffsets(self): + # An empty INDEX contains only the count field. 
+ if self.items: + pos = 1 + offsets = [pos] + for item in self.items: + if hasattr(item, "getDataLength"): + pos = pos + item.getDataLength() + else: + pos = pos + len(item) + offsets.append(pos) + else: + offsets = [] + return offsets + + def getDataLength(self): + if self.isCFF2: + countSize = 4 + else: + countSize = 2 + + if self.items: + lastOffset = self.getOffsets()[-1] + offSize = calcOffSize(lastOffset) + dataLength = ( + countSize + # count + 1 + # offSize + (len(self.items) + 1) * offSize + # the offsets + lastOffset - 1 # size of object data + ) + else: + # count. For empty INDEX tables, this is the only entry. + dataLength = countSize + + return dataLength + + def toFile(self, file): + offsets = self.getOffsets() + if self.isCFF2: + writeCard32(file, len(self.items)) + else: + writeCard16(file, len(self.items)) + # An empty INDEX contains only the count field. + if self.items: + offSize = calcOffSize(offsets[-1]) + writeCard8(file, offSize) + offSize = -offSize + pack = struct.pack + for offset in offsets: + binOffset = pack(">l", offset)[offSize:] + assert len(binOffset) == -offSize + file.write(binOffset) + for item in self.items: + if hasattr(item, "toFile"): + item.toFile(file) + else: + data = tobytes(item, encoding="latin1") + file.write(data) + + +class IndexedStringsCompiler(IndexCompiler): + + def getItems(self, items, strings): + return items.strings + + +class TopDictIndexCompiler(IndexCompiler): + """Helper class for writing the TopDict to binary.""" + + def getItems(self, items, strings): + out = [] + for item in items: + out.append(item.getCompiler(strings, self)) + return out + + def getChildren(self, strings): + children = [] + for topDict in self.items: + children.extend(topDict.getChildren(strings)) + return children + + def getOffsets(self): + if self.isCFF2: + offsets = [0, self.items[0].getDataLength()] + return offsets + else: + return super(TopDictIndexCompiler, self).getOffsets() + + def getDataLength(self): + if self.isCFF2: + 
dataLength = self.items[0].getDataLength() + return dataLength + else: + return super(TopDictIndexCompiler, self).getDataLength() + + def toFile(self, file): + if self.isCFF2: + self.items[0].toFile(file) + else: + super(TopDictIndexCompiler, self).toFile(file) + + +class FDArrayIndexCompiler(IndexCompiler): + """Helper class for writing the + `Font DICT INDEX `_ + to binary.""" + + def getItems(self, items, strings): + out = [] + for item in items: + out.append(item.getCompiler(strings, self)) + return out + + def getChildren(self, strings): + children = [] + for fontDict in self.items: + children.extend(fontDict.getChildren(strings)) + return children + + def toFile(self, file): + offsets = self.getOffsets() + if self.isCFF2: + writeCard32(file, len(self.items)) + else: + writeCard16(file, len(self.items)) + offSize = calcOffSize(offsets[-1]) + writeCard8(file, offSize) + offSize = -offSize + pack = struct.pack + for offset in offsets: + binOffset = pack(">l", offset)[offSize:] + assert len(binOffset) == -offSize + file.write(binOffset) + for item in self.items: + if hasattr(item, "toFile"): + item.toFile(file) + else: + file.write(item) + + def setPos(self, pos, endPos): + self.parent.rawDict["FDArray"] = pos + + +class GlobalSubrsCompiler(IndexCompiler): + """Helper class for writing the `global subroutine INDEX `_ + to binary.""" + + def getItems(self, items, strings): + out = [] + for cs in items: + cs.compile(self.isCFF2) + out.append(cs.bytecode) + return out + + +class SubrsCompiler(GlobalSubrsCompiler): + """Helper class for writing the `local subroutine INDEX `_ + to binary.""" + + def setPos(self, pos, endPos): + offset = pos - self.parent.pos + self.parent.rawDict["Subrs"] = offset + + +class CharStringsCompiler(GlobalSubrsCompiler): + """Helper class for writing the `CharStrings INDEX `_ + to binary.""" + def getItems(self, items, strings): + out = [] + for cs in items: + cs.compile(self.isCFF2) + out.append(cs.bytecode) + return out + + def 
setPos(self, pos, endPos): + self.parent.rawDict["CharStrings"] = pos + + +class Index(object): + """This class represents what the CFF spec calls an INDEX (an array of + variable-sized objects). `Index` items can be addressed and set using + Python list indexing.""" + + compilerClass = IndexCompiler + + def __init__(self, file=None, isCFF2=None): + assert (isCFF2 is None) == (file is None) + self.items = [] + name = self.__class__.__name__ + if file is None: + return + self._isCFF2 = isCFF2 + log.log(DEBUG, "loading %s at %s", name, file.tell()) + self.file = file + if isCFF2: + count = readCard32(file) + else: + count = readCard16(file) + if count == 0: + return + self.items = [None] * count + offSize = readCard8(file) + log.log(DEBUG, " index count: %s offSize: %s", count, offSize) + assert offSize <= 4, "offSize too large: %s" % offSize + self.offsets = offsets = [] + pad = b'\0' * (4 - offSize) + for index in range(count + 1): + chunk = file.read(offSize) + chunk = pad + chunk + offset, = struct.unpack(">L", chunk) + offsets.append(int(offset)) + self.offsetBase = file.tell() - 1 + file.seek(self.offsetBase + offsets[-1]) # pretend we've read the whole lot + log.log(DEBUG, " end of %s at %s", name, file.tell()) + + def __len__(self): + return len(self.items) + + def __getitem__(self, index): + item = self.items[index] + if item is not None: + return item + offset = self.offsets[index] + self.offsetBase + size = self.offsets[index + 1] - self.offsets[index] + file = self.file + file.seek(offset) + data = file.read(size) + assert len(data) == size + item = self.produceItem(index, data, file, offset) + self.items[index] = item + return item + + def __setitem__(self, index, item): + self.items[index] = item + + def produceItem(self, index, data, file, offset): + return data + + def append(self, item): + """Add an item to an INDEX.""" + self.items.append(item) + + def getCompiler(self, strings, parent, isCFF2=None): + return self.compilerClass(self, strings, 
parent, isCFF2=isCFF2) + + def clear(self): + """Empty the INDEX.""" + del self.items[:] + + +class GlobalSubrsIndex(Index): + """This index contains all the global subroutines in the font. A global + subroutine is a set of ``CharString`` data which is accessible to any + glyph in the font, and are used to store repeated instructions - for + example, components may be encoded as global subroutines, but so could + hinting instructions. + + Remember that when interpreting a ``callgsubr`` instruction (or indeed + a ``callsubr`` instruction) that you will need to add the "subroutine + number bias" to number given: + + .. code:: python + + tt = ttLib.TTFont("Almendra-Bold.otf") + u = tt["CFF "].cff[0].CharStrings["udieresis"] + u.decompile() + + u.toXML(XMLWriter(sys.stdout)) + # + # -64 callgsubr <-- Subroutine which implements the dieresis mark + # + + tt["CFF "].cff[0].GlobalSubrs[-64] # <-- WRONG + # + + tt["CFF "].cff[0].GlobalSubrs[-64 + 107] # <-- RIGHT + # + + ("The bias applied depends on the number of subrs (gsubrs). If the number of + subrs (gsubrs) is less than 1240, the bias is 107. 
Otherwise if it is less + than 33900, it is 1131; otherwise it is 32768.", + `Subroutine Operators `) + """ + + compilerClass = GlobalSubrsCompiler + subrClass = psCharStrings.T2CharString + charStringClass = psCharStrings.T2CharString + + def __init__(self, file=None, globalSubrs=None, private=None, + fdSelect=None, fdArray=None, isCFF2=None): + super(GlobalSubrsIndex, self).__init__(file, isCFF2=isCFF2) + self.globalSubrs = globalSubrs + self.private = private + if fdSelect: + self.fdSelect = fdSelect + if fdArray: + self.fdArray = fdArray + + def produceItem(self, index, data, file, offset): + if self.private is not None: + private = self.private + elif hasattr(self, 'fdArray') and self.fdArray is not None: + if hasattr(self, 'fdSelect') and self.fdSelect is not None: + fdIndex = self.fdSelect[index] + else: + fdIndex = 0 + private = self.fdArray[fdIndex].Private + else: + private = None + return self.subrClass(data, private=private, globalSubrs=self.globalSubrs) + + def toXML(self, xmlWriter): + """Write the subroutines index into XML representation onto the given + :class:`fontTools.misc.xmlWriter.XMLWriter`. + + .. 
code:: python + + writer = xmlWriter.XMLWriter(sys.stdout) + tt["CFF "].cff[0].GlobalSubrs.toXML(writer) + + """ + xmlWriter.comment( + "The 'index' attribute is only for humans; " + "it is ignored when parsed.") + xmlWriter.newline() + for i in range(len(self)): + subr = self[i] + if subr.needsDecompilation(): + xmlWriter.begintag("CharString", index=i, raw=1) + else: + xmlWriter.begintag("CharString", index=i) + xmlWriter.newline() + subr.toXML(xmlWriter) + xmlWriter.endtag("CharString") + xmlWriter.newline() + + def fromXML(self, name, attrs, content): + if name != "CharString": + return + subr = self.subrClass() + subr.fromXML(name, attrs, content) + self.append(subr) + + def getItemAndSelector(self, index): + sel = None + if hasattr(self, 'fdSelect'): + sel = self.fdSelect[index] + return self[index], sel + + +class SubrsIndex(GlobalSubrsIndex): + """This index contains a glyph's local subroutines. A local subroutine is a + private set of ``CharString`` data which is accessible only to the glyph to + which the index is attached.""" + + compilerClass = SubrsCompiler + + +class TopDictIndex(Index): + """This index represents the array of ``TopDict`` structures in the font + (again, usually only one entry is present). Hence the following calls are + equivalent: + + .. 
code:: python + + tt["CFF "].cff[0] + # + tt["CFF "].cff.topDictIndex[0] + # + + """ + + compilerClass = TopDictIndexCompiler + + def __init__(self, file=None, cff2GetGlyphOrder=None, topSize=0, + isCFF2=None): + assert (isCFF2 is None) == (file is None) + self.cff2GetGlyphOrder = cff2GetGlyphOrder + if file is not None and isCFF2: + self._isCFF2 = isCFF2 + self.items = [] + name = self.__class__.__name__ + log.log(DEBUG, "loading %s at %s", name, file.tell()) + self.file = file + count = 1 + self.items = [None] * count + self.offsets = [0, topSize] + self.offsetBase = file.tell() + # pretend we've read the whole lot + file.seek(self.offsetBase + topSize) + log.log(DEBUG, " end of %s at %s", name, file.tell()) + else: + super(TopDictIndex, self).__init__(file, isCFF2=isCFF2) + + def produceItem(self, index, data, file, offset): + top = TopDict( + self.strings, file, offset, self.GlobalSubrs, + self.cff2GetGlyphOrder, isCFF2=self._isCFF2) + top.decompile(data) + return top + + def toXML(self, xmlWriter): + for i in range(len(self)): + xmlWriter.begintag("FontDict", index=i) + xmlWriter.newline() + self[i].toXML(xmlWriter) + xmlWriter.endtag("FontDict") + xmlWriter.newline() + + +class FDArrayIndex(Index): + + compilerClass = FDArrayIndexCompiler + + def toXML(self, xmlWriter): + for i in range(len(self)): + xmlWriter.begintag("FontDict", index=i) + xmlWriter.newline() + self[i].toXML(xmlWriter) + xmlWriter.endtag("FontDict") + xmlWriter.newline() + + def produceItem(self, index, data, file, offset): + fontDict = FontDict( + self.strings, file, offset, self.GlobalSubrs, isCFF2=self._isCFF2, + vstore=self.vstore) + fontDict.decompile(data) + return fontDict + + def fromXML(self, name, attrs, content): + if name != "FontDict": + return + fontDict = FontDict() + for element in content: + if isinstance(element, str): + continue + name, attrs, content = element + fontDict.fromXML(name, attrs, content) + self.append(fontDict) + + +class VarStoreData(object): + + def 
__init__(self, file=None, otVarStore=None): + self.file = file + self.data = None + self.otVarStore = otVarStore + self.font = TTFont() # dummy font for the decompile function. + + def decompile(self): + if self.file: + class GlobalState(object): + def __init__(self, tableType, cachingStats): + self.tableType = tableType + self.cachingStats = cachingStats + globalState = GlobalState(tableType="VarStore", cachingStats={}) + # read data in from file. Assume position is correct. + length = readCard16(self.file) + self.data = self.file.read(length) + globalState = {} + reader = OTTableReader(self.data, globalState) + self.otVarStore = ot.VarStore() + self.otVarStore.decompile(reader, self.font) + return self + + def compile(self): + writer = OTTableWriter() + self.otVarStore.compile(writer, self.font) + # Note that this omits the initial Card16 length from the CFF2 + # VarStore data block + self.data = writer.getAllData() + + def writeXML(self, xmlWriter, name): + self.otVarStore.toXML(xmlWriter, self.font) + + def xmlRead(self, name, attrs, content, parent): + self.otVarStore = ot.VarStore() + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + self.otVarStore.fromXML(name, attrs, content, self.font) + else: + pass + return None + + def __len__(self): + return len(self.data) + + def getNumRegions(self, vsIndex): + varData = self.otVarStore.VarData[vsIndex] + numRegions = varData.VarRegionCount + return numRegions + + +class FDSelect(object): + + def __init__(self, file=None, numGlyphs=None, format=None): + if file: + # read data in from file + self.format = readCard8(file) + if self.format == 0: + from array import array + self.gidArray = array("B", file.read(numGlyphs)).tolist() + elif self.format == 3: + gidArray = [None] * numGlyphs + nRanges = readCard16(file) + fd = None + prev = None + for i in range(nRanges): + first = readCard16(file) + if prev is not None: + for glyphID in range(prev, first): + gidArray[glyphID] = fd + 
prev = first + fd = readCard8(file) + if prev is not None: + first = readCard16(file) + for glyphID in range(prev, first): + gidArray[glyphID] = fd + self.gidArray = gidArray + elif self.format == 4: + gidArray = [None] * numGlyphs + nRanges = readCard32(file) + fd = None + prev = None + for i in range(nRanges): + first = readCard32(file) + if prev is not None: + for glyphID in range(prev, first): + gidArray[glyphID] = fd + prev = first + fd = readCard16(file) + if prev is not None: + first = readCard32(file) + for glyphID in range(prev, first): + gidArray[glyphID] = fd + self.gidArray = gidArray + else: + assert False, "unsupported FDSelect format: %s" % format + else: + # reading from XML. Make empty gidArray, and leave format as passed in. + # format is None will result in the smallest representation being used. + self.format = format + self.gidArray = [] + + def __len__(self): + return len(self.gidArray) + + def __getitem__(self, index): + return self.gidArray[index] + + def __setitem__(self, index, fdSelectValue): + self.gidArray[index] = fdSelectValue + + def append(self, fdSelectValue): + self.gidArray.append(fdSelectValue) + + +class CharStrings(object): + """The ``CharStrings`` in the font represent the instructions for drawing + each glyph. This object presents a dictionary interface to the font's + CharStrings, indexed by glyph name: + + .. code:: python + + tt["CFF "].cff[0].CharStrings["a"] + # + + See :class:`fontTools.misc.psCharStrings.T1CharString` and + :class:`fontTools.misc.psCharStrings.T2CharString` for how to decompile, + compile and interpret the glyph drawing instructions in the returned objects. 
+ + """ + + def __init__(self, file, charset, globalSubrs, private, fdSelect, fdArray, + isCFF2=None): + self.globalSubrs = globalSubrs + if file is not None: + self.charStringsIndex = SubrsIndex( + file, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2) + self.charStrings = charStrings = {} + for i in range(len(charset)): + charStrings[charset[i]] = i + # read from OTF file: charStrings.values() are indices into + # charStringsIndex. + self.charStringsAreIndexed = 1 + else: + self.charStrings = {} + # read from ttx file: charStrings.values() are actual charstrings + self.charStringsAreIndexed = 0 + self.private = private + if fdSelect is not None: + self.fdSelect = fdSelect + if fdArray is not None: + self.fdArray = fdArray + + def keys(self): + return list(self.charStrings.keys()) + + def values(self): + if self.charStringsAreIndexed: + return self.charStringsIndex + else: + return list(self.charStrings.values()) + + def has_key(self, name): + return name in self.charStrings + + __contains__ = has_key + + def __len__(self): + return len(self.charStrings) + + def __getitem__(self, name): + charString = self.charStrings[name] + if self.charStringsAreIndexed: + charString = self.charStringsIndex[charString] + return charString + + def __setitem__(self, name, charString): + if self.charStringsAreIndexed: + index = self.charStrings[name] + self.charStringsIndex[index] = charString + else: + self.charStrings[name] = charString + + def getItemAndSelector(self, name): + if self.charStringsAreIndexed: + index = self.charStrings[name] + return self.charStringsIndex.getItemAndSelector(index) + else: + if hasattr(self, 'fdArray'): + if hasattr(self, 'fdSelect'): + sel = self.charStrings[name].fdSelectIndex + else: + sel = 0 + else: + sel = None + return self.charStrings[name], sel + + def toXML(self, xmlWriter): + names = sorted(self.keys()) + for name in names: + charStr, fdSelectIndex = self.getItemAndSelector(name) + if charStr.needsDecompilation(): + raw = [("raw", 
1)] + else: + raw = [] + if fdSelectIndex is None: + xmlWriter.begintag("CharString", [('name', name)] + raw) + else: + xmlWriter.begintag( + "CharString", + [('name', name), ('fdSelectIndex', fdSelectIndex)] + raw) + xmlWriter.newline() + charStr.toXML(xmlWriter) + xmlWriter.endtag("CharString") + xmlWriter.newline() + + def fromXML(self, name, attrs, content): + for element in content: + if isinstance(element, str): + continue + name, attrs, content = element + if name != "CharString": + continue + fdID = -1 + if hasattr(self, "fdArray"): + try: + fdID = safeEval(attrs["fdSelectIndex"]) + except KeyError: + fdID = 0 + private = self.fdArray[fdID].Private + else: + private = self.private + + glyphName = attrs["name"] + charStringClass = psCharStrings.T2CharString + charString = charStringClass( + private=private, + globalSubrs=self.globalSubrs) + charString.fromXML(name, attrs, content) + if fdID >= 0: + charString.fdSelectIndex = fdID + self[glyphName] = charString + + +def readCard8(file): + return byteord(file.read(1)) + + +def readCard16(file): + value, = struct.unpack(">H", file.read(2)) + return value + + +def readCard32(file): + value, = struct.unpack(">L", file.read(4)) + return value + + +def writeCard8(file, value): + file.write(bytechr(value)) + + +def writeCard16(file, value): + file.write(struct.pack(">H", value)) + + +def writeCard32(file, value): + file.write(struct.pack(">L", value)) + + +def packCard8(value): + return bytechr(value) + + +def packCard16(value): + return struct.pack(">H", value) + + +def packCard32(value): + return struct.pack(">L", value) + + +def buildOperatorDict(table): + d = {} + for op, name, arg, default, conv in table: + d[op] = (name, arg) + return d + + +def buildOpcodeDict(table): + d = {} + for op, name, arg, default, conv in table: + if isinstance(op, tuple): + op = bytechr(op[0]) + bytechr(op[1]) + else: + op = bytechr(op) + d[name] = (op, arg) + return d + + +def buildOrder(table): + l = [] + for op, name, arg, 
default, conv in table: + l.append(name) + return l + + +def buildDefaults(table): + d = {} + for op, name, arg, default, conv in table: + if default is not None: + d[name] = default + return d + + +def buildConverters(table): + d = {} + for op, name, arg, default, conv in table: + d[name] = conv + return d + + +class SimpleConverter(object): + + def read(self, parent, value): + if not hasattr(parent, "file"): + return self._read(parent, value) + file = parent.file + pos = file.tell() + try: + return self._read(parent, value) + finally: + file.seek(pos) + + def _read(self, parent, value): + return value + + def write(self, parent, value): + return value + + def xmlWrite(self, xmlWriter, name, value): + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return attrs["value"] + + +class ASCIIConverter(SimpleConverter): + + def _read(self, parent, value): + return tostr(value, encoding='ascii') + + def write(self, parent, value): + return tobytes(value, encoding='ascii') + + def xmlWrite(self, xmlWriter, name, value): + xmlWriter.simpletag(name, value=tostr(value, encoding="ascii")) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return tobytes(attrs["value"], encoding=("ascii")) + + +class Latin1Converter(SimpleConverter): + + def _read(self, parent, value): + return tostr(value, encoding='latin1') + + def write(self, parent, value): + return tobytes(value, encoding='latin1') + + def xmlWrite(self, xmlWriter, name, value): + value = tostr(value, encoding="latin1") + if name in ['Notice', 'Copyright']: + value = re.sub(r"[\r\n]\s+", " ", value) + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return tobytes(attrs["value"], encoding=("latin1")) + + +def parseNum(s): + try: + value = int(s) + except: + value = float(s) + return value + + +def parseBlendList(s): + valueList = [] + for element in s: + if 
isinstance(element, str): + continue + name, attrs, content = element + blendList = attrs["value"].split() + blendList = [eval(val) for val in blendList] + valueList.append(blendList) + if len(valueList) == 1: + valueList = valueList[0] + return valueList + + +class NumberConverter(SimpleConverter): + def xmlWrite(self, xmlWriter, name, value): + if isinstance(value, list): + xmlWriter.begintag(name) + xmlWriter.newline() + xmlWriter.indent() + blendValue = " ".join([str(val) for val in value]) + xmlWriter.simpletag(kBlendDictOpName, value=blendValue) + xmlWriter.newline() + xmlWriter.dedent() + xmlWriter.endtag(name) + xmlWriter.newline() + else: + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + valueString = attrs.get("value", None) + if valueString is None: + value = parseBlendList(content) + else: + value = parseNum(attrs["value"]) + return value + + +class ArrayConverter(SimpleConverter): + def xmlWrite(self, xmlWriter, name, value): + if value and isinstance(value[0], list): + xmlWriter.begintag(name) + xmlWriter.newline() + xmlWriter.indent() + for valueList in value: + blendValue = " ".join([str(val) for val in valueList]) + xmlWriter.simpletag(kBlendDictOpName, value=blendValue) + xmlWriter.newline() + xmlWriter.dedent() + xmlWriter.endtag(name) + xmlWriter.newline() + else: + value = " ".join([str(val) for val in value]) + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + valueString = attrs.get("value", None) + if valueString is None: + valueList = parseBlendList(content) + else: + values = valueString.split() + valueList = [parseNum(value) for value in values] + return valueList + + +class TableConverter(SimpleConverter): + + def xmlWrite(self, xmlWriter, name, value): + xmlWriter.begintag(name) + xmlWriter.newline() + value.toXML(xmlWriter) + xmlWriter.endtag(name) + xmlWriter.newline() + + def xmlRead(self, name, attrs, 
content, parent): + ob = self.getClass()() + for element in content: + if isinstance(element, str): + continue + name, attrs, content = element + ob.fromXML(name, attrs, content) + return ob + + +class PrivateDictConverter(TableConverter): + + def getClass(self): + return PrivateDict + + def _read(self, parent, value): + size, offset = value + file = parent.file + isCFF2 = parent._isCFF2 + try: + vstore = parent.vstore + except AttributeError: + vstore = None + priv = PrivateDict( + parent.strings, file, offset, isCFF2=isCFF2, vstore=vstore) + file.seek(offset) + data = file.read(size) + assert len(data) == size + priv.decompile(data) + return priv + + def write(self, parent, value): + return (0, 0) # dummy value + + +class SubrsConverter(TableConverter): + + def getClass(self): + return SubrsIndex + + def _read(self, parent, value): + file = parent.file + isCFF2 = parent._isCFF2 + file.seek(parent.offset + value) # Offset(self) + return SubrsIndex(file, isCFF2=isCFF2) + + def write(self, parent, value): + return 0 # dummy value + + +class CharStringsConverter(TableConverter): + + def _read(self, parent, value): + file = parent.file + isCFF2 = parent._isCFF2 + charset = parent.charset + globalSubrs = parent.GlobalSubrs + if hasattr(parent, "FDArray"): + fdArray = parent.FDArray + if hasattr(parent, "FDSelect"): + fdSelect = parent.FDSelect + else: + fdSelect = None + private = None + else: + fdSelect, fdArray = None, None + private = parent.Private + file.seek(value) # Offset(0) + charStrings = CharStrings( + file, charset, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2) + return charStrings + + def write(self, parent, value): + return 0 # dummy value + + def xmlRead(self, name, attrs, content, parent): + if hasattr(parent, "FDArray"): + # if it is a CID-keyed font, then the private Dict is extracted from the + # parent.FDArray + fdArray = parent.FDArray + if hasattr(parent, "FDSelect"): + fdSelect = parent.FDSelect + else: + fdSelect = None + private = 
None + else: + # if it is a name-keyed font, then the private dict is in the top dict, + # and + # there is no fdArray. + private, fdSelect, fdArray = parent.Private, None, None + charStrings = CharStrings( + None, None, parent.GlobalSubrs, private, fdSelect, fdArray) + charStrings.fromXML(name, attrs, content) + return charStrings + + +class CharsetConverter(SimpleConverter): + def _read(self, parent, value): + isCID = hasattr(parent, "ROS") + if value > 2: + numGlyphs = parent.numGlyphs + file = parent.file + file.seek(value) + log.log(DEBUG, "loading charset at %s", value) + format = readCard8(file) + if format == 0: + charset = parseCharset0(numGlyphs, file, parent.strings, isCID) + elif format == 1 or format == 2: + charset = parseCharset(numGlyphs, file, parent.strings, isCID, format) + else: + raise NotImplementedError + assert len(charset) == numGlyphs + log.log(DEBUG, " charset end at %s", file.tell()) + # make sure glyph names are unique + allNames = {} + newCharset = [] + for glyphName in charset: + if glyphName in allNames: + # make up a new glyphName that's unique + n = allNames[glyphName] + while (glyphName + "#" + str(n)) in allNames: + n += 1 + allNames[glyphName] = n + 1 + glyphName = glyphName + "#" + str(n) + allNames[glyphName] = 1 + newCharset.append(glyphName) + charset = newCharset + else: # offset == 0 -> no charset data. + if isCID or "CharStrings" not in parent.rawDict: + # We get here only when processing fontDicts from the FDArray of + # CFF-CID fonts. Only the real topDict references the chrset. 
+ assert value == 0 + charset = None + elif value == 0: + charset = cffISOAdobeStrings + elif value == 1: + charset = cffIExpertStrings + elif value == 2: + charset = cffExpertSubsetStrings + if charset and (len(charset) != parent.numGlyphs): + charset = charset[:parent.numGlyphs] + return charset + + def write(self, parent, value): + return 0 # dummy value + + def xmlWrite(self, xmlWriter, name, value): + # XXX only write charset when not in OT/TTX context, where we + # dump charset as a separate "GlyphOrder" table. + # # xmlWriter.simpletag("charset") + xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element") + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + pass + + +class CharsetCompiler(object): + + def __init__(self, strings, charset, parent): + assert charset[0] == '.notdef' + isCID = hasattr(parent.dictObj, "ROS") + data0 = packCharset0(charset, isCID, strings) + data = packCharset(charset, isCID, strings) + if len(data) < len(data0): + self.data = data + else: + self.data = data0 + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["charset"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +def getStdCharSet(charset): + # check to see if we can use a predefined charset value. 
+ predefinedCharSetVal = None + predefinedCharSets = [ + (cffISOAdobeStringCount, cffISOAdobeStrings, 0), + (cffExpertStringCount, cffIExpertStrings, 1), + (cffExpertSubsetStringCount, cffExpertSubsetStrings, 2)] + lcs = len(charset) + for cnt, pcs, csv in predefinedCharSets: + if predefinedCharSetVal is not None: + break + if lcs > cnt: + continue + predefinedCharSetVal = csv + for i in range(lcs): + if charset[i] != pcs[i]: + predefinedCharSetVal = None + break + return predefinedCharSetVal + + +def getCIDfromName(name, strings): + return int(name[3:]) + + +def getSIDfromName(name, strings): + return strings.getSID(name) + + +def packCharset0(charset, isCID, strings): + fmt = 0 + data = [packCard8(fmt)] + if isCID: + getNameID = getCIDfromName + else: + getNameID = getSIDfromName + + for name in charset[1:]: + data.append(packCard16(getNameID(name, strings))) + return bytesjoin(data) + + +def packCharset(charset, isCID, strings): + fmt = 1 + ranges = [] + first = None + end = 0 + if isCID: + getNameID = getCIDfromName + else: + getNameID = getSIDfromName + + for name in charset[1:]: + SID = getNameID(name, strings) + if first is None: + first = SID + elif end + 1 != SID: + nLeft = end - first + if nLeft > 255: + fmt = 2 + ranges.append((first, nLeft)) + first = SID + end = SID + if end: + nLeft = end - first + if nLeft > 255: + fmt = 2 + ranges.append((first, nLeft)) + + data = [packCard8(fmt)] + if fmt == 1: + nLeftFunc = packCard8 + else: + nLeftFunc = packCard16 + for first, nLeft in ranges: + data.append(packCard16(first) + nLeftFunc(nLeft)) + return bytesjoin(data) + + +def parseCharset0(numGlyphs, file, strings, isCID): + charset = [".notdef"] + if isCID: + for i in range(numGlyphs - 1): + CID = readCard16(file) + charset.append("cid" + str(CID).zfill(5)) + else: + for i in range(numGlyphs - 1): + SID = readCard16(file) + charset.append(strings[SID]) + return charset + + +def parseCharset(numGlyphs, file, strings, isCID, fmt): + charset = ['.notdef'] + 
count = 1 + if fmt == 1: + nLeftFunc = readCard8 + else: + nLeftFunc = readCard16 + while count < numGlyphs: + first = readCard16(file) + nLeft = nLeftFunc(file) + if isCID: + for CID in range(first, first + nLeft + 1): + charset.append("cid" + str(CID).zfill(5)) + else: + for SID in range(first, first + nLeft + 1): + charset.append(strings[SID]) + count = count + nLeft + 1 + return charset + + +class EncodingCompiler(object): + + def __init__(self, strings, encoding, parent): + assert not isinstance(encoding, str) + data0 = packEncoding0(parent.dictObj.charset, encoding, parent.strings) + data1 = packEncoding1(parent.dictObj.charset, encoding, parent.strings) + if len(data0) < len(data1): + self.data = data0 + else: + self.data = data1 + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["Encoding"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +class EncodingConverter(SimpleConverter): + + def _read(self, parent, value): + if value == 0: + return "StandardEncoding" + elif value == 1: + return "ExpertEncoding" + else: + assert value > 1 + file = parent.file + file.seek(value) + log.log(DEBUG, "loading Encoding at %s", value) + fmt = readCard8(file) + haveSupplement = fmt & 0x80 + if haveSupplement: + raise NotImplementedError("Encoding supplements are not yet supported") + fmt = fmt & 0x7f + if fmt == 0: + encoding = parseEncoding0(parent.charset, file, haveSupplement, + parent.strings) + elif fmt == 1: + encoding = parseEncoding1(parent.charset, file, haveSupplement, + parent.strings) + return encoding + + def write(self, parent, value): + if value == "StandardEncoding": + return 0 + elif value == "ExpertEncoding": + return 1 + return 0 # dummy value + + def xmlWrite(self, xmlWriter, name, value): + if value in ("StandardEncoding", "ExpertEncoding"): + xmlWriter.simpletag(name, name=value) + xmlWriter.newline() + return + xmlWriter.begintag(name) + xmlWriter.newline() 
+ for code in range(len(value)): + glyphName = value[code] + if glyphName != ".notdef": + xmlWriter.simpletag("map", code=hex(code), name=glyphName) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + if "name" in attrs: + return attrs["name"] + encoding = [".notdef"] * 256 + for element in content: + if isinstance(element, str): + continue + name, attrs, content = element + code = safeEval(attrs["code"]) + glyphName = attrs["name"] + encoding[code] = glyphName + return encoding + + +def parseEncoding0(charset, file, haveSupplement, strings): + nCodes = readCard8(file) + encoding = [".notdef"] * 256 + for glyphID in range(1, nCodes + 1): + code = readCard8(file) + if code != 0: + encoding[code] = charset[glyphID] + return encoding + + +def parseEncoding1(charset, file, haveSupplement, strings): + nRanges = readCard8(file) + encoding = [".notdef"] * 256 + glyphID = 1 + for i in range(nRanges): + code = readCard8(file) + nLeft = readCard8(file) + for glyphID in range(glyphID, glyphID + nLeft + 1): + encoding[code] = charset[glyphID] + code = code + 1 + glyphID = glyphID + 1 + return encoding + + +def packEncoding0(charset, encoding, strings): + fmt = 0 + m = {} + for code in range(len(encoding)): + name = encoding[code] + if name != ".notdef": + m[name] = code + codes = [] + for name in charset[1:]: + code = m.get(name) + codes.append(code) + + while codes and codes[-1] is None: + codes.pop() + + data = [packCard8(fmt), packCard8(len(codes))] + for code in codes: + if code is None: + code = 0 + data.append(packCard8(code)) + return bytesjoin(data) + + +def packEncoding1(charset, encoding, strings): + fmt = 1 + m = {} + for code in range(len(encoding)): + name = encoding[code] + if name != ".notdef": + m[name] = code + ranges = [] + first = None + end = 0 + for name in charset[1:]: + code = m.get(name, -1) + if first is None: + first = code + elif end + 1 != code: + nLeft = end - first + 
ranges.append((first, nLeft)) + first = code + end = code + nLeft = end - first + ranges.append((first, nLeft)) + + # remove unencoded glyphs at the end. + while ranges and ranges[-1][0] == -1: + ranges.pop() + + data = [packCard8(fmt), packCard8(len(ranges))] + for first, nLeft in ranges: + if first == -1: # unencoded + first = 0 + data.append(packCard8(first) + packCard8(nLeft)) + return bytesjoin(data) + + +class FDArrayConverter(TableConverter): + + def _read(self, parent, value): + try: + vstore = parent.VarStore + except AttributeError: + vstore = None + file = parent.file + isCFF2 = parent._isCFF2 + file.seek(value) + fdArray = FDArrayIndex(file, isCFF2=isCFF2) + fdArray.vstore = vstore + fdArray.strings = parent.strings + fdArray.GlobalSubrs = parent.GlobalSubrs + return fdArray + + def write(self, parent, value): + return 0 # dummy value + + def xmlRead(self, name, attrs, content, parent): + fdArray = FDArrayIndex() + for element in content: + if isinstance(element, str): + continue + name, attrs, content = element + fdArray.fromXML(name, attrs, content) + return fdArray + + +class FDSelectConverter(SimpleConverter): + + def _read(self, parent, value): + file = parent.file + file.seek(value) + fdSelect = FDSelect(file, parent.numGlyphs) + return fdSelect + + def write(self, parent, value): + return 0 # dummy value + + # The FDSelect glyph data is written out to XML in the charstring keys, + # so we write out only the format selector + def xmlWrite(self, xmlWriter, name, value): + xmlWriter.simpletag(name, [('format', value.format)]) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + fmt = safeEval(attrs["format"]) + file = None + numGlyphs = None + fdSelect = FDSelect(file, numGlyphs, fmt) + return fdSelect + + +class VarStoreConverter(SimpleConverter): + + def _read(self, parent, value): + file = parent.file + file.seek(value) + varStore = VarStoreData(file) + varStore.decompile() + return varStore + + def write(self, parent, 
value): + return 0 # dummy value + + def xmlWrite(self, xmlWriter, name, value): + value.writeXML(xmlWriter, name) + + def xmlRead(self, name, attrs, content, parent): + varStore = VarStoreData() + varStore.xmlRead(name, attrs, content, parent) + return varStore + + +def packFDSelect0(fdSelectArray): + fmt = 0 + data = [packCard8(fmt)] + for index in fdSelectArray: + data.append(packCard8(index)) + return bytesjoin(data) + + +def packFDSelect3(fdSelectArray): + fmt = 3 + fdRanges = [] + lenArray = len(fdSelectArray) + lastFDIndex = -1 + for i in range(lenArray): + fdIndex = fdSelectArray[i] + if lastFDIndex != fdIndex: + fdRanges.append([i, fdIndex]) + lastFDIndex = fdIndex + sentinelGID = i + 1 + + data = [packCard8(fmt)] + data.append(packCard16(len(fdRanges))) + for fdRange in fdRanges: + data.append(packCard16(fdRange[0])) + data.append(packCard8(fdRange[1])) + data.append(packCard16(sentinelGID)) + return bytesjoin(data) + + +def packFDSelect4(fdSelectArray): + fmt = 4 + fdRanges = [] + lenArray = len(fdSelectArray) + lastFDIndex = -1 + for i in range(lenArray): + fdIndex = fdSelectArray[i] + if lastFDIndex != fdIndex: + fdRanges.append([i, fdIndex]) + lastFDIndex = fdIndex + sentinelGID = i + 1 + + data = [packCard8(fmt)] + data.append(packCard32(len(fdRanges))) + for fdRange in fdRanges: + data.append(packCard32(fdRange[0])) + data.append(packCard16(fdRange[1])) + data.append(packCard32(sentinelGID)) + return bytesjoin(data) + + +class FDSelectCompiler(object): + + def __init__(self, fdSelect, parent): + fmt = fdSelect.format + fdSelectArray = fdSelect.gidArray + if fmt == 0: + self.data = packFDSelect0(fdSelectArray) + elif fmt == 3: + self.data = packFDSelect3(fdSelectArray) + elif fmt == 4: + self.data = packFDSelect4(fdSelectArray) + else: + # choose smaller of the two formats + data0 = packFDSelect0(fdSelectArray) + data3 = packFDSelect3(fdSelectArray) + if len(data0) < len(data3): + self.data = data0 + fdSelect.format = 0 + else: + self.data = data3 + 
fdSelect.format = 3 + + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["FDSelect"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +class VarStoreCompiler(object): + + def __init__(self, varStoreData, parent): + self.parent = parent + if not varStoreData.data: + varStoreData.compile() + data = [ + packCard16(len(varStoreData.data)), + varStoreData.data + ] + self.data = bytesjoin(data) + + def setPos(self, pos, endPos): + self.parent.rawDict["VarStore"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +class ROSConverter(SimpleConverter): + + def xmlWrite(self, xmlWriter, name, value): + registry, order, supplement = value + xmlWriter.simpletag( + name, + [ + ('Registry', tostr(registry)), + ('Order', tostr(order)), + ('Supplement', supplement) + ]) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return (attrs['Registry'], attrs['Order'], safeEval(attrs['Supplement'])) + +topDictOperators = [ +# opcode name argument type default converter + (25, 'maxstack', 'number', None, None), + ((12, 30), 'ROS', ('SID', 'SID', 'number'), None, ROSConverter()), + ((12, 20), 'SyntheticBase', 'number', None, None), + (0, 'version', 'SID', None, None), + (1, 'Notice', 'SID', None, Latin1Converter()), + ((12, 0), 'Copyright', 'SID', None, Latin1Converter()), + (2, 'FullName', 'SID', None, None), + ((12, 38), 'FontName', 'SID', None, None), + (3, 'FamilyName', 'SID', None, None), + (4, 'Weight', 'SID', None, None), + ((12, 1), 'isFixedPitch', 'number', 0, None), + ((12, 2), 'ItalicAngle', 'number', 0, None), + ((12, 3), 'UnderlinePosition', 'number', -100, None), + ((12, 4), 'UnderlineThickness', 'number', 50, None), + ((12, 5), 'PaintType', 'number', 0, None), + ((12, 6), 'CharstringType', 'number', 2, None), + ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None), + (13, 
'UniqueID', 'number', None, None), + (5, 'FontBBox', 'array', [0, 0, 0, 0], None), + ((12, 8), 'StrokeWidth', 'number', 0, None), + (14, 'XUID', 'array', None, None), + ((12, 21), 'PostScript', 'SID', None, None), + ((12, 22), 'BaseFontName', 'SID', None, None), + ((12, 23), 'BaseFontBlend', 'delta', None, None), + ((12, 31), 'CIDFontVersion', 'number', 0, None), + ((12, 32), 'CIDFontRevision', 'number', 0, None), + ((12, 33), 'CIDFontType', 'number', 0, None), + ((12, 34), 'CIDCount', 'number', 8720, None), + (15, 'charset', 'number', None, CharsetConverter()), + ((12, 35), 'UIDBase', 'number', None, None), + (16, 'Encoding', 'number', 0, EncodingConverter()), + (18, 'Private', ('number', 'number'), None, PrivateDictConverter()), + ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()), + ((12, 36), 'FDArray', 'number', None, FDArrayConverter()), + (17, 'CharStrings', 'number', None, CharStringsConverter()), + (24, 'VarStore', 'number', None, VarStoreConverter()), +] + +topDictOperators2 = [ +# opcode name argument type default converter + (25, 'maxstack', 'number', None, None), + ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None), + ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()), + ((12, 36), 'FDArray', 'number', None, FDArrayConverter()), + (17, 'CharStrings', 'number', None, CharStringsConverter()), + (24, 'VarStore', 'number', None, VarStoreConverter()), +] + +# Note! FDSelect and FDArray must both preceed CharStrings in the output XML build order, +# in order for the font to compile back from xml. + +kBlendDictOpName = "blend" +blendOp = 23 + +privateDictOperators = [ +# opcode name argument type default converter + (22, "vsindex", 'number', None, None), + (blendOp, kBlendDictOpName, 'blendList', None, None), # This is for reading to/from XML: it not written to CFF. 
+ (6, 'BlueValues', 'delta', None, None), + (7, 'OtherBlues', 'delta', None, None), + (8, 'FamilyBlues', 'delta', None, None), + (9, 'FamilyOtherBlues', 'delta', None, None), + ((12, 9), 'BlueScale', 'number', 0.039625, None), + ((12, 10), 'BlueShift', 'number', 7, None), + ((12, 11), 'BlueFuzz', 'number', 1, None), + (10, 'StdHW', 'number', None, None), + (11, 'StdVW', 'number', None, None), + ((12, 12), 'StemSnapH', 'delta', None, None), + ((12, 13), 'StemSnapV', 'delta', None, None), + ((12, 14), 'ForceBold', 'number', 0, None), + ((12, 15), 'ForceBoldThreshold', 'number', None, None), # deprecated + ((12, 16), 'lenIV', 'number', None, None), # deprecated + ((12, 17), 'LanguageGroup', 'number', 0, None), + ((12, 18), 'ExpansionFactor', 'number', 0.06, None), + ((12, 19), 'initialRandomSeed', 'number', 0, None), + (20, 'defaultWidthX', 'number', 0, None), + (21, 'nominalWidthX', 'number', 0, None), + (19, 'Subrs', 'number', None, SubrsConverter()), +] + +privateDictOperators2 = [ +# opcode name argument type default converter + (22, "vsindex", 'number', None, None), + (blendOp, kBlendDictOpName, 'blendList', None, None), # This is for reading to/from XML: it not written to CFF. 
+ (6, 'BlueValues', 'delta', None, None), + (7, 'OtherBlues', 'delta', None, None), + (8, 'FamilyBlues', 'delta', None, None), + (9, 'FamilyOtherBlues', 'delta', None, None), + ((12, 9), 'BlueScale', 'number', 0.039625, None), + ((12, 10), 'BlueShift', 'number', 7, None), + ((12, 11), 'BlueFuzz', 'number', 1, None), + (10, 'StdHW', 'number', None, None), + (11, 'StdVW', 'number', None, None), + ((12, 12), 'StemSnapH', 'delta', None, None), + ((12, 13), 'StemSnapV', 'delta', None, None), + ((12, 17), 'LanguageGroup', 'number', 0, None), + ((12, 18), 'ExpansionFactor', 'number', 0.06, None), + (19, 'Subrs', 'number', None, SubrsConverter()), +] + + +def addConverters(table): + for i in range(len(table)): + op, name, arg, default, conv = table[i] + if conv is not None: + continue + if arg in ("delta", "array"): + conv = ArrayConverter() + elif arg == "number": + conv = NumberConverter() + elif arg == "SID": + conv = ASCIIConverter() + elif arg == 'blendList': + conv = None + else: + assert False + table[i] = op, name, arg, default, conv + + +addConverters(privateDictOperators) +addConverters(topDictOperators) + + +class TopDictDecompiler(psCharStrings.DictDecompiler): + operators = buildOperatorDict(topDictOperators) + + +class PrivateDictDecompiler(psCharStrings.DictDecompiler): + operators = buildOperatorDict(privateDictOperators) + + +class DictCompiler(object): + maxBlendStack = 0 + + def __init__(self, dictObj, strings, parent, isCFF2=None): + if strings: + assert isinstance(strings, IndexedStrings) + if isCFF2 is None and hasattr(parent, "isCFF2"): + isCFF2 = parent.isCFF2 + assert isCFF2 is not None + self.isCFF2 = isCFF2 + self.dictObj = dictObj + self.strings = strings + self.parent = parent + rawDict = {} + for name in dictObj.order: + value = getattr(dictObj, name, None) + if value is None: + continue + conv = dictObj.converters[name] + value = conv.write(dictObj, value) + if value == dictObj.defaults.get(name): + continue + rawDict[name] = value + 
self.rawDict = rawDict + + def setPos(self, pos, endPos): + pass + + def getDataLength(self): + return len(self.compile("getDataLength")) + + def compile(self, reason): + log.log(DEBUG, "-- compiling %s for %s", self.__class__.__name__, reason) + rawDict = self.rawDict + data = [] + for name in self.dictObj.order: + value = rawDict.get(name) + if value is None: + continue + op, argType = self.opcodes[name] + if isinstance(argType, tuple): + l = len(argType) + assert len(value) == l, "value doesn't match arg type" + for i in range(l): + arg = argType[i] + v = value[i] + arghandler = getattr(self, "arg_" + arg) + data.append(arghandler(v)) + else: + arghandler = getattr(self, "arg_" + argType) + data.append(arghandler(value)) + data.append(op) + data = bytesjoin(data) + return data + + def toFile(self, file): + data = self.compile("toFile") + file.write(data) + + def arg_number(self, num): + if isinstance(num, list): + data = [encodeNumber(val) for val in num] + data.append(encodeNumber(1)) + data.append(bytechr(blendOp)) + datum = bytesjoin(data) + else: + datum = encodeNumber(num) + return datum + + def arg_SID(self, s): + return psCharStrings.encodeIntCFF(self.strings.getSID(s)) + + def arg_array(self, value): + data = [] + for num in value: + data.append(self.arg_number(num)) + return bytesjoin(data) + + def arg_delta(self, value): + if not value: + return b"" + val0 = value[0] + if isinstance(val0, list): + data = self.arg_delta_blend(value) + else: + out = [] + last = 0 + for v in value: + out.append(v - last) + last = v + data = [] + for num in out: + data.append(encodeNumber(num)) + return bytesjoin(data) + + + def arg_delta_blend(self, value): + """A delta list with blend lists has to be *all* blend lists. + + The value is a list is arranged as follows:: + + [ + [V0, d0..dn] + [V1, d0..dn] + ... + [Vm, d0..dn] + ] + + ``V`` is the absolute coordinate value from the default font, and ``d0-dn`` + are the delta values from the *n* regions. 
Each ``V`` is an absolute + coordinate from the default font. + + We want to return a list:: + + [ + [v0, v1..vm] + [d0..dn] + ... + [d0..dn] + numBlends + blendOp + ] + + where each ``v`` is relative to the previous default font value. + """ + numMasters = len(value[0]) + numBlends = len(value) + numStack = (numBlends * numMasters) + 1 + if numStack > self.maxBlendStack: + # Figure out the max number of value we can blend + # and divide this list up into chunks of that size. + + numBlendValues = int((self.maxBlendStack - 1) / numMasters) + out = [] + while True: + numVal = min(len(value), numBlendValues) + if numVal == 0: + break + valList = value[0:numVal] + out1 = self.arg_delta_blend(valList) + out.extend(out1) + value = value[numVal:] + else: + firstList = [0] * numBlends + deltaList = [None] * numBlends + i = 0 + prevVal = 0 + while i < numBlends: + # For PrivateDict BlueValues, the default font + # values are absolute, not relative. + # Must convert these back to relative coordinates + # befor writing to CFF2. 
+ defaultValue = value[i][0] + firstList[i] = defaultValue - prevVal + prevVal = defaultValue + deltaList[i] = value[i][1:] + i += 1 + + relValueList = firstList + for blendList in deltaList: + relValueList.extend(blendList) + out = [encodeNumber(val) for val in relValueList] + out.append(encodeNumber(numBlends)) + out.append(bytechr(blendOp)) + return out + + +def encodeNumber(num): + if isinstance(num, float): + return psCharStrings.encodeFloat(num) + else: + return psCharStrings.encodeIntCFF(num) + + +class TopDictCompiler(DictCompiler): + + opcodes = buildOpcodeDict(topDictOperators) + + def getChildren(self, strings): + isCFF2 = self.isCFF2 + children = [] + if self.dictObj.cff2GetGlyphOrder is None: + if hasattr(self.dictObj, "charset") and self.dictObj.charset: + if hasattr(self.dictObj, "ROS"): # aka isCID + charsetCode = None + else: + charsetCode = getStdCharSet(self.dictObj.charset) + if charsetCode is None: + children.append(CharsetCompiler(strings, self.dictObj.charset, self)) + else: + self.rawDict["charset"] = charsetCode + if hasattr(self.dictObj, "Encoding") and self.dictObj.Encoding: + encoding = self.dictObj.Encoding + if not isinstance(encoding, str): + children.append(EncodingCompiler(strings, encoding, self)) + else: + if hasattr(self.dictObj, "VarStore"): + varStoreData = self.dictObj.VarStore + varStoreComp = VarStoreCompiler(varStoreData, self) + children.append(varStoreComp) + if hasattr(self.dictObj, "FDSelect"): + # I have not yet supported merging a ttx CFF-CID font, as there are + # interesting issues about merging the FDArrays. Here I assume that + # either the font was read from XML, and the FDSelect indices are all + # in the charstring data, or the FDSelect array is already fully defined. 
+ fdSelect = self.dictObj.FDSelect + # probably read in from XML; assume fdIndex in CharString data + if len(fdSelect) == 0: + charStrings = self.dictObj.CharStrings + for name in self.dictObj.charset: + fdSelect.append(charStrings[name].fdSelectIndex) + fdSelectComp = FDSelectCompiler(fdSelect, self) + children.append(fdSelectComp) + if hasattr(self.dictObj, "CharStrings"): + items = [] + charStrings = self.dictObj.CharStrings + for name in self.dictObj.charset: + items.append(charStrings[name]) + charStringsComp = CharStringsCompiler( + items, strings, self, isCFF2=isCFF2) + children.append(charStringsComp) + if hasattr(self.dictObj, "FDArray"): + # I have not yet supported merging a ttx CFF-CID font, as there are + # interesting issues about merging the FDArrays. Here I assume that the + # FDArray info is correct and complete. + fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self) + children.append(fdArrayIndexComp) + children.extend(fdArrayIndexComp.getChildren(strings)) + if hasattr(self.dictObj, "Private"): + privComp = self.dictObj.Private.getCompiler(strings, self) + children.append(privComp) + children.extend(privComp.getChildren(strings)) + return children + + +class FontDictCompiler(DictCompiler): + opcodes = buildOpcodeDict(topDictOperators) + + def __init__(self, dictObj, strings, parent, isCFF2=None): + super(FontDictCompiler, self).__init__(dictObj, strings, parent, isCFF2=isCFF2) + # + # We now take some effort to detect if there were any key/value pairs + # supplied that were ignored in the FontDict context, and issue a warning + # for those cases. + # + ignoredNames = [] + dictObj = self.dictObj + for name in sorted(set(dictObj.converters) - set(dictObj.order)): + if name in dictObj.rawDict: + # The font was directly read from binary. In this + # case, we want to report *all* "useless" key/value + # pairs that are in the font, not just the ones that + # are different from the default. 
+ ignoredNames.append(name) + else: + # The font was probably read from a TTX file. We only + # warn about keys whos value is not the default. The + # ones that have the default value will not be written + # to binary anyway. + default = dictObj.defaults.get(name) + if default is not None: + conv = dictObj.converters[name] + default = conv.read(dictObj, default) + if getattr(dictObj, name, None) != default: + ignoredNames.append(name) + if ignoredNames: + log.warning( + "Some CFF FDArray/FontDict keys were ignored upon compile: " + + " ".join(sorted(ignoredNames))) + + def getChildren(self, strings): + children = [] + if hasattr(self.dictObj, "Private"): + privComp = self.dictObj.Private.getCompiler(strings, self) + children.append(privComp) + children.extend(privComp.getChildren(strings)) + return children + + +class PrivateDictCompiler(DictCompiler): + + maxBlendStack = maxStackLimit + opcodes = buildOpcodeDict(privateDictOperators) + + def setPos(self, pos, endPos): + size = endPos - pos + self.parent.rawDict["Private"] = size, pos + self.pos = pos + + def getChildren(self, strings): + children = [] + if hasattr(self.dictObj, "Subrs"): + children.append(self.dictObj.Subrs.getCompiler(strings, self)) + return children + + +class BaseDict(object): + + def __init__(self, strings=None, file=None, offset=None, isCFF2=None): + assert (isCFF2 is None) == (file is None) + self.rawDict = {} + self.skipNames = [] + self.strings = strings + if file is None: + return + self._isCFF2 = isCFF2 + self.file = file + if offset is not None: + log.log(DEBUG, "loading %s at %s", self.__class__.__name__, offset) + self.offset = offset + + def decompile(self, data): + log.log(DEBUG, " length %s is %d", self.__class__.__name__, len(data)) + dec = self.decompilerClass(self.strings, self) + dec.decompile(data) + self.rawDict = dec.getDict() + self.postDecompile() + + def postDecompile(self): + pass + + def getCompiler(self, strings, parent, isCFF2=None): + return self.compilerClass(self, 
strings, parent, isCFF2=isCFF2) + + def __getattr__(self, name): + if name[:2] == name[-2:] == "__": + # to make deepcopy() and pickle.load() work, we need to signal with + # AttributeError that dunder methods like '__deepcopy__' or '__getstate__' + # aren't implemented. For more details, see: + # https://github.com/fonttools/fonttools/pull/1488 + raise AttributeError(name) + value = self.rawDict.get(name, None) + if value is None: + value = self.defaults.get(name) + if value is None: + raise AttributeError(name) + conv = self.converters[name] + value = conv.read(self, value) + setattr(self, name, value) + return value + + def toXML(self, xmlWriter): + for name in self.order: + if name in self.skipNames: + continue + value = getattr(self, name, None) + # XXX For "charset" we never skip calling xmlWrite even if the + # value is None, so we always write the following XML comment: + # + # + # + # Charset is None when 'CFF ' table is imported from XML into an + # empty TTFont(). By writing this comment all the time, we obtain + # the same XML output whether roundtripping XML-to-XML or + # dumping binary-to-XML + if value is None and name != "charset": + continue + conv = self.converters[name] + conv.xmlWrite(xmlWriter, name, value) + ignoredNames = set(self.rawDict) - set(self.order) + if ignoredNames: + xmlWriter.comment( + "some keys were ignored: %s" % " ".join(sorted(ignoredNames))) + xmlWriter.newline() + + def fromXML(self, name, attrs, content): + conv = self.converters[name] + value = conv.xmlRead(name, attrs, content, self) + setattr(self, name, value) + + +class TopDict(BaseDict): + """The ``TopDict`` represents the top-level dictionary holding font + information. CFF2 tables contain a restricted set of top-level entries + as described `here `_, + but CFF tables may contain a wider range of information. This information + can be accessed through attributes or through the dictionary returned + through the ``rawDict`` property: + + .. 
code:: python + + font = tt["CFF "].cff[0] + font.FamilyName + # 'Linux Libertine O' + font.rawDict["FamilyName"] + # 'Linux Libertine O' + + More information is available in the CFF file's private dictionary, accessed + via the ``Private`` property: + + .. code:: python + + tt["CFF "].cff[0].Private.BlueValues + # [-15, 0, 515, 515, 666, 666] + + """ + + defaults = buildDefaults(topDictOperators) + converters = buildConverters(topDictOperators) + compilerClass = TopDictCompiler + order = buildOrder(topDictOperators) + decompilerClass = TopDictDecompiler + + def __init__(self, strings=None, file=None, offset=None, + GlobalSubrs=None, cff2GetGlyphOrder=None, isCFF2=None): + super(TopDict, self).__init__(strings, file, offset, isCFF2=isCFF2) + self.cff2GetGlyphOrder = cff2GetGlyphOrder + self.GlobalSubrs = GlobalSubrs + if isCFF2: + self.defaults = buildDefaults(topDictOperators2) + self.charset = cff2GetGlyphOrder() + self.order = buildOrder(topDictOperators2) + else: + self.defaults = buildDefaults(topDictOperators) + self.order = buildOrder(topDictOperators) + + def getGlyphOrder(self): + """Returns a list of glyph names in the CFF font.""" + return self.charset + + def postDecompile(self): + offset = self.rawDict.get("CharStrings") + if offset is None: + return + # get the number of glyphs beforehand. + self.file.seek(offset) + if self._isCFF2: + self.numGlyphs = readCard32(self.file) + else: + self.numGlyphs = readCard16(self.file) + + def toXML(self, xmlWriter): + if hasattr(self, "CharStrings"): + self.decompileAllCharStrings() + if hasattr(self, "ROS"): + self.skipNames = ['Encoding'] + if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"): + # these values have default values, but I only want them to show up + # in CID fonts. + self.skipNames = [ + 'CIDFontVersion', 'CIDFontRevision', 'CIDFontType', 'CIDCount'] + BaseDict.toXML(self, xmlWriter) + + def decompileAllCharStrings(self): + # Make sure that all the Private Dicts have been instantiated. 
+ for i, charString in enumerate(self.CharStrings.values()): + try: + charString.decompile() + except: + log.error("Error in charstring %s", i) + raise + + def recalcFontBBox(self): + fontBBox = None + for charString in self.CharStrings.values(): + bounds = charString.calcBounds(self.CharStrings) + if bounds is not None: + if fontBBox is not None: + fontBBox = unionRect(fontBBox, bounds) + else: + fontBBox = bounds + + if fontBBox is None: + self.FontBBox = self.defaults['FontBBox'][:] + else: + self.FontBBox = list(intRect(fontBBox)) + + +class FontDict(BaseDict): + # + # Since fonttools used to pass a lot of fields that are not relevant in the FDArray + # FontDict, there are 'ttx' files in the wild that contain all these. These got in + # the ttx files because fonttools writes explicit values for all the TopDict default + # values. These are not actually illegal in the context of an FDArray FontDict - you + # can legally, per spec, put any arbitrary key/value pair in a FontDict - but are + # useless since current major company CFF interpreters ignore anything but the set + # listed in this file. So, we just silently skip them. An exception is Weight: this + # is not used by any interpreter, but some foundries have asked that this be + # supported in FDArray FontDicts just to preserve information about the design when + # the font is being inspected. + # + # On top of that, there are fonts out there that contain such useless FontDict values. + # + # By subclassing TopDict, we *allow* all key/values from TopDict, both when reading + # from binary or when reading from XML, but by overriding `order` with a limited + # list of names, we ensure that only the useful names ever get exported to XML and + # ever get compiled into the binary font. + # + # We override compilerClass so we can warn about "useless" key/value pairs, either + # from the original binary font or from TTX input. 
+ # + # See: + # - https://github.com/fonttools/fonttools/issues/740 + # - https://github.com/fonttools/fonttools/issues/601 + # - https://github.com/adobe-type-tools/afdko/issues/137 + # + defaults = {} + converters = buildConverters(topDictOperators) + compilerClass = FontDictCompiler + orderCFF = ['FontName', 'FontMatrix', 'Weight', 'Private'] + orderCFF2 = ['Private'] + decompilerClass = TopDictDecompiler + + def __init__(self, strings=None, file=None, offset=None, + GlobalSubrs=None, isCFF2=None, vstore=None): + super(FontDict, self).__init__(strings, file, offset, isCFF2=isCFF2) + self.vstore = vstore + self.setCFF2(isCFF2) + + def setCFF2(self, isCFF2): + # isCFF2 may be None. + if isCFF2: + self.order = self.orderCFF2 + self._isCFF2 = True + else: + self.order = self.orderCFF + self._isCFF2 = False + + +class PrivateDict(BaseDict): + defaults = buildDefaults(privateDictOperators) + converters = buildConverters(privateDictOperators) + order = buildOrder(privateDictOperators) + decompilerClass = PrivateDictDecompiler + compilerClass = PrivateDictCompiler + + def __init__(self, strings=None, file=None, offset=None, isCFF2=None, + vstore=None): + super(PrivateDict, self).__init__(strings, file, offset, isCFF2=isCFF2) + self.vstore = vstore + if isCFF2: + self.defaults = buildDefaults(privateDictOperators2) + self.order = buildOrder(privateDictOperators2) + # Provide dummy values. This avoids needing to provide + # an isCFF2 state in a lot of places. + self.nominalWidthX = self.defaultWidthX = None + else: + self.defaults = buildDefaults(privateDictOperators) + self.order = buildOrder(privateDictOperators) + + @property + def in_cff2(self): + return self._isCFF2 + + def getNumRegions(self, vi=None): # called from misc/psCharStrings.py + # if getNumRegions is being called, we can assume that VarStore exists. 
+ if vi is None: + if hasattr(self, 'vsindex'): + vi = self.vsindex + else: + vi = 0 + numRegions = self.vstore.getNumRegions(vi) + return numRegions + + +class IndexedStrings(object): + + """SID -> string mapping.""" + + def __init__(self, file=None): + if file is None: + strings = [] + else: + strings = [ + tostr(s, encoding="latin1") + for s in Index(file, isCFF2=False) + ] + self.strings = strings + + def getCompiler(self): + return IndexedStringsCompiler(self, None, self, isCFF2=False) + + def __len__(self): + return len(self.strings) + + def __getitem__(self, SID): + if SID < cffStandardStringCount: + return cffStandardStrings[SID] + else: + return self.strings[SID - cffStandardStringCount] + + def getSID(self, s): + if not hasattr(self, "stringMapping"): + self.buildStringMapping() + s = tostr(s, encoding="latin1") + if s in cffStandardStringMapping: + SID = cffStandardStringMapping[s] + elif s in self.stringMapping: + SID = self.stringMapping[s] + else: + SID = len(self.strings) + cffStandardStringCount + self.strings.append(s) + self.stringMapping[s] = SID + return SID + + def getStrings(self): + return self.strings + + def buildStringMapping(self): + self.stringMapping = {} + for index in range(len(self.strings)): + self.stringMapping[self.strings[index]] = index + cffStandardStringCount + + +# The 391 Standard Strings as used in the CFF format. 
+# from Adobe Technical None #5176, version 1.0, 18 March 1998 + +cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign', + 'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright', + 'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one', + 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon', + 'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C', + 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', + 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', + 'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c', + 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', + 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright', + 'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin', + 'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft', + 'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger', + 'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase', + 'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand', + 'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve', + 'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron', + 'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae', + 'dotlessi', 'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior', + 'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn', + 'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters', + 'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior', + 'copyright', 'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring', + 'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave', + 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute', + 'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 
'Scaron', 'Uacute', + 'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron', + 'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla', + 'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex', + 'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis', + 'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave', + 'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall', + 'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall', + 'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader', + 'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle', + 'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle', + 'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior', + 'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior', + 'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior', + 'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior', + 'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall', + 'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall', + 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall', + 'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', + 'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall', + 'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall', + 'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall', + 'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall', + 'questiondownsmall', 'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths', + 'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior', + 'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior', + 'oneinferior', 'twoinferior', 
'threeinferior', 'fourinferior', 'fiveinferior', + 'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior', + 'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall', + 'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall', + 'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall', + 'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall', + 'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall', + 'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall', + 'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall', + 'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002', + '001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman', + 'Semibold' +] + +cffStandardStringCount = 391 +assert len(cffStandardStrings) == cffStandardStringCount +# build reverse mapping +cffStandardStringMapping = {} +for _i in range(cffStandardStringCount): + cffStandardStringMapping[cffStandardStrings[_i]] = _i + +cffISOAdobeStrings = [".notdef", "space", "exclam", "quotedbl", "numbersign", +"dollar", "percent", "ampersand", "quoteright", "parenleft", "parenright", +"asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two", +"three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon", +"less", "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G", +"H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", +"X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", +"underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", +"k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", +"braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent", +"sterling", "fraction", "yen", "florin", "section", "currency", "quotesingle", +"quotedblleft", "guillemotleft", 
"guilsinglleft", "guilsinglright", "fi", "fl", +"endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet", +"quotesinglbase", "quotedblbase", "quotedblright", "guillemotright", "ellipsis", +"perthousand", "questiondown", "grave", "acute", "circumflex", "tilde", +"macron", "breve", "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut", +"ogonek", "caron", "emdash", "AE", "ordfeminine", "Lslash", "Oslash", "OE", +"ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls", +"onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus", +"Thorn", "onequarter", "divide", "brokenbar", "degree", "thorn", +"threequarters", "twosuperior", "registered", "minus", "eth", "multiply", +"threesuperior", "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave", +"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", "Edieresis", "Egrave", +"Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute", +"Ocircumflex", "Odieresis", "Ograve", "Otilde", "Scaron", "Uacute", +"Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", "aacute", +"acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute", +"ecircumflex", "edieresis", "egrave", "iacute", "icircumflex", "idieresis", +"igrave", "ntilde", "oacute", "ocircumflex", "odieresis", "ograve", "otilde", +"scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis", +"zcaron"] + +cffISOAdobeStringCount = 229 +assert len(cffISOAdobeStrings) == cffISOAdobeStringCount + +cffIExpertStrings = [".notdef", "space", "exclamsmall", "Hungarumlautsmall", +"dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall", +"parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader", +"comma", "hyphen", "period", "fraction", "zerooldstyle", "oneoldstyle", +"twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle", +"sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", "semicolon", 
+"commasuperior", "threequartersemdash", "periodsuperior", "questionsmall", +"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", +"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", +"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", +"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall", +"Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", "Gsmall", "Hsmall", +"Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall", +"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall", +"Ysmall", "Zsmall", "colonmonetary", "onefitted", "rupiah", "Tildesmall", +"exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall", +"Dieresissmall", "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", +"figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall", +"onequarter", "onehalf", "threequarters", "questiondownsmall", "oneeighth", +"threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds", +"zerosuperior", "onesuperior", "twosuperior", "threesuperior", "foursuperior", +"fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior", "ninesuperior", +"zeroinferior", "oneinferior", "twoinferior", "threeinferior", "fourinferior", +"fiveinferior", "sixinferior", "seveninferior", "eightinferior", "nineinferior", +"centinferior", "dollarinferior", "periodinferior", "commainferior", +"Agravesmall", "Aacutesmall", "Acircumflexsmall", "Atildesmall", +"Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall", +"Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall", +"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall", +"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall", +"Odieresissmall", "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall", +"Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall", +"Ydieresissmall"] + 
+cffExpertStringCount = 166 +assert len(cffIExpertStrings) == cffExpertStringCount + +cffExpertSubsetStrings = [".notdef", "space", "dollaroldstyle", +"dollarsuperior", "parenleftsuperior", "parenrightsuperior", "twodotenleader", +"onedotenleader", "comma", "hyphen", "period", "fraction", "zerooldstyle", +"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", +"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", +"semicolon", "commasuperior", "threequartersemdash", "periodsuperior", +"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", +"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", +"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", +"parenrightinferior", "hyphensuperior", "colonmonetary", "onefitted", "rupiah", +"centoldstyle", "figuredash", "hypheninferior", "onequarter", "onehalf", +"threequarters", "oneeighth", "threeeighths", "fiveeighths", "seveneighths", +"onethird", "twothirds", "zerosuperior", "onesuperior", "twosuperior", +"threesuperior", "foursuperior", "fivesuperior", "sixsuperior", "sevensuperior", +"eightsuperior", "ninesuperior", "zeroinferior", "oneinferior", "twoinferior", +"threeinferior", "fourinferior", "fiveinferior", "sixinferior", "seveninferior", +"eightinferior", "nineinferior", "centinferior", "dollarinferior", +"periodinferior", "commainferior"] + +cffExpertSubsetStringCount = 87 +assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount diff --git a/.venv/lib/python3.9/site-packages/fontTools/cffLib/__pycache__/__init__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/cffLib/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..94869d40 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/cffLib/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/cffLib/__pycache__/specializer.cpython-39.pyc 
b/.venv/lib/python3.9/site-packages/fontTools/cffLib/__pycache__/specializer.cpython-39.pyc new file mode 100644 index 00000000..4f93643e Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/cffLib/__pycache__/specializer.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/cffLib/__pycache__/width.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/cffLib/__pycache__/width.cpython-39.pyc new file mode 100644 index 00000000..618a3d21 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/cffLib/__pycache__/width.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/cffLib/specializer.py b/.venv/lib/python3.9/site-packages/fontTools/cffLib/specializer.py new file mode 100644 index 00000000..fbfefa92 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/cffLib/specializer.py @@ -0,0 +1,750 @@ +# -*- coding: utf-8 -*- + +"""T2CharString operator specializer and generalizer. + +PostScript glyph drawing operations can be expressed in multiple different +ways. For example, as well as the ``lineto`` operator, there is also a +``hlineto`` operator which draws a horizontal line, removing the need to +specify a ``dx`` coordinate, and a ``vlineto`` operator which draws a +vertical line, removing the need to specify a ``dy`` coordinate. As well +as decompiling :class:`fontTools.misc.psCharStrings.T2CharString` objects +into lists of operations, this module allows for conversion between general +and specific forms of the operation. 
+ +""" + +from fontTools.cffLib import maxStackLimit + + +def stringToProgram(string): + if isinstance(string, str): + string = string.split() + program = [] + for token in string: + try: + token = int(token) + except ValueError: + try: + token = float(token) + except ValueError: + pass + program.append(token) + return program + + +def programToString(program): + return ' '.join(str(x) for x in program) + + +def programToCommands(program, getNumRegions=None): + """Takes a T2CharString program list and returns list of commands. + Each command is a two-tuple of commandname,arg-list. The commandname might + be empty string if no commandname shall be emitted (used for glyph width, + hintmask/cntrmask argument, as well as stray arguments at the end of the + program (¯\_(ツ)_/¯). + 'getNumRegions' may be None, or a callable object. It must return the + number of regions. 'getNumRegions' takes a single argument, vsindex. If + the vsindex argument is None, getNumRegions returns the default number + of regions for the charstring, else it returns the numRegions for + the vsindex. + The Charstring may or may not start with a width value. If the first + non-blend operator has an odd number of arguments, then the first argument is + a width, and is popped off. This is complicated with blend operators, as + there may be more than one before the first hint or moveto operator, and each + one reduces several arguments to just one list argument. We have to sum the + number of arguments that are not part of the blend arguments, and all the + 'numBlends' values. We could instead have said that by definition, if there + is a blend operator, there is no width value, since CFF2 Charstrings don't + have width values. I discussed this with Behdad, and we are allowing for an + initial width value in this case because developers may assemble a CFF2 + charstring from CFF Charstrings, which could have width values. 
+ """ + + seenWidthOp = False + vsIndex = None + lenBlendStack = 0 + lastBlendIndex = 0 + commands = [] + stack = [] + it = iter(program) + + for token in it: + if not isinstance(token, str): + stack.append(token) + continue + + if token == 'blend': + assert getNumRegions is not None + numSourceFonts = 1 + getNumRegions(vsIndex) + # replace the blend op args on the stack with a single list + # containing all the blend op args. + numBlends = stack[-1] + numBlendArgs = numBlends * numSourceFonts + 1 + # replace first blend op by a list of the blend ops. + stack[-numBlendArgs:] = [stack[-numBlendArgs:]] + lenBlendStack += numBlends + len(stack) - 1 + lastBlendIndex = len(stack) + # if a blend op exists, this is or will be a CFF2 charstring. + continue + + elif token == 'vsindex': + vsIndex = stack[-1] + assert type(vsIndex) is int + + elif (not seenWidthOp) and token in {'hstem', 'hstemhm', 'vstem', 'vstemhm', + 'cntrmask', 'hintmask', + 'hmoveto', 'vmoveto', 'rmoveto', + 'endchar'}: + seenWidthOp = True + parity = token in {'hmoveto', 'vmoveto'} + if lenBlendStack: + # lenBlendStack has the number of args represented by the last blend + # arg and all the preceding args. We need to now add the number of + # args following the last blend arg. 
+ numArgs = lenBlendStack + len(stack[lastBlendIndex:]) + else: + numArgs = len(stack) + if numArgs and (numArgs % 2) ^ parity: + width = stack.pop(0) + commands.append(('', [width])) + + if token in {'hintmask', 'cntrmask'}: + if stack: + commands.append(('', stack)) + commands.append((token, [])) + commands.append(('', [next(it)])) + else: + commands.append((token, stack)) + stack = [] + if stack: + commands.append(('', stack)) + return commands + + +def _flattenBlendArgs(args): + token_list = [] + for arg in args: + if isinstance(arg, list): + token_list.extend(arg) + token_list.append('blend') + else: + token_list.append(arg) + return token_list + +def commandsToProgram(commands): + """Takes a commands list as returned by programToCommands() and converts + it back to a T2CharString program list.""" + program = [] + for op,args in commands: + if any(isinstance(arg, list) for arg in args): + args = _flattenBlendArgs(args) + program.extend(args) + if op: + program.append(op) + return program + + +def _everyN(el, n): + """Group the list el into groups of size n""" + if len(el) % n != 0: raise ValueError(el) + for i in range(0, len(el), n): + yield el[i:i+n] + + +class _GeneralizerDecombinerCommandsMap(object): + + @staticmethod + def rmoveto(args): + if len(args) != 2: raise ValueError(args) + yield ('rmoveto', args) + @staticmethod + def hmoveto(args): + if len(args) != 1: raise ValueError(args) + yield ('rmoveto', [args[0], 0]) + @staticmethod + def vmoveto(args): + if len(args) != 1: raise ValueError(args) + yield ('rmoveto', [0, args[0]]) + + @staticmethod + def rlineto(args): + if not args: raise ValueError(args) + for args in _everyN(args, 2): + yield ('rlineto', args) + @staticmethod + def hlineto(args): + if not args: raise ValueError(args) + it = iter(args) + try: + while True: + yield ('rlineto', [next(it), 0]) + yield ('rlineto', [0, next(it)]) + except StopIteration: + pass + @staticmethod + def vlineto(args): + if not args: raise ValueError(args) + it 
= iter(args) + try: + while True: + yield ('rlineto', [0, next(it)]) + yield ('rlineto', [next(it), 0]) + except StopIteration: + pass + @staticmethod + def rrcurveto(args): + if not args: raise ValueError(args) + for args in _everyN(args, 6): + yield ('rrcurveto', args) + @staticmethod + def hhcurveto(args): + if len(args) < 4 or len(args) % 4 > 1: raise ValueError(args) + if len(args) % 2 == 1: + yield ('rrcurveto', [args[1], args[0], args[2], args[3], args[4], 0]) + args = args[5:] + for args in _everyN(args, 4): + yield ('rrcurveto', [args[0], 0, args[1], args[2], args[3], 0]) + @staticmethod + def vvcurveto(args): + if len(args) < 4 or len(args) % 4 > 1: raise ValueError(args) + if len(args) % 2 == 1: + yield ('rrcurveto', [args[0], args[1], args[2], args[3], 0, args[4]]) + args = args[5:] + for args in _everyN(args, 4): + yield ('rrcurveto', [0, args[0], args[1], args[2], 0, args[3]]) + @staticmethod + def hvcurveto(args): + if len(args) < 4 or len(args) % 8 not in {0,1,4,5}: raise ValueError(args) + last_args = None + if len(args) % 2 == 1: + lastStraight = len(args) % 8 == 5 + args, last_args = args[:-5], args[-5:] + it = _everyN(args, 4) + try: + while True: + args = next(it) + yield ('rrcurveto', [args[0], 0, args[1], args[2], 0, args[3]]) + args = next(it) + yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], 0]) + except StopIteration: + pass + if last_args: + args = last_args + if lastStraight: + yield ('rrcurveto', [args[0], 0, args[1], args[2], args[4], args[3]]) + else: + yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], args[4]]) + @staticmethod + def vhcurveto(args): + if len(args) < 4 or len(args) % 8 not in {0,1,4,5}: raise ValueError(args) + last_args = None + if len(args) % 2 == 1: + lastStraight = len(args) % 8 == 5 + args, last_args = args[:-5], args[-5:] + it = _everyN(args, 4) + try: + while True: + args = next(it) + yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], 0]) + args = next(it) + yield ('rrcurveto', 
[args[0], 0, args[1], args[2], 0, args[3]]) + except StopIteration: + pass + if last_args: + args = last_args + if lastStraight: + yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], args[4]]) + else: + yield ('rrcurveto', [args[0], 0, args[1], args[2], args[4], args[3]]) + + @staticmethod + def rcurveline(args): + if len(args) < 8 or len(args) % 6 != 2: raise ValueError(args) + args, last_args = args[:-2], args[-2:] + for args in _everyN(args, 6): + yield ('rrcurveto', args) + yield ('rlineto', last_args) + @staticmethod + def rlinecurve(args): + if len(args) < 8 or len(args) % 2 != 0: raise ValueError(args) + args, last_args = args[:-6], args[-6:] + for args in _everyN(args, 2): + yield ('rlineto', args) + yield ('rrcurveto', last_args) + +def _convertBlendOpToArgs(blendList): + # args is list of blend op args. Since we are supporting + # recursive blend op calls, some of these args may also + # be a list of blend op args, and need to be converted before + # we convert the current list. + if any([isinstance(arg, list) for arg in blendList]): + args = [i for e in blendList for i in + (_convertBlendOpToArgs(e) if isinstance(e,list) else [e]) ] + else: + args = blendList + + # We now know that blendList contains a blend op argument list, even if + # some of the args are lists that each contain a blend op argument list. + # Convert from: + # [default font arg sequence x0,...,xn] + [delta tuple for x0] + ... + [delta tuple for xn] + # to: + # [ [x0] + [delta tuple for x0], + # ..., + # [xn] + [delta tuple for xn] ] + numBlends = args[-1] + # Can't use args.pop() when the args are being used in a nested list + # comprehension. 
See calling context + args = args[:-1] + + numRegions = len(args)//numBlends - 1 + if not (numBlends*(numRegions + 1) == len(args)): + raise ValueError(blendList) + + defaultArgs = [[arg] for arg in args[:numBlends]] + deltaArgs = args[numBlends:] + numDeltaValues = len(deltaArgs) + deltaList = [ deltaArgs[i:i + numRegions] for i in range(0, numDeltaValues, numRegions) ] + blend_args = [ a + b for a, b in zip(defaultArgs,deltaList)] + return blend_args + +def generalizeCommands(commands, ignoreErrors=False): + result = [] + mapping = _GeneralizerDecombinerCommandsMap + for op, args in commands: + # First, generalize any blend args in the arg list. + if any([isinstance(arg, list) for arg in args]): + try: + args = [n for arg in args for n in (_convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg])] + except ValueError: + if ignoreErrors: + # Store op as data, such that consumers of commands do not have to + # deal with incorrect number of arguments. + result.append(('', args)) + result.append(('', [op])) + else: + raise + + func = getattr(mapping, op, None) + if not func: + result.append((op,args)) + continue + try: + for command in func(args): + result.append(command) + except ValueError: + if ignoreErrors: + # Store op as data, such that consumers of commands do not have to + # deal with incorrect number of arguments. + result.append(('', args)) + result.append(('', [op])) + else: + raise + return result + +def generalizeProgram(program, getNumRegions=None, **kwargs): + return commandsToProgram(generalizeCommands(programToCommands(program, getNumRegions), **kwargs)) + + +def _categorizeVector(v): + """ + Takes X,Y vector v and returns one of r, h, v, or 0 depending on which + of X and/or Y are zero, plus tuple of nonzero ones. If both are zero, + it returns a single zero still. 
+ + >>> _categorizeVector((0,0)) + ('0', (0,)) + >>> _categorizeVector((1,0)) + ('h', (1,)) + >>> _categorizeVector((0,2)) + ('v', (2,)) + >>> _categorizeVector((1,2)) + ('r', (1, 2)) + """ + if not v[0]: + if not v[1]: + return '0', v[:1] + else: + return 'v', v[1:] + else: + if not v[1]: + return 'h', v[:1] + else: + return 'r', v + +def _mergeCategories(a, b): + if a == '0': return b + if b == '0': return a + if a == b: return a + return None + +def _negateCategory(a): + if a == 'h': return 'v' + if a == 'v': return 'h' + assert a in '0r' + return a + +def _convertToBlendCmds(args): + # return a list of blend commands, and + # the remaining non-blended args, if any. + num_args = len(args) + stack_use = 0 + new_args = [] + i = 0 + while i < num_args: + arg = args[i] + if not isinstance(arg, list): + new_args.append(arg) + i += 1 + stack_use += 1 + else: + prev_stack_use = stack_use + # The arg is a tuple of blend values. + # These are each (master 0,delta 1..delta n) + # Combine as many successive tuples as we can, + # up to the max stack limit. + num_sources = len(arg) + blendlist = [arg] + i += 1 + stack_use += 1 + num_sources # 1 for the num_blends arg + while (i < num_args) and isinstance(args[i], list): + blendlist.append(args[i]) + i += 1 + stack_use += num_sources + if stack_use + num_sources > maxStackLimit: + # if we are here, max stack is the CFF2 max stack. + # I use the CFF2 max stack limit here rather than + # the 'maxstack' chosen by the client, as the default + # maxstack may have been used unintentionally. For all + # the other operators, this just produces a little less + # optimization, but here it puts a hard (and low) limit + # on the number of source fonts that can be used. + break + # blendList now contains as many single blend tuples as can be + # combined without exceeding the CFF2 stack limit. 
+ num_blends = len(blendlist) + # append the 'num_blends' default font values + blend_args = [] + for arg in blendlist: + blend_args.append(arg[0]) + for arg in blendlist: + blend_args.extend(arg[1:]) + blend_args.append(num_blends) + new_args.append(blend_args) + stack_use = prev_stack_use + num_blends + + return new_args + +def _addArgs(a, b): + if isinstance(b, list): + if isinstance(a, list): + if len(a) != len(b): + raise ValueError() + return [_addArgs(va, vb) for va,vb in zip(a, b)] + else: + a, b = b, a + if isinstance(a, list): + return [_addArgs(a[0], b)] + a[1:] + return a + b + + +def specializeCommands(commands, + ignoreErrors=False, + generalizeFirst=True, + preserveTopology=False, + maxstack=48): + + # We perform several rounds of optimizations. They are carefully ordered and are: + # + # 0. Generalize commands. + # This ensures that they are in our expected simple form, with each line/curve only + # having arguments for one segment, and using the generic form (rlineto/rrcurveto). + # If caller is sure the input is in this form, they can turn off generalization to + # save time. + # + # 1. Combine successive rmoveto operations. + # + # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants. + # We specialize into some, made-up, variants as well, which simplifies following + # passes. + # + # 3. Merge or delete redundant operations, to the extent requested. + # OpenType spec declares point numbers in CFF undefined. As such, we happily + # change topology. If client relies on point numbers (in GPOS anchors, or for + # hinting purposes(what?)) they can turn this off. + # + # 4. Peephole optimization to revert back some of the h/v variants back into their + # original "relative" operator (rline/rrcurveto) if that saves a byte. + # + # 5. Combine adjacent operators when possible, minding not to go over max stack size. + # + # 6. Resolve any remaining made-up operators into real operators. 
+ # + # I have convinced myself that this produces optimal bytecode (except for, possibly + # one byte each time maxstack size prohibits combining.) YMMV, but you'd be wrong. :-) + # A dynamic-programming approach can do the same but would be significantly slower. + # + # 7. For any args which are blend lists, convert them to a blend command. + + + # 0. Generalize commands. + if generalizeFirst: + commands = generalizeCommands(commands, ignoreErrors=ignoreErrors) + else: + commands = list(commands) # Make copy since we modify in-place later. + + # 1. Combine successive rmoveto operations. + for i in range(len(commands)-1, 0, -1): + if 'rmoveto' == commands[i][0] == commands[i-1][0]: + v1, v2 = commands[i-1][1], commands[i][1] + commands[i-1] = ('rmoveto', [v1[0]+v2[0], v1[1]+v2[1]]) + del commands[i] + + # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants. + # + # We, in fact, specialize into more, made-up, variants that special-case when both + # X and Y components are zero. This simplifies the following optimization passes. + # This case is rare, but OCD does not let me skip it. + # + # After this round, we will have four variants that use the following mnemonics: + # + # - 'r' for relative, ie. non-zero X and non-zero Y, + # - 'h' for horizontal, ie. zero X and non-zero Y, + # - 'v' for vertical, ie. non-zero X and zero Y, + # - '0' for zeros, ie. zero X and zero Y. + # + # The '0' pseudo-operators are not part of the spec, but help simplify the following + # optimization rounds. We resolve them at the end. So, after this, we will have four + # moveto and four lineto variants: + # + # - 0moveto, 0lineto + # - hmoveto, hlineto + # - vmoveto, vlineto + # - rmoveto, rlineto + # + # and sixteen curveto variants. For example, a '0hcurveto' operator means a curve + # dx0,dy0,dx1,dy1,dx2,dy2,dx3,dy3 where dx0, dx1, and dy3 are zero but not dx3. + # An 'rvcurveto' means dx3 is zero but not dx0,dy0,dy3. 
+ # + # There are nine different variants of curves without the '0'. Those nine map exactly + # to the existing curve variants in the spec: rrcurveto, and the four variants hhcurveto, + # vvcurveto, hvcurveto, and vhcurveto each cover two cases, one with an odd number of + # arguments and one without. Eg. an hhcurveto with an extra argument (odd number of + # arguments) is in fact an rhcurveto. The operators in the spec are designed such that + # all four of rhcurveto, rvcurveto, hrcurveto, and vrcurveto are encodable for one curve. + # + # Of the curve types with '0', the 00curveto is equivalent to a lineto variant. The rest + # of the curve types with a 0 need to be encoded as a h or v variant. Ie. a '0' can be + # thought of a "don't care" and can be used as either an 'h' or a 'v'. As such, we always + # encode a number 0 as argument when we use a '0' variant. Later on, we can just substitute + # the '0' with either 'h' or 'v' and it works. + # + # When we get to curve splines however, things become more complicated... XXX finish this. + # There's one more complexity with splines. If one side of the spline is not horizontal or + # vertical (or zero), ie. if it's 'r', then it limits which spline types we can encode. + # Only hhcurveto and vvcurveto operators can encode a spline starting with 'r', and + # only hvcurveto and vhcurveto operators can encode a spline ending with 'r'. + # This limits our merge opportunities later. + # + for i in range(len(commands)): + op,args = commands[i] + + if op in {'rmoveto', 'rlineto'}: + c, args = _categorizeVector(args) + commands[i] = c+op[1:], args + continue + + if op == 'rrcurveto': + c1, args1 = _categorizeVector(args[:2]) + c2, args2 = _categorizeVector(args[-2:]) + commands[i] = c1+c2+'curveto', args1+args[2:4]+args2 + continue + + # 3. Merge or delete redundant operations, to the extent requested. + # + # TODO + # A 0moveto that comes before all other path operations can be removed. 
+ # though I find conflicting evidence for this. + # + # TODO + # "If hstem and vstem hints are both declared at the beginning of a + # CharString, and this sequence is followed directly by the hintmask or + # cntrmask operators, then the vstem hint operator (or, if applicable, + # the vstemhm operator) need not be included." + # + # "The sequence and form of a CFF2 CharString program may be represented as: + # {hs* vs* cm* hm* mt subpath}? {mt subpath}*" + # + # https://www.microsoft.com/typography/otspec/cff2charstr.htm#section3.1 + # + # For Type2 CharStrings the sequence is: + # w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar" + + + # Some other redundancies change topology (point numbers). + if not preserveTopology: + for i in range(len(commands)-1, -1, -1): + op, args = commands[i] + + # A 00curveto is demoted to a (specialized) lineto. + if op == '00curveto': + assert len(args) == 4 + c, args = _categorizeVector(args[1:3]) + op = c+'lineto' + commands[i] = op, args + # and then... + + # A 0lineto can be deleted. + if op == '0lineto': + del commands[i] + continue + + # Merge adjacent hlineto's and vlineto's. + # In CFF2 charstrings from variable fonts, each + # arg item may be a list of blendable values, one from + # each source font. + if (i and op in {'hlineto', 'vlineto'} and + (op == commands[i-1][0])): + _, other_args = commands[i-1] + assert len(args) == 1 and len(other_args) == 1 + try: + new_args = [_addArgs(args[0], other_args[0])] + except ValueError: + continue + commands[i-1] = (op, new_args) + del commands[i] + continue + + # 4. Peephole optimization to revert back some of the h/v variants back into their + # original "relative" operator (rline/rrcurveto) if that saves a byte. 
+ for i in range(1, len(commands)-1): + op,args = commands[i] + prv,nxt = commands[i-1][0], commands[i+1][0] + + if op in {'0lineto', 'hlineto', 'vlineto'} and prv == nxt == 'rlineto': + assert len(args) == 1 + args = [0, args[0]] if op[0] == 'v' else [args[0], 0] + commands[i] = ('rlineto', args) + continue + + if op[2:] == 'curveto' and len(args) == 5 and prv == nxt == 'rrcurveto': + assert (op[0] == 'r') ^ (op[1] == 'r') + if op[0] == 'v': + pos = 0 + elif op[0] != 'r': + pos = 1 + elif op[1] == 'v': + pos = 4 + else: + pos = 5 + # Insert, while maintaining the type of args (can be tuple or list). + args = args[:pos] + type(args)((0,)) + args[pos:] + commands[i] = ('rrcurveto', args) + continue + + # 5. Combine adjacent operators when possible, minding not to go over max stack size. + for i in range(len(commands)-1, 0, -1): + op1,args1 = commands[i-1] + op2,args2 = commands[i] + new_op = None + + # Merge logic... + if {op1, op2} <= {'rlineto', 'rrcurveto'}: + if op1 == op2: + new_op = op1 + else: + if op2 == 'rrcurveto' and len(args2) == 6: + new_op = 'rlinecurve' + elif len(args2) == 2: + new_op = 'rcurveline' + + elif (op1, op2) in {('rlineto', 'rlinecurve'), ('rrcurveto', 'rcurveline')}: + new_op = op2 + + elif {op1, op2} == {'vlineto', 'hlineto'}: + new_op = op1 + + elif 'curveto' == op1[2:] == op2[2:]: + d0, d1 = op1[:2] + d2, d3 = op2[:2] + + if d1 == 'r' or d2 == 'r' or d0 == d3 == 'r': + continue + + d = _mergeCategories(d1, d2) + if d is None: continue + if d0 == 'r': + d = _mergeCategories(d, d3) + if d is None: continue + new_op = 'r'+d+'curveto' + elif d3 == 'r': + d0 = _mergeCategories(d0, _negateCategory(d)) + if d0 is None: continue + new_op = d0+'r'+'curveto' + else: + d0 = _mergeCategories(d0, d3) + if d0 is None: continue + new_op = d0+d+'curveto' + + # Make sure the stack depth does not exceed (maxstack - 1), so + # that subroutinizer can insert subroutine calls at any point. 
+ if new_op and len(args1) + len(args2) < maxstack: + commands[i-1] = (new_op, args1+args2) + del commands[i] + + # 6. Resolve any remaining made-up operators into real operators. + for i in range(len(commands)): + op,args = commands[i] + + if op in {'0moveto', '0lineto'}: + commands[i] = 'h'+op[1:], args + continue + + if op[2:] == 'curveto' and op[:2] not in {'rr', 'hh', 'vv', 'vh', 'hv'}: + op0, op1 = op[:2] + if (op0 == 'r') ^ (op1 == 'r'): + assert len(args) % 2 == 1 + if op0 == '0': op0 = 'h' + if op1 == '0': op1 = 'h' + if op0 == 'r': op0 = op1 + if op1 == 'r': op1 = _negateCategory(op0) + assert {op0,op1} <= {'h','v'}, (op0, op1) + + if len(args) % 2: + if op0 != op1: # vhcurveto / hvcurveto + if (op0 == 'h') ^ (len(args) % 8 == 1): + # Swap last two args order + args = args[:-2]+args[-1:]+args[-2:-1] + else: # hhcurveto / vvcurveto + if op0 == 'h': # hhcurveto + # Swap first two args order + args = args[1:2]+args[:1]+args[2:] + + commands[i] = op0+op1+'curveto', args + continue + + # 7. For any series of args which are blend lists, convert the series to a single blend arg. 
+ for i in range(len(commands)): + op, args = commands[i] + if any(isinstance(arg, list) for arg in args): + commands[i] = op, _convertToBlendCmds(args) + + return commands + +def specializeProgram(program, getNumRegions=None, **kwargs): + return commandsToProgram(specializeCommands(programToCommands(program, getNumRegions), **kwargs)) + + +if __name__ == '__main__': + import sys + if len(sys.argv) == 1: + import doctest + sys.exit(doctest.testmod().failed) + program = stringToProgram(sys.argv[1:]) + print("Program:"); print(programToString(program)) + commands = programToCommands(program) + print("Commands:"); print(commands) + program2 = commandsToProgram(commands) + print("Program from commands:"); print(programToString(program2)) + assert program == program2 + print("Generalized program:"); print(programToString(generalizeProgram(program))) + print("Specialized program:"); print(programToString(specializeProgram(program))) diff --git a/.venv/lib/python3.9/site-packages/fontTools/cffLib/width.py b/.venv/lib/python3.9/site-packages/fontTools/cffLib/width.py new file mode 100644 index 00000000..00b859bb --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/cffLib/width.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- + +"""T2CharString glyph width optimizer. + +CFF glyphs whose width equals the CFF Private dictionary's ``defaultWidthX`` +value do not need to specify their width in their charstring, saving bytes. 
+This module determines the optimum ``defaultWidthX`` and ``nominalWidthX`` +values for a font, when provided with a list of glyph widths.""" + +from fontTools.ttLib import TTFont +from collections import defaultdict +from operator import add +from functools import reduce + + +class missingdict(dict): + def __init__(self, missing_func): + self.missing_func = missing_func + def __missing__(self, v): + return self.missing_func(v) + +def cumSum(f, op=add, start=0, decreasing=False): + + keys = sorted(f.keys()) + minx, maxx = keys[0], keys[-1] + + total = reduce(op, f.values(), start) + + if decreasing: + missing = lambda x: start if x > maxx else total + domain = range(maxx, minx - 1, -1) + else: + missing = lambda x: start if x < minx else total + domain = range(minx, maxx + 1) + + out = missingdict(missing) + + v = start + for x in domain: + v = op(v, f[x]) + out[x] = v + + return out + +def byteCost(widths, default, nominal): + + if not hasattr(widths, 'items'): + d = defaultdict(int) + for w in widths: + d[w] += 1 + widths = d + + cost = 0 + for w,freq in widths.items(): + if w == default: continue + diff = abs(w - nominal) + if diff <= 107: + cost += freq + elif diff <= 1131: + cost += freq * 2 + else: + cost += freq * 5 + return cost + + +def optimizeWidthsBruteforce(widths): + """Bruteforce version. Veeeeeeeeeeeeeeeeery slow. 
Only works for smallests of fonts.""" + + d = defaultdict(int) + for w in widths: + d[w] += 1 + + # Maximum number of bytes using default can possibly save + maxDefaultAdvantage = 5 * max(d.values()) + + minw, maxw = min(widths), max(widths) + domain = list(range(minw, maxw+1)) + + bestCostWithoutDefault = min(byteCost(widths, None, nominal) for nominal in domain) + + bestCost = len(widths) * 5 + 1 + for nominal in domain: + if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage: + continue + for default in domain: + cost = byteCost(widths, default, nominal) + if cost < bestCost: + bestCost = cost + bestDefault = default + bestNominal = nominal + + return bestDefault, bestNominal + + +def optimizeWidths(widths): + """Given a list of glyph widths, or dictionary mapping glyph width to number of + glyphs having that, returns a tuple of best CFF default and nominal glyph widths. + + This algorithm is linear in UPEM+numGlyphs.""" + + if not hasattr(widths, 'items'): + d = defaultdict(int) + for w in widths: + d[w] += 1 + widths = d + + keys = sorted(widths.keys()) + minw, maxw = keys[0], keys[-1] + domain = list(range(minw, maxw+1)) + + # Cumulative sum/max forward/backward. + cumFrqU = cumSum(widths, op=add) + cumMaxU = cumSum(widths, op=max) + cumFrqD = cumSum(widths, op=add, decreasing=True) + cumMaxD = cumSum(widths, op=max, decreasing=True) + + # Cost per nominal choice, without default consideration. + nomnCostU = missingdict(lambda x: cumFrqU[x] + cumFrqU[x-108] + cumFrqU[x-1132]*3) + nomnCostD = missingdict(lambda x: cumFrqD[x] + cumFrqD[x+108] + cumFrqD[x+1132]*3) + nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x]) + + # Cost-saving per nominal choice, by best default choice. 
+ dfltCostU = missingdict(lambda x: max(cumMaxU[x], cumMaxU[x-108]*2, cumMaxU[x-1132]*5)) + dfltCostD = missingdict(lambda x: max(cumMaxD[x], cumMaxD[x+108]*2, cumMaxD[x+1132]*5)) + dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x])) + + # Combined cost per nominal choice. + bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x]) + + # Best nominal. + nominal = min(domain, key=lambda x: bestCost[x]) + + # Work back the best default. + bestC = bestCost[nominal] + dfltC = nomnCost[nominal] - bestCost[nominal] + ends = [] + if dfltC == dfltCostU[nominal]: + starts = [nominal, nominal-108, nominal-1131] + for start in starts: + while cumMaxU[start] and cumMaxU[start] == cumMaxU[start-1]: + start -= 1 + ends.append(start) + else: + starts = [nominal, nominal+108, nominal+1131] + for start in starts: + while cumMaxD[start] and cumMaxD[start] == cumMaxD[start+1]: + start += 1 + ends.append(start) + default = min(ends, key=lambda default: byteCost(widths, default, nominal)) + + return default, nominal + +def main(args=None): + """Calculate optimum defaultWidthX/nominalWidthX values""" + + import argparse + parser = argparse.ArgumentParser( + "fonttools cffLib.width", + description=main.__doc__, + ) + parser.add_argument('inputs', metavar='FILE', type=str, nargs='+', + help="Input TTF files") + parser.add_argument('-b', '--brute-force', dest="brute", action="store_true", + help="Use brute-force approach (VERY slow)") + + args = parser.parse_args(args) + + for fontfile in args.inputs: + font = TTFont(fontfile) + hmtx = font['hmtx'] + widths = [m[0] for m in hmtx.metrics.values()] + if args.brute: + default, nominal = optimizeWidthsBruteforce(widths) + else: + default, nominal = optimizeWidths(widths) + print("glyphs=%d default=%d nominal=%d byteCost=%d" % (len(widths), default, nominal, byteCost(widths, default, nominal))) + +if __name__ == '__main__': + import sys + if len(sys.argv) == 1: + import doctest + sys.exit(doctest.testmod().failed) + main() diff 
--git a/.venv/lib/python3.9/site-packages/fontTools/colorLib/__init__.py b/.venv/lib/python3.9/site-packages/fontTools/colorLib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/__init__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..90e2a67d Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/builder.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/builder.cpython-39.pyc new file mode 100644 index 00000000..6a2e3a3c Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/builder.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/errors.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/errors.cpython-39.pyc new file mode 100644 index 00000000..dca9a72b Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/errors.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/geometry.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/geometry.cpython-39.pyc new file mode 100644 index 00000000..d9031bb7 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/geometry.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/table_builder.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/table_builder.cpython-39.pyc new file mode 100644 index 00000000..5c7895e3 Binary files /dev/null and 
b/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/table_builder.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/unbuilder.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/unbuilder.cpython-39.pyc new file mode 100644 index 00000000..6a506829 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/colorLib/__pycache__/unbuilder.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/colorLib/builder.py b/.venv/lib/python3.9/site-packages/fontTools/colorLib/builder.py new file mode 100644 index 00000000..d2e35d81 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/colorLib/builder.py @@ -0,0 +1,673 @@ +""" +colorLib.builder: Build COLR/CPAL tables from scratch + +""" +import collections +import copy +import enum +from functools import partial +from math import ceil, log +from typing import ( + Any, + Dict, + Generator, + Iterable, + List, + Mapping, + Optional, + Sequence, + Tuple, + Type, + TypeVar, + Union, +) +from fontTools.misc.arrayTools import intRect +from fontTools.misc.fixedTools import fixedToFloat +from fontTools.ttLib.tables import C_O_L_R_ +from fontTools.ttLib.tables import C_P_A_L_ +from fontTools.ttLib.tables import _n_a_m_e +from fontTools.ttLib.tables import otTables as ot +from fontTools.ttLib.tables.otTables import ExtendMode, CompositeMode +from .errors import ColorLibError +from .geometry import round_start_circle_stable_containment +from .table_builder import BuildCallback, TableBuilder + + +# TODO move type aliases to colorLib.types? 
+T = TypeVar("T") +_Kwargs = Mapping[str, Any] +_PaintInput = Union[int, _Kwargs, ot.Paint, Tuple[str, "_PaintInput"]] +_PaintInputList = Sequence[_PaintInput] +_ColorGlyphsDict = Dict[str, Union[_PaintInputList, _PaintInput]] +_ColorGlyphsV0Dict = Dict[str, Sequence[Tuple[str, int]]] +_ClipBoxInput = Union[ + Tuple[int, int, int, int, int], # format 1, variable + Tuple[int, int, int, int], # format 0, non-variable + ot.ClipBox, +] + + +MAX_PAINT_COLR_LAYER_COUNT = 255 +_DEFAULT_ALPHA = 1.0 +_MAX_REUSE_LEN = 32 + + +def _beforeBuildPaintRadialGradient(paint, source): + x0 = source["x0"] + y0 = source["y0"] + r0 = source["r0"] + x1 = source["x1"] + y1 = source["y1"] + r1 = source["r1"] + + # TODO apparently no builder_test confirms this works (?) + + # avoid abrupt change after rounding when c0 is near c1's perimeter + c = round_start_circle_stable_containment((x0, y0), r0, (x1, y1), r1) + x0, y0 = c.centre + r0 = c.radius + + # update source to ensure paint is built with corrected values + source["x0"] = x0 + source["y0"] = y0 + source["r0"] = r0 + source["x1"] = x1 + source["y1"] = y1 + source["r1"] = r1 + + return paint, source + + +def _defaultColorStop(): + colorStop = ot.ColorStop() + colorStop.Alpha = _DEFAULT_ALPHA + return colorStop + + +def _defaultVarColorStop(): + colorStop = ot.VarColorStop() + colorStop.Alpha = _DEFAULT_ALPHA + return colorStop + + +def _defaultColorLine(): + colorLine = ot.ColorLine() + colorLine.Extend = ExtendMode.PAD + return colorLine + + +def _defaultVarColorLine(): + colorLine = ot.VarColorLine() + colorLine.Extend = ExtendMode.PAD + return colorLine + + +def _defaultPaintSolid(): + paint = ot.Paint() + paint.Alpha = _DEFAULT_ALPHA + return paint + + +def _buildPaintCallbacks(): + return { + ( + BuildCallback.BEFORE_BUILD, + ot.Paint, + ot.PaintFormat.PaintRadialGradient, + ): _beforeBuildPaintRadialGradient, + ( + BuildCallback.BEFORE_BUILD, + ot.Paint, + ot.PaintFormat.PaintVarRadialGradient, + ): 
_beforeBuildPaintRadialGradient, + (BuildCallback.CREATE_DEFAULT, ot.ColorStop): _defaultColorStop, + (BuildCallback.CREATE_DEFAULT, ot.VarColorStop): _defaultVarColorStop, + (BuildCallback.CREATE_DEFAULT, ot.ColorLine): _defaultColorLine, + (BuildCallback.CREATE_DEFAULT, ot.VarColorLine): _defaultVarColorLine, + ( + BuildCallback.CREATE_DEFAULT, + ot.Paint, + ot.PaintFormat.PaintSolid, + ): _defaultPaintSolid, + ( + BuildCallback.CREATE_DEFAULT, + ot.Paint, + ot.PaintFormat.PaintVarSolid, + ): _defaultPaintSolid, + } + + +def populateCOLRv0( + table: ot.COLR, + colorGlyphsV0: _ColorGlyphsV0Dict, + glyphMap: Optional[Mapping[str, int]] = None, +): + """Build v0 color layers and add to existing COLR table. + + Args: + table: a raw otTables.COLR() object (not ttLib's table_C_O_L_R_). + colorGlyphsV0: map of base glyph names to lists of (layer glyph names, + color palette index) tuples. Can be empty. + glyphMap: a map from glyph names to glyph indices, as returned from + TTFont.getReverseGlyphMap(), to optionally sort base records by GID. 
+ """ + if glyphMap is not None: + colorGlyphItems = sorted( + colorGlyphsV0.items(), key=lambda item: glyphMap[item[0]] + ) + else: + colorGlyphItems = colorGlyphsV0.items() + baseGlyphRecords = [] + layerRecords = [] + for baseGlyph, layers in colorGlyphItems: + baseRec = ot.BaseGlyphRecord() + baseRec.BaseGlyph = baseGlyph + baseRec.FirstLayerIndex = len(layerRecords) + baseRec.NumLayers = len(layers) + baseGlyphRecords.append(baseRec) + + for layerGlyph, paletteIndex in layers: + layerRec = ot.LayerRecord() + layerRec.LayerGlyph = layerGlyph + layerRec.PaletteIndex = paletteIndex + layerRecords.append(layerRec) + + table.BaseGlyphRecordArray = table.LayerRecordArray = None + if baseGlyphRecords: + table.BaseGlyphRecordArray = ot.BaseGlyphRecordArray() + table.BaseGlyphRecordArray.BaseGlyphRecord = baseGlyphRecords + if layerRecords: + table.LayerRecordArray = ot.LayerRecordArray() + table.LayerRecordArray.LayerRecord = layerRecords + table.BaseGlyphRecordCount = len(baseGlyphRecords) + table.LayerRecordCount = len(layerRecords) + + +def buildCOLR( + colorGlyphs: _ColorGlyphsDict, + version: Optional[int] = None, + glyphMap: Optional[Mapping[str, int]] = None, + varStore: Optional[ot.VarStore] = None, + varIndexMap: Optional[ot.DeltaSetIndexMap] = None, + clipBoxes: Optional[Dict[str, _ClipBoxInput]] = None, +) -> C_O_L_R_.table_C_O_L_R_: + """Build COLR table from color layers mapping. + Args: + colorGlyphs: map of base glyph name to, either list of (layer glyph name, + color palette index) tuples for COLRv0; or a single Paint (dict) or + list of Paint for COLRv1. + version: the version of COLR table. If None, the version is determined + by the presence of COLRv1 paints or variation data (varStore), which + require version 1; otherwise, if all base glyphs use only simple color + layers, version 0 is used. + glyphMap: a map from glyph names to glyph indices, as returned from + TTFont.getReverseGlyphMap(), to optionally sort base records by GID. 
+ varStore: Optional ItemVarationStore for deltas associated with v1 layer. + varIndexMap: Optional DeltaSetIndexMap for deltas associated with v1 layer. + clipBoxes: Optional map of base glyph name to clip box 4- or 5-tuples: + (xMin, yMin, xMax, yMax) or (xMin, yMin, xMax, yMax, varIndexBase). + Return: + A new COLR table. + """ + self = C_O_L_R_.table_C_O_L_R_() + + if varStore is not None and version == 0: + raise ValueError("Can't add VarStore to COLRv0") + + if version in (None, 0) and not varStore: + # split color glyphs into v0 and v1 and encode separately + colorGlyphsV0, colorGlyphsV1 = _split_color_glyphs_by_version(colorGlyphs) + if version == 0 and colorGlyphsV1: + raise ValueError("Can't encode COLRv1 glyphs in COLRv0") + else: + # unless explicitly requested for v1 or have variations, in which case + # we encode all color glyph as v1 + colorGlyphsV0, colorGlyphsV1 = {}, colorGlyphs + + colr = ot.COLR() + + populateCOLRv0(colr, colorGlyphsV0, glyphMap) + + colr.LayerList, colr.BaseGlyphList = buildColrV1(colorGlyphsV1, glyphMap) + + if version is None: + version = 1 if (varStore or colorGlyphsV1) else 0 + elif version not in (0, 1): + raise NotImplementedError(version) + self.version = colr.Version = version + + if version == 0: + self.ColorLayers = self._decompileColorLayersV0(colr) + else: + clipBoxes = { + name: clipBoxes[name] for name in clipBoxes or {} if name in colorGlyphsV1 + } + colr.ClipList = buildClipList(clipBoxes) if clipBoxes else None + colr.VarIndexMap = varIndexMap + colr.VarStore = varStore + self.table = colr + + return self + + +def buildClipList(clipBoxes: Dict[str, _ClipBoxInput]) -> ot.ClipList: + clipList = ot.ClipList() + clipList.Format = 1 + clipList.clips = {name: buildClipBox(box) for name, box in clipBoxes.items()} + return clipList + + +def buildClipBox(clipBox: _ClipBoxInput) -> ot.ClipBox: + if isinstance(clipBox, ot.ClipBox): + return clipBox + n = len(clipBox) + clip = ot.ClipBox() + if n not in (4, 5): + raise 
ValueError(f"Invalid ClipBox: expected 4 or 5 values, found {n}") + clip.xMin, clip.yMin, clip.xMax, clip.yMax = intRect(clipBox[:4]) + clip.Format = int(n == 5) + 1 + if n == 5: + clip.VarIndexBase = int(clipBox[4]) + return clip + + +class ColorPaletteType(enum.IntFlag): + USABLE_WITH_LIGHT_BACKGROUND = 0x0001 + USABLE_WITH_DARK_BACKGROUND = 0x0002 + + @classmethod + def _missing_(cls, value): + # enforce reserved bits + if isinstance(value, int) and (value < 0 or value & 0xFFFC != 0): + raise ValueError(f"{value} is not a valid {cls.__name__}") + return super()._missing_(value) + + +# None, 'abc' or {'en': 'abc', 'de': 'xyz'} +_OptionalLocalizedString = Union[None, str, Dict[str, str]] + + +def buildPaletteLabels( + labels: Iterable[_OptionalLocalizedString], nameTable: _n_a_m_e.table__n_a_m_e +) -> List[Optional[int]]: + return [ + nameTable.addMultilingualName(l, mac=False) + if isinstance(l, dict) + else C_P_A_L_.table_C_P_A_L_.NO_NAME_ID + if l is None + else nameTable.addMultilingualName({"en": l}, mac=False) + for l in labels + ] + + +def buildCPAL( + palettes: Sequence[Sequence[Tuple[float, float, float, float]]], + paletteTypes: Optional[Sequence[ColorPaletteType]] = None, + paletteLabels: Optional[Sequence[_OptionalLocalizedString]] = None, + paletteEntryLabels: Optional[Sequence[_OptionalLocalizedString]] = None, + nameTable: Optional[_n_a_m_e.table__n_a_m_e] = None, +) -> C_P_A_L_.table_C_P_A_L_: + """Build CPAL table from list of color palettes. + + Args: + palettes: list of lists of colors encoded as tuples of (R, G, B, A) floats + in the range [0..1]. + paletteTypes: optional list of ColorPaletteType, one for each palette. + paletteLabels: optional list of palette labels. Each lable can be either: + None (no label), a string (for for default English labels), or a + localized string (as a dict keyed with BCP47 language codes). + paletteEntryLabels: optional list of palette entry labels, one for each + palette entry (see paletteLabels). 
+ nameTable: optional name table where to store palette and palette entry + labels. Required if either paletteLabels or paletteEntryLabels is set. + + Return: + A new CPAL v0 or v1 table, if custom palette types or labels are specified. + """ + if len({len(p) for p in palettes}) != 1: + raise ColorLibError("color palettes have different lengths") + + if (paletteLabels or paletteEntryLabels) and not nameTable: + raise TypeError( + "nameTable is required if palette or palette entries have labels" + ) + + cpal = C_P_A_L_.table_C_P_A_L_() + cpal.numPaletteEntries = len(palettes[0]) + + cpal.palettes = [] + for i, palette in enumerate(palettes): + colors = [] + for j, color in enumerate(palette): + if not isinstance(color, tuple) or len(color) != 4: + raise ColorLibError( + f"In palette[{i}][{j}]: expected (R, G, B, A) tuple, got {color!r}" + ) + if any(v > 1 or v < 0 for v in color): + raise ColorLibError( + f"palette[{i}][{j}] has invalid out-of-range [0..1] color: {color!r}" + ) + # input colors are RGBA, CPAL encodes them as BGRA + red, green, blue, alpha = color + colors.append( + C_P_A_L_.Color(*(round(v * 255) for v in (blue, green, red, alpha))) + ) + cpal.palettes.append(colors) + + if any(v is not None for v in (paletteTypes, paletteLabels, paletteEntryLabels)): + cpal.version = 1 + + if paletteTypes is not None: + if len(paletteTypes) != len(palettes): + raise ColorLibError( + f"Expected {len(palettes)} paletteTypes, got {len(paletteTypes)}" + ) + cpal.paletteTypes = [ColorPaletteType(t).value for t in paletteTypes] + else: + cpal.paletteTypes = [C_P_A_L_.table_C_P_A_L_.DEFAULT_PALETTE_TYPE] * len( + palettes + ) + + if paletteLabels is not None: + if len(paletteLabels) != len(palettes): + raise ColorLibError( + f"Expected {len(palettes)} paletteLabels, got {len(paletteLabels)}" + ) + cpal.paletteLabels = buildPaletteLabels(paletteLabels, nameTable) + else: + cpal.paletteLabels = [C_P_A_L_.table_C_P_A_L_.NO_NAME_ID] * len(palettes) + + if paletteEntryLabels 
is not None: + if len(paletteEntryLabels) != cpal.numPaletteEntries: + raise ColorLibError( + f"Expected {cpal.numPaletteEntries} paletteEntryLabels, " + f"got {len(paletteEntryLabels)}" + ) + cpal.paletteEntryLabels = buildPaletteLabels(paletteEntryLabels, nameTable) + else: + cpal.paletteEntryLabels = [ + C_P_A_L_.table_C_P_A_L_.NO_NAME_ID + ] * cpal.numPaletteEntries + else: + cpal.version = 0 + + return cpal + + +# COLR v1 tables +# See draft proposal at: https://github.com/googlefonts/colr-gradients-spec + + +def _is_colrv0_layer(layer: Any) -> bool: + # Consider as COLRv0 layer any sequence of length 2 (be it tuple or list) in which + # the first element is a str (the layerGlyph) and the second element is an int + # (CPAL paletteIndex). + # https://github.com/googlefonts/ufo2ft/issues/426 + try: + layerGlyph, paletteIndex = layer + except (TypeError, ValueError): + return False + else: + return isinstance(layerGlyph, str) and isinstance(paletteIndex, int) + + +def _split_color_glyphs_by_version( + colorGlyphs: _ColorGlyphsDict, +) -> Tuple[_ColorGlyphsV0Dict, _ColorGlyphsDict]: + colorGlyphsV0 = {} + colorGlyphsV1 = {} + for baseGlyph, layers in colorGlyphs.items(): + if all(_is_colrv0_layer(l) for l in layers): + colorGlyphsV0[baseGlyph] = layers + else: + colorGlyphsV1[baseGlyph] = layers + + # sanity check + assert set(colorGlyphs) == (set(colorGlyphsV0) | set(colorGlyphsV1)) + + return colorGlyphsV0, colorGlyphsV1 + + +def _reuse_ranges(num_layers: int) -> Generator[Tuple[int, int], None, None]: + # TODO feels like something itertools might have already + for lbound in range(num_layers): + # Reuse of very large #s of layers is relatively unlikely + # +2: we want sequences of at least 2 + # otData handles single-record duplication + for ubound in range( + lbound + 2, min(num_layers + 1, lbound + 2 + _MAX_REUSE_LEN) + ): + yield (lbound, ubound) + + +class LayerListBuilder: + slices: List[ot.Paint] + layers: List[ot.Paint] + reusePool: Mapping[Tuple[Any, 
...], int] + tuples: Mapping[int, Tuple[Any, ...]] + keepAlive: List[ot.Paint] # we need id to remain valid + + def __init__(self): + self.slices = [] + self.layers = [] + self.reusePool = {} + self.tuples = {} + self.keepAlive = [] + + # We need to intercept construction of PaintColrLayers + callbacks = _buildPaintCallbacks() + callbacks[ + ( + BuildCallback.BEFORE_BUILD, + ot.Paint, + ot.PaintFormat.PaintColrLayers, + ) + ] = self._beforeBuildPaintColrLayers + self.tableBuilder = TableBuilder(callbacks) + + def _paint_tuple(self, paint: ot.Paint): + # start simple, who even cares about cyclic graphs or interesting field types + def _tuple_safe(value): + if isinstance(value, enum.Enum): + return value + elif hasattr(value, "__dict__"): + return tuple( + (k, _tuple_safe(v)) for k, v in sorted(value.__dict__.items()) + ) + elif isinstance(value, collections.abc.MutableSequence): + return tuple(_tuple_safe(e) for e in value) + return value + + # Cache the tuples for individual Paint instead of the whole sequence + # because the seq could be a transient slice + result = self.tuples.get(id(paint), None) + if result is None: + result = _tuple_safe(paint) + self.tuples[id(paint)] = result + self.keepAlive.append(paint) + return result + + def _as_tuple(self, paints: Sequence[ot.Paint]) -> Tuple[Any, ...]: + return tuple(self._paint_tuple(p) for p in paints) + + # COLR layers is unusual in that it modifies shared state + # so we need a callback into an object + def _beforeBuildPaintColrLayers(self, dest, source): + paint = ot.Paint() + paint.Format = int(ot.PaintFormat.PaintColrLayers) + self.slices.append(paint) + + # Sketchy gymnastics: a sequence input will have dropped it's layers + # into NumLayers; get it back + if isinstance(source.get("NumLayers", None), collections.abc.Sequence): + layers = source["NumLayers"] + else: + layers = source["Layers"] + + # Convert maps seqs or whatever into typed objects + layers = [self.buildPaint(l) for l in layers] + + # No reason 
to have a colr layers with just one entry + if len(layers) == 1: + return layers[0], {} + + # Look for reuse, with preference to longer sequences + # This may make the layer list smaller + found_reuse = True + while found_reuse: + found_reuse = False + + ranges = sorted( + _reuse_ranges(len(layers)), + key=lambda t: (t[1] - t[0], t[1], t[0]), + reverse=True, + ) + for lbound, ubound in ranges: + reuse_lbound = self.reusePool.get( + self._as_tuple(layers[lbound:ubound]), -1 + ) + if reuse_lbound == -1: + continue + new_slice = ot.Paint() + new_slice.Format = int(ot.PaintFormat.PaintColrLayers) + new_slice.NumLayers = ubound - lbound + new_slice.FirstLayerIndex = reuse_lbound + layers = layers[:lbound] + [new_slice] + layers[ubound:] + found_reuse = True + break + + # The layer list is now final; if it's too big we need to tree it + is_tree = len(layers) > MAX_PAINT_COLR_LAYER_COUNT + layers = _build_n_ary_tree(layers, n=MAX_PAINT_COLR_LAYER_COUNT) + + # We now have a tree of sequences with Paint leaves. + # Convert the sequences into PaintColrLayers. 
+ def listToColrLayers(layer): + if isinstance(layer, collections.abc.Sequence): + return self.buildPaint( + { + "Format": ot.PaintFormat.PaintColrLayers, + "Layers": [listToColrLayers(l) for l in layer], + } + ) + return layer + + layers = [listToColrLayers(l) for l in layers] + + paint.NumLayers = len(layers) + paint.FirstLayerIndex = len(self.layers) + self.layers.extend(layers) + + # Register our parts for reuse provided we aren't a tree + # If we are a tree the leaves registered for reuse and that will suffice + if not is_tree: + for lbound, ubound in _reuse_ranges(len(layers)): + self.reusePool[self._as_tuple(layers[lbound:ubound])] = ( + lbound + paint.FirstLayerIndex + ) + + # we've fully built dest; empty source prevents generalized build from kicking in + return paint, {} + + def buildPaint(self, paint: _PaintInput) -> ot.Paint: + return self.tableBuilder.build(ot.Paint, paint) + + def build(self) -> Optional[ot.LayerList]: + if not self.layers: + return None + layers = ot.LayerList() + layers.LayerCount = len(self.layers) + layers.Paint = self.layers + return layers + + +def buildBaseGlyphPaintRecord( + baseGlyph: str, layerBuilder: LayerListBuilder, paint: _PaintInput +) -> ot.BaseGlyphList: + self = ot.BaseGlyphPaintRecord() + self.BaseGlyph = baseGlyph + self.Paint = layerBuilder.buildPaint(paint) + return self + + +def _format_glyph_errors(errors: Mapping[str, Exception]) -> str: + lines = [] + for baseGlyph, error in sorted(errors.items()): + lines.append(f" {baseGlyph} => {type(error).__name__}: {error}") + return "\n".join(lines) + + +def buildColrV1( + colorGlyphs: _ColorGlyphsDict, + glyphMap: Optional[Mapping[str, int]] = None, +) -> Tuple[Optional[ot.LayerList], ot.BaseGlyphList]: + if glyphMap is not None: + colorGlyphItems = sorted( + colorGlyphs.items(), key=lambda item: glyphMap[item[0]] + ) + else: + colorGlyphItems = colorGlyphs.items() + + errors = {} + baseGlyphs = [] + layerBuilder = LayerListBuilder() + for baseGlyph, paint in 
colorGlyphItems: + try: + baseGlyphs.append(buildBaseGlyphPaintRecord(baseGlyph, layerBuilder, paint)) + + except (ColorLibError, OverflowError, ValueError, TypeError) as e: + errors[baseGlyph] = e + + if errors: + failed_glyphs = _format_glyph_errors(errors) + exc = ColorLibError(f"Failed to build BaseGlyphList:\n{failed_glyphs}") + exc.errors = errors + raise exc from next(iter(errors.values())) + + layers = layerBuilder.build() + glyphs = ot.BaseGlyphList() + glyphs.BaseGlyphCount = len(baseGlyphs) + glyphs.BaseGlyphPaintRecord = baseGlyphs + return (layers, glyphs) + + +def _build_n_ary_tree(leaves, n): + """Build N-ary tree from sequence of leaf nodes. + + Return a list of lists where each non-leaf node is a list containing + max n nodes. + """ + if not leaves: + return [] + + assert n > 1 + + depth = ceil(log(len(leaves), n)) + + if depth <= 1: + return list(leaves) + + # Fully populate complete subtrees of root until we have enough leaves left + root = [] + unassigned = None + full_step = n ** (depth - 1) + for i in range(0, len(leaves), full_step): + subtree = leaves[i : i + full_step] + if len(subtree) < full_step: + unassigned = subtree + break + while len(subtree) > n: + subtree = [subtree[k : k + n] for k in range(0, len(subtree), n)] + root.append(subtree) + + if unassigned: + # Recurse to fill the last subtree, which is the only partially populated one + subtree = _build_n_ary_tree(unassigned, n) + if len(subtree) <= n - len(root): + # replace last subtree with its children if they can still fit + root.extend(subtree) + else: + root.append(subtree) + assert len(root) <= n + + return root diff --git a/.venv/lib/python3.9/site-packages/fontTools/colorLib/errors.py b/.venv/lib/python3.9/site-packages/fontTools/colorLib/errors.py new file mode 100644 index 00000000..a0bdda17 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/colorLib/errors.py @@ -0,0 +1,3 @@ + +class ColorLibError(Exception): + pass diff --git 
a/.venv/lib/python3.9/site-packages/fontTools/colorLib/geometry.py b/.venv/lib/python3.9/site-packages/fontTools/colorLib/geometry.py new file mode 100644 index 00000000..e62aead1 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/colorLib/geometry.py @@ -0,0 +1,145 @@ +"""Helpers for manipulating 2D points and vectors in COLR table.""" + +from math import copysign, cos, hypot, pi +from fontTools.misc.roundTools import otRound + + +def _vector_between(origin, target): + return (target[0] - origin[0], target[1] - origin[1]) + + +def _round_point(pt): + return (otRound(pt[0]), otRound(pt[1])) + + +def _unit_vector(vec): + length = hypot(*vec) + if length == 0: + return None + return (vec[0] / length, vec[1] / length) + + +# This is the same tolerance used by Skia's SkTwoPointConicalGradient.cpp to detect +# when a radial gradient's focal point lies on the end circle. +_NEARLY_ZERO = 1 / (1 << 12) # 0.000244140625 + + +# The unit vector's X and Y components are respectively +# U = (cos(α), sin(α)) +# where α is the angle between the unit vector and the positive x axis. +_UNIT_VECTOR_THRESHOLD = cos(3 / 8 * pi) # == sin(1/8 * pi) == 0.38268343236508984 + + +def _rounding_offset(direction): + # Return 2-tuple of -/+ 1.0 or 0.0 approximately based on the direction vector. + # We divide the unit circle in 8 equal slices oriented towards the cardinal + # (N, E, S, W) and intermediate (NE, SE, SW, NW) directions. To each slice we + # map one of the possible cases: -1, 0, +1 for either X and Y coordinate. + # E.g. Return (+1.0, -1.0) if unit vector is oriented towards SE, or + # (-1.0, 0.0) if it's pointing West, etc. 
+ uv = _unit_vector(direction) + if not uv: + return (0, 0) + + result = [] + for uv_component in uv: + if -_UNIT_VECTOR_THRESHOLD <= uv_component < _UNIT_VECTOR_THRESHOLD: + # unit vector component near 0: direction almost orthogonal to the + # direction of the current axis, thus keep coordinate unchanged + result.append(0) + else: + # nudge coord by +/- 1.0 in direction of unit vector + result.append(copysign(1.0, uv_component)) + return tuple(result) + + +class Circle: + def __init__(self, centre, radius): + self.centre = centre + self.radius = radius + + def __repr__(self): + return f"Circle(centre={self.centre}, radius={self.radius})" + + def round(self): + return Circle(_round_point(self.centre), otRound(self.radius)) + + def inside(self, outer_circle): + dist = self.radius + hypot(*_vector_between(self.centre, outer_circle.centre)) + return ( + abs(outer_circle.radius - dist) <= _NEARLY_ZERO + or outer_circle.radius > dist + ) + + def concentric(self, other): + return self.centre == other.centre + + def move(self, dx, dy): + self.centre = (self.centre[0] + dx, self.centre[1] + dy) + + +def round_start_circle_stable_containment(c0, r0, c1, r1): + """Round start circle so that it stays inside/outside end circle after rounding. + + The rounding of circle coordinates to integers may cause an abrupt change + if the start circle c0 is so close to the end circle c1's perimiter that + it ends up falling outside (or inside) as a result of the rounding. + To keep the gradient unchanged, we nudge it in the right direction. 
+ + See: + https://github.com/googlefonts/colr-gradients-spec/issues/204 + https://github.com/googlefonts/picosvg/issues/158 + """ + start, end = Circle(c0, r0), Circle(c1, r1) + + inside_before_round = start.inside(end) + + round_start = start.round() + round_end = end.round() + inside_after_round = round_start.inside(round_end) + + if inside_before_round == inside_after_round: + return round_start + elif inside_after_round: + # start was outside before rounding: we need to push start away from end + direction = _vector_between(round_end.centre, round_start.centre) + radius_delta = +1.0 + else: + # start was inside before rounding: we need to push start towards end + direction = _vector_between(round_start.centre, round_end.centre) + radius_delta = -1.0 + dx, dy = _rounding_offset(direction) + + # At most 2 iterations ought to be enough to converge. Before the loop, we + # know the start circle didn't keep containment after normal rounding; thus + # we continue adjusting by -/+ 1.0 until containment is restored. + # Normal rounding can at most move each coordinates -/+0.5; in the worst case + # both the start and end circle's centres and radii will be rounded in opposite + # directions, e.g. when they move along a 45 degree diagonal: + # c0 = (1.5, 1.5) ===> (2.0, 2.0) + # r0 = 0.5 ===> 1.0 + # c1 = (0.499, 0.499) ===> (0.0, 0.0) + # r1 = 2.499 ===> 2.0 + # In this example, the relative distance between the circles, calculated + # as r1 - (r0 + distance(c0, c1)) is initially 0.57437 (c0 is inside c1), and + # -1.82842 after rounding (c0 is now outside c1). Nudging c0 by -1.0 on both + # x and y axes moves it towards c1 by hypot(-1.0, -1.0) = 1.41421. Two of these + # moves cover twice that distance, which is enough to restore containment. 
+ max_attempts = 2 + for _ in range(max_attempts): + if round_start.concentric(round_end): + # can't move c0 towards c1 (they are the same), so we change the radius + round_start.radius += radius_delta + assert round_start.radius >= 0 + else: + round_start.move(dx, dy) + if inside_before_round == round_start.inside(round_end): + break + else: # likely a bug + raise AssertionError( + f"Rounding circle {start} " + f"{'inside' if inside_before_round else 'outside'} " + f"{end} failed after {max_attempts} attempts!" + ) + + return round_start diff --git a/.venv/lib/python3.9/site-packages/fontTools/colorLib/table_builder.py b/.venv/lib/python3.9/site-packages/fontTools/colorLib/table_builder.py new file mode 100644 index 00000000..763115b9 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/colorLib/table_builder.py @@ -0,0 +1,225 @@ +""" +colorLib.table_builder: Generic helper for filling in BaseTable derivatives from tuples and maps and such. + +""" + +import collections +import enum +from fontTools.ttLib.tables.otBase import ( + BaseTable, + FormatSwitchingBaseTable, + UInt8FormatSwitchingBaseTable, +) +from fontTools.ttLib.tables.otConverters import ( + ComputedInt, + SimpleValue, + Struct, + Short, + UInt8, + UShort, + IntValue, + FloatValue, + OptionalValue, +) +from fontTools.misc.roundTools import otRound + + +class BuildCallback(enum.Enum): + """Keyed on (BEFORE_BUILD, class[, Format if available]). + Receives (dest, source). + Should return (dest, source), which can be new objects. + """ + + BEFORE_BUILD = enum.auto() + + """Keyed on (AFTER_BUILD, class[, Format if available]). + Receives (dest). + Should return dest, which can be a new object. + """ + AFTER_BUILD = enum.auto() + + """Keyed on (CREATE_DEFAULT, class[, Format if available]). + Receives no arguments. + Should return a new instance of class. 
+ """ + CREATE_DEFAULT = enum.auto() + + +def _assignable(convertersByName): + return {k: v for k, v in convertersByName.items() if not isinstance(v, ComputedInt)} + + +def _isNonStrSequence(value): + return isinstance(value, collections.abc.Sequence) and not isinstance(value, str) + + +def _split_format(cls, source): + if _isNonStrSequence(source): + assert len(source) > 0, f"{cls} needs at least format from {source}" + fmt, remainder = source[0], source[1:] + elif isinstance(source, collections.abc.Mapping): + assert "Format" in source, f"{cls} needs at least Format from {source}" + remainder = source.copy() + fmt = remainder.pop("Format") + else: + raise ValueError(f"Not sure how to populate {cls} from {source}") + + assert isinstance( + fmt, collections.abc.Hashable + ), f"{cls} Format is not hashable: {fmt!r}" + assert ( + fmt in cls.convertersByName + ), f"{cls} invalid Format: {fmt!r}" + + return fmt, remainder + + +class TableBuilder: + """ + Helps to populate things derived from BaseTable from maps, tuples, etc. + + A table of lifecycle callbacks may be provided to add logic beyond what is possible + based on otData info for the target class. See BuildCallbacks. 
+ """ + + def __init__(self, callbackTable=None): + if callbackTable is None: + callbackTable = {} + self._callbackTable = callbackTable + + def _convert(self, dest, field, converter, value): + enumClass = getattr(converter, "enumClass", None) + + if enumClass: + if isinstance(value, enumClass): + pass + elif isinstance(value, str): + try: + value = getattr(enumClass, value.upper()) + except AttributeError: + raise ValueError(f"{value} is not a valid {enumClass}") + else: + value = enumClass(value) + + elif isinstance(converter, IntValue): + value = otRound(value) + elif isinstance(converter, FloatValue): + value = float(value) + + elif isinstance(converter, Struct): + if converter.repeat: + if _isNonStrSequence(value): + value = [self.build(converter.tableClass, v) for v in value] + else: + value = [self.build(converter.tableClass, value)] + setattr(dest, converter.repeat, len(value)) + else: + value = self.build(converter.tableClass, value) + elif callable(converter): + value = converter(value) + + setattr(dest, field, value) + + def build(self, cls, source): + assert issubclass(cls, BaseTable) + + if isinstance(source, cls): + return source + + callbackKey = (cls,) + fmt = None + if issubclass(cls, FormatSwitchingBaseTable): + fmt, source = _split_format(cls, source) + callbackKey = (cls, fmt) + + dest = self._callbackTable.get( + (BuildCallback.CREATE_DEFAULT,) + callbackKey, lambda: cls() + )() + assert isinstance(dest, cls) + + convByName = _assignable(cls.convertersByName) + skippedFields = set() + + # For format switchers we need to resolve converters based on format + if issubclass(cls, FormatSwitchingBaseTable): + dest.Format = fmt + convByName = _assignable(convByName[dest.Format]) + skippedFields.add("Format") + + # Convert sequence => mapping so before thunk only has to handle one format + if _isNonStrSequence(source): + # Sequence (typically list or tuple) assumed to match fields in declaration order + assert len(source) <= len( + convByName + ), 
f"Sequence of {len(source)} too long for {cls}; expected <= {len(convByName)} values" + source = dict(zip(convByName.keys(), source)) + + dest, source = self._callbackTable.get( + (BuildCallback.BEFORE_BUILD,) + callbackKey, lambda d, s: (d, s) + )(dest, source) + + if isinstance(source, collections.abc.Mapping): + for field, value in source.items(): + if field in skippedFields: + continue + converter = convByName.get(field, None) + if not converter: + raise ValueError( + f"Unrecognized field {field} for {cls}; expected one of {sorted(convByName.keys())}" + ) + self._convert(dest, field, converter, value) + else: + # let's try as a 1-tuple + dest = self.build(cls, (source,)) + + for field, conv in convByName.items(): + if not hasattr(dest, field) and isinstance(conv, OptionalValue): + setattr(dest, field, conv.DEFAULT) + + dest = self._callbackTable.get( + (BuildCallback.AFTER_BUILD,) + callbackKey, lambda d: d + )(dest) + + return dest + + +class TableUnbuilder: + def __init__(self, callbackTable=None): + if callbackTable is None: + callbackTable = {} + self._callbackTable = callbackTable + + def unbuild(self, table): + assert isinstance(table, BaseTable) + + source = {} + + callbackKey = (type(table),) + if isinstance(table, FormatSwitchingBaseTable): + source["Format"] = int(table.Format) + callbackKey += (table.Format,) + + for converter in table.getConverters(): + if isinstance(converter, ComputedInt): + continue + value = getattr(table, converter.name) + + enumClass = getattr(converter, "enumClass", None) + if enumClass: + source[converter.name] = value.name.lower() + elif isinstance(converter, Struct): + if converter.repeat: + source[converter.name] = [self.unbuild(v) for v in value] + else: + source[converter.name] = self.unbuild(value) + elif isinstance(converter, SimpleValue): + # "simple" values (e.g. 
int, float, str) need no further un-building + source[converter.name] = value + else: + raise NotImplementedError( + "Don't know how unbuild {value!r} with {converter!r}" + ) + + source = self._callbackTable.get(callbackKey, lambda s: s)(source) + + return source diff --git a/.venv/lib/python3.9/site-packages/fontTools/colorLib/unbuilder.py b/.venv/lib/python3.9/site-packages/fontTools/colorLib/unbuilder.py new file mode 100644 index 00000000..fa229b09 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/colorLib/unbuilder.py @@ -0,0 +1,82 @@ +from fontTools.ttLib.tables import otTables as ot +from .table_builder import TableUnbuilder + + +def unbuildColrV1(layerList, baseGlyphList): + layers = [] + if layerList: + layers = layerList.Paint + unbuilder = LayerListUnbuilder(layers) + return { + rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint) + for rec in baseGlyphList.BaseGlyphPaintRecord + } + + +def _flatten(lst): + for el in lst: + if isinstance(el, list): + yield from _flatten(el) + else: + yield el + + +class LayerListUnbuilder: + def __init__(self, layers): + self.layers = layers + + callbacks = { + ( + ot.Paint, + ot.PaintFormat.PaintColrLayers, + ): self._unbuildPaintColrLayers, + } + self.tableUnbuilder = TableUnbuilder(callbacks) + + def unbuildPaint(self, paint): + assert isinstance(paint, ot.Paint) + return self.tableUnbuilder.unbuild(paint) + + def _unbuildPaintColrLayers(self, source): + assert source["Format"] == ot.PaintFormat.PaintColrLayers + + layers = list( + _flatten( + [ + self.unbuildPaint(childPaint) + for childPaint in self.layers[ + source["FirstLayerIndex"] : source["FirstLayerIndex"] + + source["NumLayers"] + ] + ] + ) + ) + + if len(layers) == 1: + return layers[0] + + return {"Format": source["Format"], "Layers": layers} + + +if __name__ == "__main__": + from pprint import pprint + import sys + from fontTools.ttLib import TTFont + + try: + fontfile = sys.argv[1] + except IndexError: + sys.exit("usage: fonttools 
colorLib.unbuilder FONTFILE") + + font = TTFont(fontfile) + colr = font["COLR"] + if colr.version < 1: + sys.exit(f"error: No COLR table version=1 found in {fontfile}") + + colorGlyphs = unbuildColrV1( + colr.table.LayerList, + colr.table.BaseGlyphList, + ignoreVarIdx=not colr.table.VarStore, + ) + + pprint(colorGlyphs) diff --git a/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__init__.py b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__init__.py new file mode 100644 index 00000000..4ae6356e --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .cu2qu import * diff --git a/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__main__.py b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__main__.py new file mode 100644 index 00000000..084bf8f9 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__main__.py @@ -0,0 +1,6 @@ +import sys +from .cli import main + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/__init__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..1082f263 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/__main__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/__main__.cpython-39.pyc new file mode 100644 index 00000000..37a077f5 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/__main__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/cli.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/cli.cpython-39.pyc new file mode 100644 index 00000000..9b4ab99d Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/cli.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/cu2qu.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/cu2qu.cpython-39.pyc new file mode 100644 index 00000000..75249b6d Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/cu2qu.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/errors.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/errors.cpython-39.pyc new file mode 100644 index 
00000000..eaf22817 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/errors.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/ufo.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/ufo.cpython-39.pyc new file mode 100644 index 00000000..bce2c098 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/__pycache__/ufo.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/cu2qu/cli.py b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/cli.py new file mode 100644 index 00000000..34520fc0 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/cli.py @@ -0,0 +1,183 @@ +import os +import argparse +import logging +import shutil +import multiprocessing as mp +from contextlib import closing +from functools import partial + +import fontTools +from .ufo import font_to_quadratic, fonts_to_quadratic + +ufo_module = None +try: + import ufoLib2 as ufo_module +except ImportError: + try: + import defcon as ufo_module + except ImportError as e: + pass + + +logger = logging.getLogger("fontTools.cu2qu") + + +def _cpu_count(): + try: + return mp.cpu_count() + except NotImplementedError: # pragma: no cover + return 1 + + +def open_ufo(path): + if hasattr(ufo_module.Font, "open"): # ufoLib2 + return ufo_module.Font.open(path) + return ufo_module.Font(path) # defcon + + +def _font_to_quadratic(input_path, output_path=None, **kwargs): + ufo = open_ufo(input_path) + logger.info('Converting curves for %s', input_path) + if font_to_quadratic(ufo, **kwargs): + logger.info("Saving %s", output_path) + if output_path: + ufo.save(output_path) + else: + ufo.save() # save in-place + elif output_path: + _copytree(input_path, output_path) + + +def _samepath(path1, path2): + # TODO on python3+, there's os.path.samefile + path1 = os.path.normcase(os.path.abspath(os.path.realpath(path1))) + path2 = 
os.path.normcase(os.path.abspath(os.path.realpath(path2))) + return path1 == path2 + + +def _copytree(input_path, output_path): + if _samepath(input_path, output_path): + logger.debug("input and output paths are the same file; skipped copy") + return + if os.path.exists(output_path): + shutil.rmtree(output_path) + shutil.copytree(input_path, output_path) + + +def main(args=None): + """Convert a UFO font from cubic to quadratic curves""" + parser = argparse.ArgumentParser(prog="cu2qu") + parser.add_argument( + "--version", action="version", version=fontTools.__version__) + parser.add_argument( + "infiles", + nargs="+", + metavar="INPUT", + help="one or more input UFO source file(s).") + parser.add_argument("-v", "--verbose", action="count", default=0) + parser.add_argument( + "-e", + "--conversion-error", + type=float, + metavar="ERROR", + default=None, + help="maxiumum approximation error measured in EM (default: 0.001)") + parser.add_argument( + "--keep-direction", + dest="reverse_direction", + action="store_false", + help="do not reverse the contour direction") + + mode_parser = parser.add_mutually_exclusive_group() + mode_parser.add_argument( + "-i", + "--interpolatable", + action="store_true", + help="whether curve conversion should keep interpolation compatibility" + ) + mode_parser.add_argument( + "-j", + "--jobs", + type=int, + nargs="?", + default=1, + const=_cpu_count(), + metavar="N", + help="Convert using N multiple processes (default: %(default)s)") + + output_parser = parser.add_mutually_exclusive_group() + output_parser.add_argument( + "-o", + "--output-file", + default=None, + metavar="OUTPUT", + help=("output filename for the converted UFO. By default fonts are " + "modified in place. 
This only works with a single input.")) + output_parser.add_argument( + "-d", + "--output-dir", + default=None, + metavar="DIRECTORY", + help="output directory where to save converted UFOs") + + options = parser.parse_args(args) + + if ufo_module is None: + parser.error("Either ufoLib2 or defcon are required to run this script.") + + if not options.verbose: + level = "WARNING" + elif options.verbose == 1: + level = "INFO" + else: + level = "DEBUG" + logging.basicConfig(level=level) + + if len(options.infiles) > 1 and options.output_file: + parser.error("-o/--output-file can't be used with multile inputs") + + if options.output_dir: + output_dir = options.output_dir + if not os.path.exists(output_dir): + os.mkdir(output_dir) + elif not os.path.isdir(output_dir): + parser.error("'%s' is not a directory" % output_dir) + output_paths = [ + os.path.join(output_dir, os.path.basename(p)) + for p in options.infiles + ] + elif options.output_file: + output_paths = [options.output_file] + else: + # save in-place + output_paths = [None] * len(options.infiles) + + kwargs = dict(dump_stats=options.verbose > 0, + max_err_em=options.conversion_error, + reverse_direction=options.reverse_direction) + + if options.interpolatable: + logger.info('Converting curves compatibly') + ufos = [open_ufo(infile) for infile in options.infiles] + if fonts_to_quadratic(ufos, **kwargs): + for ufo, output_path in zip(ufos, output_paths): + logger.info("Saving %s", output_path) + if output_path: + ufo.save(output_path) + else: + ufo.save() + else: + for input_path, output_path in zip(options.infiles, output_paths): + if output_path: + _copytree(input_path, output_path) + else: + jobs = min(len(options.infiles), + options.jobs) if options.jobs > 1 else 1 + if jobs > 1: + func = partial(_font_to_quadratic, **kwargs) + logger.info('Running %d parallel processes', jobs) + with closing(mp.Pool(jobs)) as pool: + pool.starmap(func, zip(options.infiles, output_paths)) + else: + for input_path, output_path 
in zip(options.infiles, output_paths): + _font_to_quadratic(input_path, output_path, **kwargs) diff --git a/.venv/lib/python3.9/site-packages/fontTools/cu2qu/cu2qu.py b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/cu2qu.py new file mode 100644 index 00000000..c9ce93ae --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/cu2qu.py @@ -0,0 +1,496 @@ +#cython: language_level=3 +#distutils: define_macros=CYTHON_TRACE_NOGIL=1 + +# Copyright 2015 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import cython +except ImportError: + # if cython not installed, use mock module with no-op decorators and types + from fontTools.misc import cython + +import math + +from .errors import Error as Cu2QuError, ApproxNotFoundError + + +__all__ = ['curve_to_quadratic', 'curves_to_quadratic'] + +MAX_N = 100 + +NAN = float("NaN") + + +if cython.compiled: + # Yep, I'm compiled. + COMPILED = True +else: + # Just a lowly interpreted script. + COMPILED = False + + +@cython.cfunc +@cython.inline +@cython.returns(cython.double) +@cython.locals(v1=cython.complex, v2=cython.complex) +def dot(v1, v2): + """Return the dot product of two vectors. + + Args: + v1 (complex): First vector. + v2 (complex): Second vector. + + Returns: + double: Dot product. 
+ """ + return (v1 * v2.conjugate()).real + + +@cython.cfunc +@cython.inline +@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) +@cython.locals(_1=cython.complex, _2=cython.complex, _3=cython.complex, _4=cython.complex) +def calc_cubic_points(a, b, c, d): + _1 = d + _2 = (c / 3.0) + d + _3 = (b + c) / 3.0 + _2 + _4 = a + d + c + b + return _1, _2, _3, _4 + + +@cython.cfunc +@cython.inline +@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex) +@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) +def calc_cubic_parameters(p0, p1, p2, p3): + c = (p1 - p0) * 3.0 + b = (p2 - p1) * 3.0 - c + d = p0 + a = p3 - d - c - b + return a, b, c, d + + +@cython.cfunc +@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex) +def split_cubic_into_n_iter(p0, p1, p2, p3, n): + """Split a cubic Bezier into n equal parts. + + Splits the curve into `n` equal parts by curve time. + (t=0..1/n, t=1/n..2/n, ...) + + Args: + p0 (complex): Start point of curve. + p1 (complex): First handle of curve. + p2 (complex): Second handle of curve. + p3 (complex): End point of curve. + + Returns: + An iterator yielding the control points (four complex values) of the + subcurves. 
+ """ + # Hand-coded special-cases + if n == 2: + return iter(split_cubic_into_two(p0, p1, p2, p3)) + if n == 3: + return iter(split_cubic_into_three(p0, p1, p2, p3)) + if n == 4: + a, b = split_cubic_into_two(p0, p1, p2, p3) + return iter(split_cubic_into_two(*a) + split_cubic_into_two(*b)) + if n == 6: + a, b = split_cubic_into_two(p0, p1, p2, p3) + return iter(split_cubic_into_three(*a) + split_cubic_into_three(*b)) + + return _split_cubic_into_n_gen(p0,p1,p2,p3,n) + + +@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex, n=cython.int) +@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) +@cython.locals(dt=cython.double, delta_2=cython.double, delta_3=cython.double, i=cython.int) +@cython.locals(a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex) +def _split_cubic_into_n_gen(p0, p1, p2, p3, n): + a, b, c, d = calc_cubic_parameters(p0, p1, p2, p3) + dt = 1 / n + delta_2 = dt * dt + delta_3 = dt * delta_2 + for i in range(n): + t1 = i * dt + t1_2 = t1 * t1 + # calc new a, b, c and d + a1 = a * delta_3 + b1 = (3*a*t1 + b) * delta_2 + c1 = (2*b*t1 + c + 3*a*t1_2) * dt + d1 = a*t1*t1_2 + b*t1_2 + c*t1 + d + yield calc_cubic_points(a1, b1, c1, d1) + + +@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex) +@cython.locals(mid=cython.complex, deriv3=cython.complex) +def split_cubic_into_two(p0, p1, p2, p3): + """Split a cubic Bezier into two equal parts. + + Splits the curve into two equal parts at t = 0.5 + + Args: + p0 (complex): Start point of curve. + p1 (complex): First handle of curve. + p2 (complex): Second handle of curve. + p3 (complex): End point of curve. + + Returns: + tuple: Two cubic Beziers (each expressed as a tuple of four complex + values). 
+ """ + mid = (p0 + 3 * (p1 + p2) + p3) * .125 + deriv3 = (p3 + p2 - p1 - p0) * .125 + return ((p0, (p0 + p1) * .5, mid - deriv3, mid), + (mid, mid + deriv3, (p2 + p3) * .5, p3)) + + +@cython.locals(p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex, _27=cython.double) +@cython.locals(mid1=cython.complex, deriv1=cython.complex, mid2=cython.complex, deriv2=cython.complex) +def split_cubic_into_three(p0, p1, p2, p3, _27=1/27): + """Split a cubic Bezier into three equal parts. + + Splits the curve into three equal parts at t = 1/3 and t = 2/3 + + Args: + p0 (complex): Start point of curve. + p1 (complex): First handle of curve. + p2 (complex): Second handle of curve. + p3 (complex): End point of curve. + + Returns: + tuple: Three cubic Beziers (each expressed as a tuple of four complex + values). + """ + # we define 1/27 as a keyword argument so that it will be evaluated only + # once but still in the scope of this function + mid1 = (8*p0 + 12*p1 + 6*p2 + p3) * _27 + deriv1 = (p3 + 3*p2 - 4*p0) * _27 + mid2 = (p0 + 6*p1 + 12*p2 + 8*p3) * _27 + deriv2 = (4*p3 - 3*p1 - p0) * _27 + return ((p0, (2*p0 + p1) / 3.0, mid1 - deriv1, mid1), + (mid1, mid1 + deriv1, mid2 - deriv2, mid2), + (mid2, mid2 + deriv2, (p2 + 2*p3) / 3.0, p3)) + + +@cython.returns(cython.complex) +@cython.locals(t=cython.double, p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex) +@cython.locals(_p1=cython.complex, _p2=cython.complex) +def cubic_approx_control(t, p0, p1, p2, p3): + """Approximate a cubic Bezier using a quadratic one. + + Args: + t (double): Position of control point. + p0 (complex): Start point of curve. + p1 (complex): First handle of curve. + p2 (complex): Second handle of curve. + p3 (complex): End point of curve. + + Returns: + complex: Location of candidate control point on quadratic curve. 
+ """ + _p1 = p0 + (p1 - p0) * 1.5 + _p2 = p3 + (p2 - p3) * 1.5 + return _p1 + (_p2 - _p1) * t + + +@cython.returns(cython.complex) +@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex) +@cython.locals(ab=cython.complex, cd=cython.complex, p=cython.complex, h=cython.double) +def calc_intersect(a, b, c, d): + """Calculate the intersection of two lines. + + Args: + a (complex): Start point of first line. + b (complex): End point of first line. + c (complex): Start point of second line. + d (complex): End point of second line. + + Returns: + complex: Location of intersection if one present, ``complex(NaN,NaN)`` + if no intersection was found. + """ + ab = b - a + cd = d - c + p = ab * 1j + try: + h = dot(p, a - c) / dot(p, cd) + except ZeroDivisionError: + return complex(NAN, NAN) + return c + cd * h + + +@cython.cfunc +@cython.returns(cython.int) +@cython.locals(tolerance=cython.double, p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex) +@cython.locals(mid=cython.complex, deriv3=cython.complex) +def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance): + """Check if a cubic Bezier lies within a given distance of the origin. + + "Origin" means *the* origin (0,0), not the start of the curve. Note that no + checks are made on the start and end positions of the curve; this function + only checks the inside of the curve. + + Args: + p0 (complex): Start point of curve. + p1 (complex): First handle of curve. + p2 (complex): Second handle of curve. + p3 (complex): End point of curve. + tolerance (double): Distance from origin. + + Returns: + bool: True if the cubic Bezier ``p`` entirely lies within a distance + ``tolerance`` of the origin, False otherwise. + """ + # First check p2 then p1, as p2 has higher error early on. + if abs(p2) <= tolerance and abs(p1) <= tolerance: + return True + + # Split. 
+ mid = (p0 + 3 * (p1 + p2) + p3) * .125 + if abs(mid) > tolerance: + return False + deriv3 = (p3 + p2 - p1 - p0) * .125 + return (cubic_farthest_fit_inside(p0, (p0+p1)*.5, mid-deriv3, mid, tolerance) and + cubic_farthest_fit_inside(mid, mid+deriv3, (p2+p3)*.5, p3, tolerance)) + + +@cython.cfunc +@cython.locals(tolerance=cython.double, _2_3=cython.double) +@cython.locals(q1=cython.complex, c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex) +def cubic_approx_quadratic(cubic, tolerance, _2_3=2/3): + """Approximate a cubic Bezier with a single quadratic within a given tolerance. + + Args: + cubic (sequence): Four complex numbers representing control points of + the cubic Bezier curve. + tolerance (double): Permitted deviation from the original curve. + + Returns: + Three complex numbers representing control points of the quadratic + curve if it fits within the given tolerance, or ``None`` if no suitable + curve could be calculated. + """ + # we define 2/3 as a keyword argument so that it will be evaluated only + # once but still in the scope of this function + + q1 = calc_intersect(*cubic) + if math.isnan(q1.imag): + return None + c0 = cubic[0] + c3 = cubic[3] + c1 = c0 + (q1 - c0) * _2_3 + c2 = c3 + (q1 - c3) * _2_3 + if not cubic_farthest_fit_inside(0, + c1 - cubic[1], + c2 - cubic[2], + 0, tolerance): + return None + return c0, q1, c3 + + +@cython.cfunc +@cython.locals(n=cython.int, tolerance=cython.double, _2_3=cython.double) +@cython.locals(i=cython.int) +@cython.locals(c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex) +@cython.locals(q0=cython.complex, q1=cython.complex, next_q1=cython.complex, q2=cython.complex, d1=cython.complex) +def cubic_approx_spline(cubic, n, tolerance, _2_3=2/3): + """Approximate a cubic Bezier curve with a spline of n quadratics. + + Args: + cubic (sequence): Four complex numbers representing control points of + the cubic Bezier curve. 
+ n (int): Number of quadratic Bezier curves in the spline. + tolerance (double): Permitted deviation from the original curve. + + Returns: + A list of ``n+2`` complex numbers, representing control points of the + quadratic spline if it fits within the given tolerance, or ``None`` if + no suitable spline could be calculated. + """ + # we define 2/3 as a keyword argument so that it will be evaluated only + # once but still in the scope of this function + + if n == 1: + return cubic_approx_quadratic(cubic, tolerance) + + cubics = split_cubic_into_n_iter(cubic[0], cubic[1], cubic[2], cubic[3], n) + + # calculate the spline of quadratics and check errors at the same time. + next_cubic = next(cubics) + next_q1 = cubic_approx_control(0, *next_cubic) + q2 = cubic[0] + d1 = 0j + spline = [cubic[0], next_q1] + for i in range(1, n+1): + + # Current cubic to convert + c0, c1, c2, c3 = next_cubic + + # Current quadratic approximation of current cubic + q0 = q2 + q1 = next_q1 + if i < n: + next_cubic = next(cubics) + next_q1 = cubic_approx_control(i / (n-1), *next_cubic) + spline.append(next_q1) + q2 = (q1 + next_q1) * .5 + else: + q2 = c3 + + # End-point deltas + d0 = d1 + d1 = q2 - c3 + + if (abs(d1) > tolerance or + not cubic_farthest_fit_inside(d0, + q0 + (q1 - q0) * _2_3 - c1, + q2 + (q1 - q2) * _2_3 - c2, + d1, + tolerance)): + return None + spline.append(cubic[3]) + + return spline + + +@cython.locals(max_err=cython.double) +@cython.locals(n=cython.int) +def curve_to_quadratic(curve, max_err): + """Approximate a cubic Bezier curve with a spline of n quadratics. + + Args: + cubic (sequence): Four 2D tuples representing control points of + the cubic Bezier curve. + max_err (double): Permitted deviation from the original curve. + + Returns: + A list of 2D tuples, representing control points of the quadratic + spline if it fits within the given tolerance, or ``None`` if no + suitable spline could be calculated. 
+ """ + + curve = [complex(*p) for p in curve] + + for n in range(1, MAX_N + 1): + spline = cubic_approx_spline(curve, n, max_err) + if spline is not None: + # done. go home + return [(s.real, s.imag) for s in spline] + + raise ApproxNotFoundError(curve) + + + +@cython.locals(l=cython.int, last_i=cython.int, i=cython.int) +def curves_to_quadratic(curves, max_errors): + """Return quadratic Bezier splines approximating the input cubic Beziers. + + Args: + curves: A sequence of *n* curves, each curve being a sequence of four + 2D tuples. + max_errors: A sequence of *n* floats representing the maximum permissible + deviation from each of the cubic Bezier curves. + + Example:: + + >>> curves_to_quadratic( [ + ... [ (50,50), (100,100), (150,100), (200,50) ], + ... [ (75,50), (120,100), (150,75), (200,60) ] + ... ], [1,1] ) + [[(50.0, 50.0), (75.0, 75.0), (125.0, 91.66666666666666), (175.0, 75.0), (200.0, 50.0)], [(75.0, 50.0), (97.5, 75.0), (135.41666666666666, 82.08333333333333), (175.0, 67.5), (200.0, 60.0)]] + + The returned splines have "implied oncurve points" suitable for use in + TrueType ``glif`` outlines - i.e. in the first spline returned above, + the first quadratic segment runs from (50,50) to + ( (75 + 125)/2 , (120 + 91.666..)/2 ) = (100, 83.333...). + + Returns: + A list of splines, each spline being a list of 2D tuples. + + Raises: + fontTools.cu2qu.Errors.ApproxNotFoundError: if no suitable approximation + can be found for all curves with the given parameters. + """ + + curves = [[complex(*p) for p in curve] for curve in curves] + assert len(max_errors) == len(curves) + + l = len(curves) + splines = [None] * l + last_i = i = 0 + n = 1 + while True: + spline = cubic_approx_spline(curves[i], n, max_errors[i]) + if spline is None: + if n == MAX_N: + break + n += 1 + last_i = i + continue + splines[i] = spline + i = (i + 1) % l + if i == last_i: + # done. 
go home + return [[(s.real, s.imag) for s in spline] for spline in splines] + + raise ApproxNotFoundError(curves) + + +if __name__ == '__main__': + import random + import timeit + + MAX_ERR = 5 + + def generate_curve(): + return [ + tuple(float(random.randint(0, 2048)) for coord in range(2)) + for point in range(4)] + + def setup_curve_to_quadratic(): + return generate_curve(), MAX_ERR + + def setup_curves_to_quadratic(): + num_curves = 3 + return ( + [generate_curve() for curve in range(num_curves)], + [MAX_ERR] * num_curves) + + def run_benchmark( + benchmark_module, module, function, setup_suffix='', repeat=5, number=1000): + setup_func = 'setup_' + function + if setup_suffix: + print('%s with %s:' % (function, setup_suffix), end='') + setup_func += '_' + setup_suffix + else: + print('%s:' % function, end='') + + def wrapper(function, setup_func): + function = globals()[function] + setup_func = globals()[setup_func] + def wrapped(): + return function(*setup_func()) + return wrapped + results = timeit.repeat(wrapper(function, setup_func), repeat=repeat, number=number) + print('\t%5.1fus' % (min(results) * 1000000. / number)) + + def main(): + run_benchmark('cu2qu.benchmark', 'cu2qu', 'curve_to_quadratic') + run_benchmark('cu2qu.benchmark', 'cu2qu', 'curves_to_quadratic') + + random.seed(1) + main() diff --git a/.venv/lib/python3.9/site-packages/fontTools/cu2qu/errors.py b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/errors.py new file mode 100644 index 00000000..74c4c227 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/errors.py @@ -0,0 +1,76 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +class Error(Exception): + """Base Cu2Qu exception class for all other errors.""" + + +class ApproxNotFoundError(Error): + def __init__(self, curve): + message = "no approximation found: %s" % curve + super().__init__(message) + self.curve = curve + + +class UnequalZipLengthsError(Error): + pass + + +class IncompatibleGlyphsError(Error): + def __init__(self, glyphs): + assert len(glyphs) > 1 + self.glyphs = glyphs + names = set(repr(g.name) for g in glyphs) + if len(names) > 1: + self.combined_name = "{%s}" % ", ".join(sorted(names)) + else: + self.combined_name = names.pop() + + def __repr__(self): + return "<%s %s>" % (type(self).__name__, self.combined_name) + + +class IncompatibleSegmentNumberError(IncompatibleGlyphsError): + def __str__(self): + return "Glyphs named %s have different number of segments" % ( + self.combined_name + ) + + +class IncompatibleSegmentTypesError(IncompatibleGlyphsError): + def __init__(self, glyphs, segments): + IncompatibleGlyphsError.__init__(self, glyphs) + self.segments = segments + + def __str__(self): + lines = [] + ndigits = len(str(max(self.segments))) + for i, tags in sorted(self.segments.items()): + lines.append( + "%s: (%s)" % (str(i).rjust(ndigits), ", ".join(repr(t) for t in tags)) + ) + return "Glyphs named %s have incompatible segment types:\n %s" % ( + self.combined_name, + "\n ".join(lines), + ) + + +class IncompatibleFontsError(Error): + def __init__(self, glyph_errors): + self.glyph_errors = glyph_errors + + def __str__(self): + return "fonts contains incompatible glyphs: %s" % ( + ", ".join(repr(g) for g 
in sorted(self.glyph_errors.keys())) + ) diff --git a/.venv/lib/python3.9/site-packages/fontTools/cu2qu/ufo.py b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/ufo.py new file mode 100644 index 00000000..447de7bb --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/cu2qu/ufo.py @@ -0,0 +1,324 @@ +# Copyright 2015 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Converts cubic bezier curves to quadratic splines. + +Conversion is performed such that the quadratic splines keep the same end-curve +tangents as the original cubics. The approach is iterative, increasing the +number of segments for a spline until the error gets below a bound. + +Respective curves from multiple fonts will be converted at once to ensure that +the resulting splines are interpolation-compatible. +""" + +import logging +from fontTools.pens.basePen import AbstractPen +from fontTools.pens.pointPen import PointToSegmentPen +from fontTools.pens.reverseContourPen import ReverseContourPen + +from . import curves_to_quadratic +from .errors import ( + UnequalZipLengthsError, IncompatibleSegmentNumberError, + IncompatibleSegmentTypesError, IncompatibleGlyphsError, + IncompatibleFontsError) + + +__all__ = ['fonts_to_quadratic', 'font_to_quadratic'] + +# The default approximation error below is a relative value (1/1000 of the EM square). +# Later on, we convert it to absolute font units by multiplying it by a font's UPEM +# (see fonts_to_quadratic). 
+DEFAULT_MAX_ERR = 0.001 +CURVE_TYPE_LIB_KEY = "com.github.googlei18n.cu2qu.curve_type" + +logger = logging.getLogger(__name__) + + +_zip = zip +def zip(*args): + """Ensure each argument to zip has the same length. Also make sure a list is + returned for python 2/3 compatibility. + """ + + if len(set(len(a) for a in args)) != 1: + raise UnequalZipLengthsError(*args) + return list(_zip(*args)) + + +class GetSegmentsPen(AbstractPen): + """Pen to collect segments into lists of points for conversion. + + Curves always include their initial on-curve point, so some points are + duplicated between segments. + """ + + def __init__(self): + self._last_pt = None + self.segments = [] + + def _add_segment(self, tag, *args): + if tag in ['move', 'line', 'qcurve', 'curve']: + self._last_pt = args[-1] + self.segments.append((tag, args)) + + def moveTo(self, pt): + self._add_segment('move', pt) + + def lineTo(self, pt): + self._add_segment('line', pt) + + def qCurveTo(self, *points): + self._add_segment('qcurve', self._last_pt, *points) + + def curveTo(self, *points): + self._add_segment('curve', self._last_pt, *points) + + def closePath(self): + self._add_segment('close') + + def endPath(self): + self._add_segment('end') + + def addComponent(self, glyphName, transformation): + pass + + +def _get_segments(glyph): + """Get a glyph's segments as extracted by GetSegmentsPen.""" + + pen = GetSegmentsPen() + # glyph.draw(pen) + # We can't simply draw the glyph with the pen, but we must initialize the + # PointToSegmentPen explicitly with outputImpliedClosingLine=True. + # By default PointToSegmentPen does not outputImpliedClosingLine -- unless + # last and first point on closed contour are duplicated. Because we are + # converting multiple glyphs at the same time, we want to make sure + # this function returns the same number of segments, whether or not + # the last and first point overlap. 
+ # https://github.com/googlefonts/fontmake/issues/572 + # https://github.com/fonttools/fonttools/pull/1720 + pointPen = PointToSegmentPen(pen, outputImpliedClosingLine=True) + glyph.drawPoints(pointPen) + return pen.segments + + +def _set_segments(glyph, segments, reverse_direction): + """Draw segments as extracted by GetSegmentsPen back to a glyph.""" + + glyph.clearContours() + pen = glyph.getPen() + if reverse_direction: + pen = ReverseContourPen(pen) + for tag, args in segments: + if tag == 'move': + pen.moveTo(*args) + elif tag == 'line': + pen.lineTo(*args) + elif tag == 'curve': + pen.curveTo(*args[1:]) + elif tag == 'qcurve': + pen.qCurveTo(*args[1:]) + elif tag == 'close': + pen.closePath() + elif tag == 'end': + pen.endPath() + else: + raise AssertionError('Unhandled segment type "%s"' % tag) + + +def _segments_to_quadratic(segments, max_err, stats): + """Return quadratic approximations of cubic segments.""" + + assert all(s[0] == 'curve' for s in segments), 'Non-cubic given to convert' + + new_points = curves_to_quadratic([s[1] for s in segments], max_err) + n = len(new_points[0]) + assert all(len(s) == n for s in new_points[1:]), 'Converted incompatibly' + + spline_length = str(n - 2) + stats[spline_length] = stats.get(spline_length, 0) + 1 + + return [('qcurve', p) for p in new_points] + + +def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats): + """Do the actual conversion of a set of compatible glyphs, after arguments + have been set up. + + Return True if the glyphs were modified, else return False. 
+ """ + + try: + segments_by_location = zip(*[_get_segments(g) for g in glyphs]) + except UnequalZipLengthsError: + raise IncompatibleSegmentNumberError(glyphs) + if not any(segments_by_location): + return False + + # always modify input glyphs if reverse_direction is True + glyphs_modified = reverse_direction + + new_segments_by_location = [] + incompatible = {} + for i, segments in enumerate(segments_by_location): + tag = segments[0][0] + if not all(s[0] == tag for s in segments[1:]): + incompatible[i] = [s[0] for s in segments] + elif tag == 'curve': + segments = _segments_to_quadratic(segments, max_err, stats) + glyphs_modified = True + new_segments_by_location.append(segments) + + if glyphs_modified: + new_segments_by_glyph = zip(*new_segments_by_location) + for glyph, new_segments in zip(glyphs, new_segments_by_glyph): + _set_segments(glyph, new_segments, reverse_direction) + + if incompatible: + raise IncompatibleSegmentTypesError(glyphs, segments=incompatible) + return glyphs_modified + + +def glyphs_to_quadratic( + glyphs, max_err=None, reverse_direction=False, stats=None): + """Convert the curves of a set of compatible of glyphs to quadratic. + + All curves will be converted to quadratic at once, ensuring interpolation + compatibility. If this is not required, calling glyphs_to_quadratic with one + glyph at a time may yield slightly more optimized results. + + Return True if glyphs were modified, else return False. + + Raises IncompatibleGlyphsError if glyphs have non-interpolatable outlines. 
+ """ + if stats is None: + stats = {} + + if not max_err: + # assume 1000 is the default UPEM + max_err = DEFAULT_MAX_ERR * 1000 + + if isinstance(max_err, (list, tuple)): + max_errors = max_err + else: + max_errors = [max_err] * len(glyphs) + assert len(max_errors) == len(glyphs) + + return _glyphs_to_quadratic(glyphs, max_errors, reverse_direction, stats) + + +def fonts_to_quadratic( + fonts, max_err_em=None, max_err=None, reverse_direction=False, + stats=None, dump_stats=False, remember_curve_type=True): + """Convert the curves of a collection of fonts to quadratic. + + All curves will be converted to quadratic at once, ensuring interpolation + compatibility. If this is not required, calling fonts_to_quadratic with one + font at a time may yield slightly more optimized results. + + Return True if fonts were modified, else return False. + + By default, cu2qu stores the curve type in the fonts' lib, under a private + key "com.github.googlei18n.cu2qu.curve_type", and will not try to convert + them again if the curve type is already set to "quadratic". + Setting 'remember_curve_type' to False disables this optimization. + + Raises IncompatibleFontsError if same-named glyphs from different fonts + have non-interpolatable outlines. 
+ """ + + if remember_curve_type: + curve_types = {f.lib.get(CURVE_TYPE_LIB_KEY, "cubic") for f in fonts} + if len(curve_types) == 1: + curve_type = next(iter(curve_types)) + if curve_type == "quadratic": + logger.info("Curves already converted to quadratic") + return False + elif curve_type == "cubic": + pass # keep converting + else: + raise NotImplementedError(curve_type) + elif len(curve_types) > 1: + # going to crash later if they do differ + logger.warning("fonts may contain different curve types") + + if stats is None: + stats = {} + + if max_err_em and max_err: + raise TypeError('Only one of max_err and max_err_em can be specified.') + if not (max_err_em or max_err): + max_err_em = DEFAULT_MAX_ERR + + if isinstance(max_err, (list, tuple)): + assert len(max_err) == len(fonts) + max_errors = max_err + elif max_err: + max_errors = [max_err] * len(fonts) + + if isinstance(max_err_em, (list, tuple)): + assert len(fonts) == len(max_err_em) + max_errors = [f.info.unitsPerEm * e + for f, e in zip(fonts, max_err_em)] + elif max_err_em: + max_errors = [f.info.unitsPerEm * max_err_em for f in fonts] + + modified = False + glyph_errors = {} + for name in set().union(*(f.keys() for f in fonts)): + glyphs = [] + cur_max_errors = [] + for font, error in zip(fonts, max_errors): + if name in font: + glyphs.append(font[name]) + cur_max_errors.append(error) + try: + modified |= _glyphs_to_quadratic( + glyphs, cur_max_errors, reverse_direction, stats) + except IncompatibleGlyphsError as exc: + logger.error(exc) + glyph_errors[name] = exc + + if glyph_errors: + raise IncompatibleFontsError(glyph_errors) + + if modified and dump_stats: + spline_lengths = sorted(stats.keys()) + logger.info('New spline lengths: %s' % (', '.join( + '%s: %d' % (l, stats[l]) for l in spline_lengths))) + + if remember_curve_type: + for font in fonts: + curve_type = font.lib.get(CURVE_TYPE_LIB_KEY, "cubic") + if curve_type != "quadratic": + font.lib[CURVE_TYPE_LIB_KEY] = "quadratic" + modified = True + 
return modified + + +def glyph_to_quadratic(glyph, **kwargs): + """Convenience wrapper around glyphs_to_quadratic, for just one glyph. + Return True if the glyph was modified, else return False. + """ + + return glyphs_to_quadratic([glyph], **kwargs) + + +def font_to_quadratic(font, **kwargs): + """Convenience wrapper around fonts_to_quadratic, for just one font. + Return True if the font was modified, else return False. + """ + + return fonts_to_quadratic([font], **kwargs) diff --git a/.venv/lib/python3.9/site-packages/fontTools/designspaceLib/__init__.py b/.venv/lib/python3.9/site-packages/fontTools/designspaceLib/__init__.py new file mode 100644 index 00000000..b447a1b7 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/designspaceLib/__init__.py @@ -0,0 +1,1407 @@ +# -*- coding: utf-8 -*- + +from fontTools.misc.loggingTools import LogMixin +from fontTools.misc.textTools import tobytes, tostr +import collections +from io import BytesIO, StringIO +import os +import posixpath +from fontTools.misc import etree as ET +from fontTools.misc import plistlib + +""" + designSpaceDocument + + - read and write designspace files +""" + +__all__ = [ + 'DesignSpaceDocumentError', 'DesignSpaceDocument', 'SourceDescriptor', + 'InstanceDescriptor', 'AxisDescriptor', 'RuleDescriptor', 'BaseDocReader', + 'BaseDocWriter' +] + +# ElementTree allows to find namespace-prefixed elements, but not attributes +# so we have to do it ourselves for 'xml:lang' +XML_NS = "{http://www.w3.org/XML/1998/namespace}" +XML_LANG = XML_NS + "lang" + + +def posix(path): + """Normalize paths using forward slash to work also on Windows.""" + new_path = posixpath.join(*path.split(os.path.sep)) + if path.startswith('/'): + # The above transformation loses absolute paths + new_path = '/' + new_path + elif path.startswith(r'\\'): + # The above transformation loses leading slashes of UNC path mounts + new_path = '//' + new_path + return new_path + + +def posixpath_property(private_name): + def 
getter(self): + # Normal getter + return getattr(self, private_name) + + def setter(self, value): + # The setter rewrites paths using forward slashes + if value is not None: + value = posix(value) + setattr(self, private_name, value) + + return property(getter, setter) + + +class DesignSpaceDocumentError(Exception): + def __init__(self, msg, obj=None): + self.msg = msg + self.obj = obj + + def __str__(self): + return str(self.msg) + ( + ": %r" % self.obj if self.obj is not None else "") + + +class AsDictMixin(object): + + def asdict(self): + d = {} + for attr, value in self.__dict__.items(): + if attr.startswith("_"): + continue + if hasattr(value, "asdict"): + value = value.asdict() + elif isinstance(value, list): + value = [ + v.asdict() if hasattr(v, "asdict") else v for v in value + ] + d[attr] = value + return d + + +class SimpleDescriptor(AsDictMixin): + """ Containers for a bunch of attributes""" + + # XXX this is ugly. The 'print' is inappropriate here, and instead of + # assert, it should simply return True/False + def compare(self, other): + # test if this object contains the same data as the other + for attr in self._attrs: + try: + assert(getattr(self, attr) == getattr(other, attr)) + except AssertionError: + print("failed attribute", attr, getattr(self, attr), "!=", getattr(other, attr)) + + +class SourceDescriptor(SimpleDescriptor): + """Simple container for data related to the source""" + flavor = "source" + _attrs = ['filename', 'path', 'name', 'layerName', + 'location', 'copyLib', + 'copyGroups', 'copyFeatures', + 'muteKerning', 'muteInfo', + 'mutedGlyphNames', + 'familyName', 'styleName'] + + def __init__( + self, + *, + filename=None, + path=None, + font=None, + name=None, + location=None, + layerName=None, + familyName=None, + styleName=None, + copyLib=False, + copyInfo=False, + copyGroups=False, + copyFeatures=False, + muteKerning=False, + muteInfo=False, + mutedGlyphNames=None, + ): + self.filename = filename + """The original path as found in 
the document.""" + + self.path = path + """The absolute path, calculated from filename.""" + + self.font = font + """Any Python object. Optional. Points to a representation of this + source font that is loaded in memory, as a Python object (e.g. a + ``defcon.Font`` or a ``fontTools.ttFont.TTFont``). + + The default document reader will not fill-in this attribute, and the + default writer will not use this attribute. It is up to the user of + ``designspaceLib`` to either load the resource identified by + ``filename`` and store it in this field, or write the contents of + this field to the disk and make ```filename`` point to that. + """ + + self.name = name + self.location = location + self.layerName = layerName + self.familyName = familyName + self.styleName = styleName + + self.copyLib = copyLib + self.copyInfo = copyInfo + self.copyGroups = copyGroups + self.copyFeatures = copyFeatures + self.muteKerning = muteKerning + self.muteInfo = muteInfo + self.mutedGlyphNames = mutedGlyphNames or [] + + path = posixpath_property("_path") + filename = posixpath_property("_filename") + + +class RuleDescriptor(SimpleDescriptor): + """ + + + + + + + + + + + + """ + _attrs = ['name', 'conditionSets', 'subs'] # what do we need here + + def __init__(self, *, name=None, conditionSets=None, subs=None): + self.name = name + # list of lists of dict(name='aaaa', minimum=0, maximum=1000) + self.conditionSets = conditionSets or [] + # list of substitutions stored as tuples of glyphnames ("a", "a.alt") + self.subs = subs or [] + + +def evaluateRule(rule, location): + """ Return True if any of the rule's conditionsets matches the given location.""" + return any(evaluateConditions(c, location) for c in rule.conditionSets) + + +def evaluateConditions(conditions, location): + """ Return True if all the conditions matches the given location. + If a condition has no minimum, check for < maximum. + If a condition has no maximum, check for > minimum. 
+ """ + for cd in conditions: + value = location[cd['name']] + if cd.get('minimum') is None: + if value > cd['maximum']: + return False + elif cd.get('maximum') is None: + if cd['minimum'] > value: + return False + elif not cd['minimum'] <= value <= cd['maximum']: + return False + return True + + +def processRules(rules, location, glyphNames): + """ Apply these rules at this location to these glyphnames + - rule order matters + """ + newNames = [] + for rule in rules: + if evaluateRule(rule, location): + for name in glyphNames: + swap = False + for a, b in rule.subs: + if name == a: + swap = True + break + if swap: + newNames.append(b) + else: + newNames.append(name) + glyphNames = newNames + newNames = [] + return glyphNames + + +class InstanceDescriptor(SimpleDescriptor): + """Simple container for data related to the instance""" + flavor = "instance" + _defaultLanguageCode = "en" + _attrs = ['path', + 'name', + 'location', + 'familyName', + 'styleName', + 'postScriptFontName', + 'styleMapFamilyName', + 'styleMapStyleName', + 'kerning', + 'info', + 'lib'] + + def __init__( + self, + *, + filename=None, + path=None, + font=None, + name=None, + location=None, + familyName=None, + styleName=None, + postScriptFontName=None, + styleMapFamilyName=None, + styleMapStyleName=None, + localisedFamilyName=None, + localisedStyleName=None, + localisedStyleMapFamilyName=None, + localisedStyleMapStyleName=None, + glyphs=None, + kerning=True, + info=True, + lib=None, + ): + # the original path as found in the document + self.filename = filename + # the absolute path, calculated from filename + self.path = path + # Same as in SourceDescriptor. 
+ self.font = font + self.name = name + self.location = location + self.familyName = familyName + self.styleName = styleName + self.postScriptFontName = postScriptFontName + self.styleMapFamilyName = styleMapFamilyName + self.styleMapStyleName = styleMapStyleName + self.localisedFamilyName = localisedFamilyName or {} + self.localisedStyleName = localisedStyleName or {} + self.localisedStyleMapFamilyName = localisedStyleMapFamilyName or {} + self.localisedStyleMapStyleName = localisedStyleMapStyleName or {} + self.glyphs = glyphs or {} + self.kerning = kerning + self.info = info + + self.lib = lib or {} + """Custom data associated with this instance.""" + + path = posixpath_property("_path") + filename = posixpath_property("_filename") + + def setStyleName(self, styleName, languageCode="en"): + self.localisedStyleName[languageCode] = tostr(styleName) + + def getStyleName(self, languageCode="en"): + return self.localisedStyleName.get(languageCode) + + def setFamilyName(self, familyName, languageCode="en"): + self.localisedFamilyName[languageCode] = tostr(familyName) + + def getFamilyName(self, languageCode="en"): + return self.localisedFamilyName.get(languageCode) + + def setStyleMapStyleName(self, styleMapStyleName, languageCode="en"): + self.localisedStyleMapStyleName[languageCode] = tostr(styleMapStyleName) + + def getStyleMapStyleName(self, languageCode="en"): + return self.localisedStyleMapStyleName.get(languageCode) + + def setStyleMapFamilyName(self, styleMapFamilyName, languageCode="en"): + self.localisedStyleMapFamilyName[languageCode] = tostr(styleMapFamilyName) + + def getStyleMapFamilyName(self, languageCode="en"): + return self.localisedStyleMapFamilyName.get(languageCode) + + +def tagForAxisName(name): + # try to find or make a tag name for this axis name + names = { + 'weight': ('wght', dict(en = 'Weight')), + 'width': ('wdth', dict(en = 'Width')), + 'optical': ('opsz', dict(en = 'Optical Size')), + 'slant': ('slnt', dict(en = 'Slant')), + 'italic': 
('ital', dict(en = 'Italic')), + } + if name.lower() in names: + return names[name.lower()] + if len(name) < 4: + tag = name + "*" * (4 - len(name)) + else: + tag = name[:4] + return tag, dict(en=name) + + +class AxisDescriptor(SimpleDescriptor): + """ Simple container for the axis data + Add more localisations? + """ + flavor = "axis" + _attrs = ['tag', 'name', 'maximum', 'minimum', 'default', 'map'] + + def __init__( + self, + *, + tag=None, + name=None, + labelNames=None, + minimum=None, + default=None, + maximum=None, + hidden=False, + map=None, + ): + # opentype tag for this axis + self.tag = tag + # name of the axis used in locations + self.name = name + # names for UI purposes, if this is not a standard axis, + self.labelNames = labelNames or {} + self.minimum = minimum + self.maximum = maximum + self.default = default + self.hidden = hidden + self.map = map or [] + + def serialize(self): + # output to a dict, used in testing + return dict( + tag=self.tag, + name=self.name, + labelNames=self.labelNames, + maximum=self.maximum, + minimum=self.minimum, + default=self.default, + hidden=self.hidden, + map=self.map, + ) + + def map_forward(self, v): + from fontTools.varLib.models import piecewiseLinearMap + + if not self.map: + return v + return piecewiseLinearMap(v, {k: v for k, v in self.map}) + + def map_backward(self, v): + from fontTools.varLib.models import piecewiseLinearMap + + if not self.map: + return v + return piecewiseLinearMap(v, {v: k for k, v in self.map}) + + +class BaseDocWriter(object): + _whiteSpace = " " + ruleDescriptorClass = RuleDescriptor + axisDescriptorClass = AxisDescriptor + sourceDescriptorClass = SourceDescriptor + instanceDescriptorClass = InstanceDescriptor + + @classmethod + def getAxisDecriptor(cls): + return cls.axisDescriptorClass() + + @classmethod + def getSourceDescriptor(cls): + return cls.sourceDescriptorClass() + + @classmethod + def getInstanceDescriptor(cls): + return cls.instanceDescriptorClass() + + @classmethod + 
def getRuleDescriptor(cls): + return cls.ruleDescriptorClass() + + def __init__(self, documentPath, documentObject): + self.path = documentPath + self.documentObject = documentObject + self.documentVersion = "4.1" + self.root = ET.Element("designspace") + self.root.attrib['format'] = self.documentVersion + self._axes = [] # for use by the writer only + self._rules = [] # for use by the writer only + + def write(self, pretty=True, encoding="UTF-8", xml_declaration=True): + if self.documentObject.axes: + self.root.append(ET.Element("axes")) + for axisObject in self.documentObject.axes: + self._addAxis(axisObject) + + if self.documentObject.rules: + if getattr(self.documentObject, "rulesProcessingLast", False): + attributes = {"processing": "last"} + else: + attributes = {} + self.root.append(ET.Element("rules", attributes)) + for ruleObject in self.documentObject.rules: + self._addRule(ruleObject) + + if self.documentObject.sources: + self.root.append(ET.Element("sources")) + for sourceObject in self.documentObject.sources: + self._addSource(sourceObject) + + if self.documentObject.instances: + self.root.append(ET.Element("instances")) + for instanceObject in self.documentObject.instances: + self._addInstance(instanceObject) + + if self.documentObject.lib: + self._addLib(self.documentObject.lib) + + tree = ET.ElementTree(self.root) + tree.write( + self.path, + encoding=encoding, + method='xml', + xml_declaration=xml_declaration, + pretty_print=pretty, + ) + + def _makeLocationElement(self, locationObject, name=None): + """ Convert Location dict to a locationElement.""" + locElement = ET.Element("location") + if name is not None: + locElement.attrib['name'] = name + validatedLocation = self.documentObject.newDefaultLocation() + for axisName, axisValue in locationObject.items(): + if axisName in validatedLocation: + # only accept values we know + validatedLocation[axisName] = axisValue + for dimensionName, dimensionValue in validatedLocation.items(): + dimElement = 
ET.Element('dimension') + dimElement.attrib['name'] = dimensionName + if type(dimensionValue) == tuple: + dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue[0]) + dimElement.attrib['yvalue'] = self.intOrFloat(dimensionValue[1]) + else: + dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue) + locElement.append(dimElement) + return locElement, validatedLocation + + def intOrFloat(self, num): + if int(num) == num: + return "%d" % num + return "%f" % num + + def _addRule(self, ruleObject): + # if none of the conditions have minimum or maximum values, do not add the rule. + self._rules.append(ruleObject) + ruleElement = ET.Element('rule') + if ruleObject.name is not None: + ruleElement.attrib['name'] = ruleObject.name + for conditions in ruleObject.conditionSets: + conditionsetElement = ET.Element('conditionset') + for cond in conditions: + if cond.get('minimum') is None and cond.get('maximum') is None: + # neither is defined, don't add this condition + continue + conditionElement = ET.Element('condition') + conditionElement.attrib['name'] = cond.get('name') + if cond.get('minimum') is not None: + conditionElement.attrib['minimum'] = self.intOrFloat(cond.get('minimum')) + if cond.get('maximum') is not None: + conditionElement.attrib['maximum'] = self.intOrFloat(cond.get('maximum')) + conditionsetElement.append(conditionElement) + if len(conditionsetElement): + ruleElement.append(conditionsetElement) + for sub in ruleObject.subs: + subElement = ET.Element('sub') + subElement.attrib['name'] = sub[0] + subElement.attrib['with'] = sub[1] + ruleElement.append(subElement) + if len(ruleElement): + self.root.findall('.rules')[0].append(ruleElement) + + def _addAxis(self, axisObject): + self._axes.append(axisObject) + axisElement = ET.Element('axis') + axisElement.attrib['tag'] = axisObject.tag + axisElement.attrib['name'] = axisObject.name + axisElement.attrib['minimum'] = self.intOrFloat(axisObject.minimum) + axisElement.attrib['maximum'] = 
self.intOrFloat(axisObject.maximum) + axisElement.attrib['default'] = self.intOrFloat(axisObject.default) + if axisObject.hidden: + axisElement.attrib['hidden'] = "1" + for languageCode, labelName in sorted(axisObject.labelNames.items()): + languageElement = ET.Element('labelname') + languageElement.attrib[XML_LANG] = languageCode + languageElement.text = labelName + axisElement.append(languageElement) + if axisObject.map: + for inputValue, outputValue in axisObject.map: + mapElement = ET.Element('map') + mapElement.attrib['input'] = self.intOrFloat(inputValue) + mapElement.attrib['output'] = self.intOrFloat(outputValue) + axisElement.append(mapElement) + self.root.findall('.axes')[0].append(axisElement) + + def _addInstance(self, instanceObject): + instanceElement = ET.Element('instance') + if instanceObject.name is not None: + instanceElement.attrib['name'] = instanceObject.name + if instanceObject.familyName is not None: + instanceElement.attrib['familyname'] = instanceObject.familyName + if instanceObject.styleName is not None: + instanceElement.attrib['stylename'] = instanceObject.styleName + # add localisations + if instanceObject.localisedStyleName: + languageCodes = list(instanceObject.localisedStyleName.keys()) + languageCodes.sort() + for code in languageCodes: + if code == "en": + continue # already stored in the element attribute + localisedStyleNameElement = ET.Element('stylename') + localisedStyleNameElement.attrib[XML_LANG] = code + localisedStyleNameElement.text = instanceObject.getStyleName(code) + instanceElement.append(localisedStyleNameElement) + if instanceObject.localisedFamilyName: + languageCodes = list(instanceObject.localisedFamilyName.keys()) + languageCodes.sort() + for code in languageCodes: + if code == "en": + continue # already stored in the element attribute + localisedFamilyNameElement = ET.Element('familyname') + localisedFamilyNameElement.attrib[XML_LANG] = code + localisedFamilyNameElement.text = 
instanceObject.getFamilyName(code) + instanceElement.append(localisedFamilyNameElement) + if instanceObject.localisedStyleMapStyleName: + languageCodes = list(instanceObject.localisedStyleMapStyleName.keys()) + languageCodes.sort() + for code in languageCodes: + if code == "en": + continue + localisedStyleMapStyleNameElement = ET.Element('stylemapstylename') + localisedStyleMapStyleNameElement.attrib[XML_LANG] = code + localisedStyleMapStyleNameElement.text = instanceObject.getStyleMapStyleName(code) + instanceElement.append(localisedStyleMapStyleNameElement) + if instanceObject.localisedStyleMapFamilyName: + languageCodes = list(instanceObject.localisedStyleMapFamilyName.keys()) + languageCodes.sort() + for code in languageCodes: + if code == "en": + continue + localisedStyleMapFamilyNameElement = ET.Element('stylemapfamilyname') + localisedStyleMapFamilyNameElement.attrib[XML_LANG] = code + localisedStyleMapFamilyNameElement.text = instanceObject.getStyleMapFamilyName(code) + instanceElement.append(localisedStyleMapFamilyNameElement) + + if instanceObject.location is not None: + locationElement, instanceObject.location = self._makeLocationElement(instanceObject.location) + instanceElement.append(locationElement) + if instanceObject.filename is not None: + instanceElement.attrib['filename'] = instanceObject.filename + if instanceObject.postScriptFontName is not None: + instanceElement.attrib['postscriptfontname'] = instanceObject.postScriptFontName + if instanceObject.styleMapFamilyName is not None: + instanceElement.attrib['stylemapfamilyname'] = instanceObject.styleMapFamilyName + if instanceObject.styleMapStyleName is not None: + instanceElement.attrib['stylemapstylename'] = instanceObject.styleMapStyleName + if instanceObject.glyphs: + if instanceElement.findall('.glyphs') == []: + glyphsElement = ET.Element('glyphs') + instanceElement.append(glyphsElement) + glyphsElement = instanceElement.findall('.glyphs')[0] + for glyphName, data in 
sorted(instanceObject.glyphs.items()): + glyphElement = self._writeGlyphElement(instanceElement, instanceObject, glyphName, data) + glyphsElement.append(glyphElement) + if instanceObject.kerning: + kerningElement = ET.Element('kerning') + instanceElement.append(kerningElement) + if instanceObject.info: + infoElement = ET.Element('info') + instanceElement.append(infoElement) + if instanceObject.lib: + libElement = ET.Element('lib') + libElement.append(plistlib.totree(instanceObject.lib, indent_level=4)) + instanceElement.append(libElement) + self.root.findall('.instances')[0].append(instanceElement) + + def _addSource(self, sourceObject): + sourceElement = ET.Element("source") + if sourceObject.filename is not None: + sourceElement.attrib['filename'] = sourceObject.filename + if sourceObject.name is not None: + if sourceObject.name.find("temp_master") != 0: + # do not save temporary source names + sourceElement.attrib['name'] = sourceObject.name + if sourceObject.familyName is not None: + sourceElement.attrib['familyname'] = sourceObject.familyName + if sourceObject.styleName is not None: + sourceElement.attrib['stylename'] = sourceObject.styleName + if sourceObject.layerName is not None: + sourceElement.attrib['layer'] = sourceObject.layerName + if sourceObject.copyLib: + libElement = ET.Element('lib') + libElement.attrib['copy'] = "1" + sourceElement.append(libElement) + if sourceObject.copyGroups: + groupsElement = ET.Element('groups') + groupsElement.attrib['copy'] = "1" + sourceElement.append(groupsElement) + if sourceObject.copyFeatures: + featuresElement = ET.Element('features') + featuresElement.attrib['copy'] = "1" + sourceElement.append(featuresElement) + if sourceObject.copyInfo or sourceObject.muteInfo: + infoElement = ET.Element('info') + if sourceObject.copyInfo: + infoElement.attrib['copy'] = "1" + if sourceObject.muteInfo: + infoElement.attrib['mute'] = "1" + sourceElement.append(infoElement) + if sourceObject.muteKerning: + kerningElement = 
ET.Element("kerning") + kerningElement.attrib["mute"] = '1' + sourceElement.append(kerningElement) + if sourceObject.mutedGlyphNames: + for name in sourceObject.mutedGlyphNames: + glyphElement = ET.Element("glyph") + glyphElement.attrib["name"] = name + glyphElement.attrib["mute"] = '1' + sourceElement.append(glyphElement) + locationElement, sourceObject.location = self._makeLocationElement(sourceObject.location) + sourceElement.append(locationElement) + self.root.findall('.sources')[0].append(sourceElement) + + def _addLib(self, dict): + libElement = ET.Element('lib') + libElement.append(plistlib.totree(dict, indent_level=2)) + self.root.append(libElement) + + def _writeGlyphElement(self, instanceElement, instanceObject, glyphName, data): + glyphElement = ET.Element('glyph') + if data.get('mute'): + glyphElement.attrib['mute'] = "1" + if data.get('unicodes') is not None: + glyphElement.attrib['unicode'] = " ".join([hex(u) for u in data.get('unicodes')]) + if data.get('instanceLocation') is not None: + locationElement, data['instanceLocation'] = self._makeLocationElement(data.get('instanceLocation')) + glyphElement.append(locationElement) + if glyphName is not None: + glyphElement.attrib['name'] = glyphName + if data.get('note') is not None: + noteElement = ET.Element('note') + noteElement.text = data.get('note') + glyphElement.append(noteElement) + if data.get('masters') is not None: + mastersElement = ET.Element("masters") + for m in data.get('masters'): + masterElement = ET.Element("master") + if m.get('glyphName') is not None: + masterElement.attrib['glyphname'] = m.get('glyphName') + if m.get('font') is not None: + masterElement.attrib['source'] = m.get('font') + if m.get('location') is not None: + locationElement, m['location'] = self._makeLocationElement(m.get('location')) + masterElement.append(locationElement) + mastersElement.append(masterElement) + glyphElement.append(mastersElement) + return glyphElement + + +class BaseDocReader(LogMixin): + 
ruleDescriptorClass = RuleDescriptor + axisDescriptorClass = AxisDescriptor + sourceDescriptorClass = SourceDescriptor + instanceDescriptorClass = InstanceDescriptor + + def __init__(self, documentPath, documentObject): + self.path = documentPath + self.documentObject = documentObject + tree = ET.parse(self.path) + self.root = tree.getroot() + self.documentObject.formatVersion = self.root.attrib.get("format", "3.0") + self._axes = [] + self.rules = [] + self.sources = [] + self.instances = [] + self.axisDefaults = {} + self._strictAxisNames = True + + @classmethod + def fromstring(cls, string, documentObject): + f = BytesIO(tobytes(string, encoding="utf-8")) + self = cls(f, documentObject) + self.path = None + return self + + def read(self): + self.readAxes() + self.readRules() + self.readSources() + self.readInstances() + self.readLib() + + def readRules(self): + # we also need to read any conditions that are outside of a condition set. + rules = [] + rulesElement = self.root.find(".rules") + if rulesElement is not None: + processingValue = rulesElement.attrib.get("processing", "first") + if processingValue not in {"first", "last"}: + raise DesignSpaceDocumentError( + " processing attribute value is not valid: %r, " + "expected 'first' or 'last'" % processingValue) + self.documentObject.rulesProcessingLast = processingValue == "last" + for ruleElement in self.root.findall(".rules/rule"): + ruleObject = self.ruleDescriptorClass() + ruleName = ruleObject.name = ruleElement.attrib.get("name") + # read any stray conditions outside a condition set + externalConditions = self._readConditionElements( + ruleElement, + ruleName, + ) + if externalConditions: + ruleObject.conditionSets.append(externalConditions) + self.log.info( + "Found stray rule conditions outside a conditionset. " + "Wrapped them in a new conditionset." 
+ ) + # read the conditionsets + for conditionSetElement in ruleElement.findall('.conditionset'): + conditionSet = self._readConditionElements( + conditionSetElement, + ruleName, + ) + if conditionSet is not None: + ruleObject.conditionSets.append(conditionSet) + for subElement in ruleElement.findall('.sub'): + a = subElement.attrib['name'] + b = subElement.attrib['with'] + ruleObject.subs.append((a, b)) + rules.append(ruleObject) + self.documentObject.rules = rules + + def _readConditionElements(self, parentElement, ruleName=None): + cds = [] + for conditionElement in parentElement.findall('.condition'): + cd = {} + cdMin = conditionElement.attrib.get("minimum") + if cdMin is not None: + cd['minimum'] = float(cdMin) + else: + # will allow these to be None, assume axis.minimum + cd['minimum'] = None + cdMax = conditionElement.attrib.get("maximum") + if cdMax is not None: + cd['maximum'] = float(cdMax) + else: + # will allow these to be None, assume axis.maximum + cd['maximum'] = None + cd['name'] = conditionElement.attrib.get("name") + # # test for things + if cd.get('minimum') is None and cd.get('maximum') is None: + raise DesignSpaceDocumentError( + "condition missing required minimum or maximum in rule" + + (" '%s'" % ruleName if ruleName is not None else "")) + cds.append(cd) + return cds + + def readAxes(self): + # read the axes elements, including the warp map. 
+ axisElements = self.root.findall(".axes/axis") + if not axisElements: + return + for axisElement in axisElements: + axisObject = self.axisDescriptorClass() + axisObject.name = axisElement.attrib.get("name") + axisObject.minimum = float(axisElement.attrib.get("minimum")) + axisObject.maximum = float(axisElement.attrib.get("maximum")) + if axisElement.attrib.get('hidden', False): + axisObject.hidden = True + axisObject.default = float(axisElement.attrib.get("default")) + axisObject.tag = axisElement.attrib.get("tag") + for mapElement in axisElement.findall('map'): + a = float(mapElement.attrib['input']) + b = float(mapElement.attrib['output']) + axisObject.map.append((a, b)) + for labelNameElement in axisElement.findall('labelname'): + # Note: elementtree reads the "xml:lang" attribute name as + # '{http://www.w3.org/XML/1998/namespace}lang' + for key, lang in labelNameElement.items(): + if key == XML_LANG: + axisObject.labelNames[lang] = tostr(labelNameElement.text) + self.documentObject.axes.append(axisObject) + self.axisDefaults[axisObject.name] = axisObject.default + + def readSources(self): + for sourceCount, sourceElement in enumerate(self.root.findall(".sources/source")): + filename = sourceElement.attrib.get('filename') + if filename is not None and self.path is not None: + sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename)) + else: + sourcePath = None + sourceName = sourceElement.attrib.get('name') + if sourceName is None: + # add a temporary source name + sourceName = "temp_master.%d" % (sourceCount) + sourceObject = self.sourceDescriptorClass() + sourceObject.path = sourcePath # absolute path to the ufo source + sourceObject.filename = filename # path as it is stored in the document + sourceObject.name = sourceName + familyName = sourceElement.attrib.get("familyname") + if familyName is not None: + sourceObject.familyName = familyName + styleName = sourceElement.attrib.get("stylename") + if styleName is not None: + 
sourceObject.styleName = styleName + sourceObject.location = self.locationFromElement(sourceElement) + layerName = sourceElement.attrib.get('layer') + if layerName is not None: + sourceObject.layerName = layerName + for libElement in sourceElement.findall('.lib'): + if libElement.attrib.get('copy') == '1': + sourceObject.copyLib = True + for groupsElement in sourceElement.findall('.groups'): + if groupsElement.attrib.get('copy') == '1': + sourceObject.copyGroups = True + for infoElement in sourceElement.findall(".info"): + if infoElement.attrib.get('copy') == '1': + sourceObject.copyInfo = True + if infoElement.attrib.get('mute') == '1': + sourceObject.muteInfo = True + for featuresElement in sourceElement.findall(".features"): + if featuresElement.attrib.get('copy') == '1': + sourceObject.copyFeatures = True + for glyphElement in sourceElement.findall(".glyph"): + glyphName = glyphElement.attrib.get('name') + if glyphName is None: + continue + if glyphElement.attrib.get('mute') == '1': + sourceObject.mutedGlyphNames.append(glyphName) + for kerningElement in sourceElement.findall(".kerning"): + if kerningElement.attrib.get('mute') == '1': + sourceObject.muteKerning = True + self.documentObject.sources.append(sourceObject) + + def locationFromElement(self, element): + elementLocation = None + for locationElement in element.findall('.location'): + elementLocation = self.readLocationElement(locationElement) + break + return elementLocation + + def readLocationElement(self, locationElement): + """ Format 0 location reader """ + if self._strictAxisNames and not self.documentObject.axes: + raise DesignSpaceDocumentError("No axes defined") + loc = {} + for dimensionElement in locationElement.findall(".dimension"): + dimName = dimensionElement.attrib.get("name") + if self._strictAxisNames and dimName not in self.axisDefaults: + # In case the document contains no axis definitions, + self.log.warning("Location with undefined axis: \"%s\".", dimName) + continue + xValue = 
yValue = None + try: + xValue = dimensionElement.attrib.get('xvalue') + xValue = float(xValue) + except ValueError: + self.log.warning("KeyError in readLocation xValue %3.3f", xValue) + try: + yValue = dimensionElement.attrib.get('yvalue') + if yValue is not None: + yValue = float(yValue) + except ValueError: + pass + if yValue is not None: + loc[dimName] = (xValue, yValue) + else: + loc[dimName] = xValue + return loc + + def readInstances(self, makeGlyphs=True, makeKerning=True, makeInfo=True): + instanceElements = self.root.findall('.instances/instance') + for instanceElement in instanceElements: + self._readSingleInstanceElement(instanceElement, makeGlyphs=makeGlyphs, makeKerning=makeKerning, makeInfo=makeInfo) + + def _readSingleInstanceElement(self, instanceElement, makeGlyphs=True, makeKerning=True, makeInfo=True): + filename = instanceElement.attrib.get('filename') + if filename is not None and self.documentObject.path is not None: + instancePath = os.path.join(os.path.dirname(self.documentObject.path), filename) + else: + instancePath = None + instanceObject = self.instanceDescriptorClass() + instanceObject.path = instancePath # absolute path to the instance + instanceObject.filename = filename # path as it is stored in the document + name = instanceElement.attrib.get("name") + if name is not None: + instanceObject.name = name + familyname = instanceElement.attrib.get('familyname') + if familyname is not None: + instanceObject.familyName = familyname + stylename = instanceElement.attrib.get('stylename') + if stylename is not None: + instanceObject.styleName = stylename + postScriptFontName = instanceElement.attrib.get('postscriptfontname') + if postScriptFontName is not None: + instanceObject.postScriptFontName = postScriptFontName + styleMapFamilyName = instanceElement.attrib.get('stylemapfamilyname') + if styleMapFamilyName is not None: + instanceObject.styleMapFamilyName = styleMapFamilyName + styleMapStyleName = 
instanceElement.attrib.get('stylemapstylename') + if styleMapStyleName is not None: + instanceObject.styleMapStyleName = styleMapStyleName + # read localised names + for styleNameElement in instanceElement.findall('stylename'): + for key, lang in styleNameElement.items(): + if key == XML_LANG: + styleName = styleNameElement.text + instanceObject.setStyleName(styleName, lang) + for familyNameElement in instanceElement.findall('familyname'): + for key, lang in familyNameElement.items(): + if key == XML_LANG: + familyName = familyNameElement.text + instanceObject.setFamilyName(familyName, lang) + for styleMapStyleNameElement in instanceElement.findall('stylemapstylename'): + for key, lang in styleMapStyleNameElement.items(): + if key == XML_LANG: + styleMapStyleName = styleMapStyleNameElement.text + instanceObject.setStyleMapStyleName(styleMapStyleName, lang) + for styleMapFamilyNameElement in instanceElement.findall('stylemapfamilyname'): + for key, lang in styleMapFamilyNameElement.items(): + if key == XML_LANG: + styleMapFamilyName = styleMapFamilyNameElement.text + instanceObject.setStyleMapFamilyName(styleMapFamilyName, lang) + instanceLocation = self.locationFromElement(instanceElement) + if instanceLocation is not None: + instanceObject.location = instanceLocation + for glyphElement in instanceElement.findall('.glyphs/glyph'): + self.readGlyphElement(glyphElement, instanceObject) + for infoElement in instanceElement.findall("info"): + self.readInfoElement(infoElement, instanceObject) + for libElement in instanceElement.findall('lib'): + self.readLibElement(libElement, instanceObject) + self.documentObject.instances.append(instanceObject) + + def readLibElement(self, libElement, instanceObject): + """Read the lib element for the given instance.""" + instanceObject.lib = plistlib.fromtree(libElement[0]) + + def readInfoElement(self, infoElement, instanceObject): + """ Read the info element.""" + instanceObject.info = True + + def readKerningElement(self, 
kerningElement, instanceObject): + """ Read the kerning element.""" + kerningLocation = self.locationFromElement(kerningElement) + instanceObject.addKerning(kerningLocation) + + def readGlyphElement(self, glyphElement, instanceObject): + """ + Read the glyph element. + + + + + + + This is an instance from an anisotropic interpolation. + + + """ + glyphData = {} + glyphName = glyphElement.attrib.get('name') + if glyphName is None: + raise DesignSpaceDocumentError("Glyph object without name attribute") + mute = glyphElement.attrib.get("mute") + if mute == "1": + glyphData['mute'] = True + # unicode + unicodes = glyphElement.attrib.get('unicode') + if unicodes is not None: + try: + unicodes = [int(u, 16) for u in unicodes.split(" ")] + glyphData['unicodes'] = unicodes + except ValueError: + raise DesignSpaceDocumentError("unicode values %s are not integers" % unicodes) + + for noteElement in glyphElement.findall('.note'): + glyphData['note'] = noteElement.text + break + instanceLocation = self.locationFromElement(glyphElement) + if instanceLocation is not None: + glyphData['instanceLocation'] = instanceLocation + glyphSources = None + for masterElement in glyphElement.findall('.masters/master'): + fontSourceName = masterElement.attrib.get('source') + sourceLocation = self.locationFromElement(masterElement) + masterGlyphName = masterElement.attrib.get('glyphname') + if masterGlyphName is None: + # if we don't read a glyphname, use the one we have + masterGlyphName = glyphName + d = dict(font=fontSourceName, + location=sourceLocation, + glyphName=masterGlyphName) + if glyphSources is None: + glyphSources = [] + glyphSources.append(d) + if glyphSources is not None: + glyphData['masters'] = glyphSources + instanceObject.glyphs[glyphName] = glyphData + + def readLib(self): + """Read the lib element for the whole document.""" + for libElement in self.root.findall(".lib"): + self.documentObject.lib = plistlib.fromtree(libElement[0]) + + +class DesignSpaceDocument(LogMixin, 
AsDictMixin): + """ Read, write data from the designspace file""" + def __init__(self, readerClass=None, writerClass=None): + self.path = None + self.filename = None + """String, optional. When the document is read from the disk, this is + its original file name, i.e. the last part of its path. + + When the document is produced by a Python script and still only exists + in memory, the producing script can write here an indication of a + possible "good" filename, in case one wants to save the file somewhere. + """ + + self.formatVersion = None + self.sources = [] + self.instances = [] + self.axes = [] + self.rules = [] + self.rulesProcessingLast = False + self.default = None # name of the default master + + self.lib = {} + """Custom data associated with the whole document.""" + + # + if readerClass is not None: + self.readerClass = readerClass + else: + self.readerClass = BaseDocReader + if writerClass is not None: + self.writerClass = writerClass + else: + self.writerClass = BaseDocWriter + + @classmethod + def fromfile(cls, path, readerClass=None, writerClass=None): + self = cls(readerClass=readerClass, writerClass=writerClass) + self.read(path) + return self + + @classmethod + def fromstring(cls, string, readerClass=None, writerClass=None): + self = cls(readerClass=readerClass, writerClass=writerClass) + reader = self.readerClass.fromstring(string, self) + reader.read() + if self.sources: + self.findDefault() + return self + + def tostring(self, encoding=None): + if encoding is str or ( + encoding is not None and encoding.lower() == "unicode" + ): + f = StringIO() + xml_declaration = False + elif encoding is None or encoding == "utf-8": + f = BytesIO() + encoding = "UTF-8" + xml_declaration = True + else: + raise ValueError("unsupported encoding: '%s'" % encoding) + writer = self.writerClass(f, self) + writer.write(encoding=encoding, xml_declaration=xml_declaration) + return f.getvalue() + + def read(self, path): + if hasattr(path, "__fspath__"): # support 
os.PathLike objects + path = path.__fspath__() + self.path = path + self.filename = os.path.basename(path) + reader = self.readerClass(path, self) + reader.read() + if self.sources: + self.findDefault() + + def write(self, path): + if hasattr(path, "__fspath__"): # support os.PathLike objects + path = path.__fspath__() + self.path = path + self.filename = os.path.basename(path) + self.updatePaths() + writer = self.writerClass(path, self) + writer.write() + + def _posixRelativePath(self, otherPath): + relative = os.path.relpath(otherPath, os.path.dirname(self.path)) + return posix(relative) + + def updatePaths(self): + """ + Right before we save we need to identify and respond to the following situations: + In each descriptor, we have to do the right thing for the filename attribute. + + case 1. + descriptor.filename == None + descriptor.path == None + + -- action: + write as is, descriptors will not have a filename attr. + useless, but no reason to interfere. + + + case 2. + descriptor.filename == "../something" + descriptor.path == None + + -- action: + write as is. The filename attr should not be touched. + + + case 3. + descriptor.filename == None + descriptor.path == "~/absolute/path/there" + + -- action: + calculate the relative path for filename. + We're not overwriting some other value for filename, it should be fine + + + case 4. + descriptor.filename == '../somewhere' + descriptor.path == "~/absolute/path/there" + + -- action: + there is a conflict between the given filename, and the path. + So we know where the file is relative to the document. + Can't guess why they're different, we just choose for path to be correct and update filename. 
+ + + """ + assert self.path is not None + for descriptor in self.sources + self.instances: + if descriptor.path is not None: + # case 3 and 4: filename gets updated and relativized + descriptor.filename = self._posixRelativePath(descriptor.path) + + def addSource(self, sourceDescriptor): + self.sources.append(sourceDescriptor) + + def addSourceDescriptor(self, **kwargs): + source = self.writerClass.sourceDescriptorClass(**kwargs) + self.addSource(source) + return source + + def addInstance(self, instanceDescriptor): + self.instances.append(instanceDescriptor) + + def addInstanceDescriptor(self, **kwargs): + instance = self.writerClass.instanceDescriptorClass(**kwargs) + self.addInstance(instance) + return instance + + def addAxis(self, axisDescriptor): + self.axes.append(axisDescriptor) + + def addAxisDescriptor(self, **kwargs): + axis = self.writerClass.axisDescriptorClass(**kwargs) + self.addAxis(axis) + return axis + + def addRule(self, ruleDescriptor): + self.rules.append(ruleDescriptor) + + def addRuleDescriptor(self, **kwargs): + rule = self.writerClass.ruleDescriptorClass(**kwargs) + self.addRule(rule) + return rule + + def newDefaultLocation(self): + """Return default location in design space.""" + # Without OrderedDict, output XML would be non-deterministic. + # https://github.com/LettError/designSpaceDocument/issues/10 + loc = collections.OrderedDict() + for axisDescriptor in self.axes: + loc[axisDescriptor.name] = axisDescriptor.map_forward( + axisDescriptor.default + ) + return loc + + def updateFilenameFromPath(self, masters=True, instances=True, force=False): + # set a descriptor filename attr from the path and this document path + # if the filename attribute is not None: skip it. 
+ if masters: + for descriptor in self.sources: + if descriptor.filename is not None and not force: + continue + if self.path is not None: + descriptor.filename = self._posixRelativePath(descriptor.path) + if instances: + for descriptor in self.instances: + if descriptor.filename is not None and not force: + continue + if self.path is not None: + descriptor.filename = self._posixRelativePath(descriptor.path) + + def newAxisDescriptor(self): + # Ask the writer class to make us a new axisDescriptor + return self.writerClass.getAxisDecriptor() + + def newSourceDescriptor(self): + # Ask the writer class to make us a new sourceDescriptor + return self.writerClass.getSourceDescriptor() + + def newInstanceDescriptor(self): + # Ask the writer class to make us a new instanceDescriptor + return self.writerClass.getInstanceDescriptor() + + def getAxisOrder(self): + names = [] + for axisDescriptor in self.axes: + names.append(axisDescriptor.name) + return names + + def getAxis(self, name): + for axisDescriptor in self.axes: + if axisDescriptor.name == name: + return axisDescriptor + return None + + def findDefault(self): + """Set and return SourceDescriptor at the default location or None. + + The default location is the set of all `default` values in user space + of all axes. + """ + self.default = None + + # Convert the default location from user space to design space before comparing + # it against the SourceDescriptor locations (always in design space). 
+ default_location_design = self.newDefaultLocation() + + for sourceDescriptor in self.sources: + if sourceDescriptor.location == default_location_design: + self.default = sourceDescriptor + return sourceDescriptor + + return None + + def normalizeLocation(self, location): + from fontTools.varLib.models import normalizeValue + + new = {} + for axis in self.axes: + if axis.name not in location: + # skipping this dimension it seems + continue + value = location[axis.name] + # 'anisotropic' location, take first coord only + if isinstance(value, tuple): + value = value[0] + triple = [ + axis.map_forward(v) for v in (axis.minimum, axis.default, axis.maximum) + ] + new[axis.name] = normalizeValue(value, triple) + return new + + def normalize(self): + # Normalise the geometry of this designspace: + # scale all the locations of all masters and instances to the -1 - 0 - 1 value. + # we need the axis data to do the scaling, so we do those last. + # masters + for item in self.sources: + item.location = self.normalizeLocation(item.location) + # instances + for item in self.instances: + # glyph masters for this instance + for _, glyphData in item.glyphs.items(): + glyphData['instanceLocation'] = self.normalizeLocation(glyphData['instanceLocation']) + for glyphMaster in glyphData['masters']: + glyphMaster['location'] = self.normalizeLocation(glyphMaster['location']) + item.location = self.normalizeLocation(item.location) + # the axes + for axis in self.axes: + # scale the map first + newMap = [] + for inputValue, outputValue in axis.map: + newOutputValue = self.normalizeLocation({axis.name: outputValue}).get(axis.name) + newMap.append((inputValue, newOutputValue)) + if newMap: + axis.map = newMap + # finally the axis values + minimum = self.normalizeLocation({axis.name: axis.minimum}).get(axis.name) + maximum = self.normalizeLocation({axis.name: axis.maximum}).get(axis.name) + default = self.normalizeLocation({axis.name: axis.default}).get(axis.name) + # and set them in the 
axis.minimum + axis.minimum = minimum + axis.maximum = maximum + axis.default = default + # now the rules + for rule in self.rules: + newConditionSets = [] + for conditions in rule.conditionSets: + newConditions = [] + for cond in conditions: + if cond.get('minimum') is not None: + minimum = self.normalizeLocation({cond['name']: cond['minimum']}).get(cond['name']) + else: + minimum = None + if cond.get('maximum') is not None: + maximum = self.normalizeLocation({cond['name']: cond['maximum']}).get(cond['name']) + else: + maximum = None + newConditions.append(dict(name=cond['name'], minimum=minimum, maximum=maximum)) + newConditionSets.append(newConditions) + rule.conditionSets = newConditionSets + + def loadSourceFonts(self, opener, **kwargs): + """Ensure SourceDescriptor.font attributes are loaded, and return list of fonts. + + Takes a callable which initializes a new font object (e.g. TTFont, or + defcon.Font, etc.) from the SourceDescriptor.path, and sets the + SourceDescriptor.font attribute. + If the font attribute is already not None, it is not loaded again. + Fonts with the same path are only loaded once and shared among SourceDescriptors. + + For example, to load UFO sources using defcon: + + designspace = DesignSpaceDocument.fromfile("path/to/my.designspace") + designspace.loadSourceFonts(defcon.Font) + + Or to load masters as FontTools binary fonts, including extra options: + + designspace.loadSourceFonts(ttLib.TTFont, recalcBBoxes=False) + + Args: + opener (Callable): takes one required positional argument, the source.path, + and an optional list of keyword arguments, and returns a new font object + loaded from the path. + **kwargs: extra options passed on to the opener function. + + Returns: + List of font objects in the order they appear in the sources list. 
+ """ + # we load fonts with the same source.path only once + loaded = {} + fonts = [] + for source in self.sources: + if source.font is not None: # font already loaded + fonts.append(source.font) + continue + if source.path in loaded: + source.font = loaded[source.path] + else: + if source.path is None: + raise DesignSpaceDocumentError( + "Designspace source '%s' has no 'path' attribute" + % (source.name or "") + ) + source.font = opener(source.path, **kwargs) + loaded[source.path] = source.font + fonts.append(source.font) + return fonts diff --git a/.venv/lib/python3.9/site-packages/fontTools/designspaceLib/__pycache__/__init__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/designspaceLib/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..0b9752a6 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/designspaceLib/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/encodings/MacRoman.py b/.venv/lib/python3.9/site-packages/fontTools/encodings/MacRoman.py new file mode 100644 index 00000000..25232d38 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/encodings/MacRoman.py @@ -0,0 +1,36 @@ +MacRoman = [ + 'NUL', 'Eth', 'eth', 'Lslash', 'lslash', 'Scaron', 'scaron', 'Yacute', + 'yacute', 'HT', 'LF', 'Thorn', 'thorn', 'CR', 'Zcaron', 'zcaron', 'DLE', 'DC1', + 'DC2', 'DC3', 'DC4', 'onehalf', 'onequarter', 'onesuperior', 'threequarters', + 'threesuperior', 'twosuperior', 'brokenbar', 'minus', 'multiply', 'RS', 'US', + 'space', 'exclam', 'quotedbl', 'numbersign', 'dollar', 'percent', 'ampersand', + 'quotesingle', 'parenleft', 'parenright', 'asterisk', 'plus', 'comma', + 'hyphen', 'period', 'slash', 'zero', 'one', 'two', 'three', 'four', 'five', + 'six', 'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal', + 'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', + 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 
'V', 'W', 'X', 'Y', 'Z', + 'bracketleft', 'backslash', 'bracketright', 'asciicircum', 'underscore', + 'grave', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', + 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', + 'braceright', 'asciitilde', 'DEL', 'Adieresis', 'Aring', 'Ccedilla', 'Eacute', + 'Ntilde', 'Odieresis', 'Udieresis', 'aacute', 'agrave', 'acircumflex', + 'adieresis', 'atilde', 'aring', 'ccedilla', 'eacute', 'egrave', 'ecircumflex', + 'edieresis', 'iacute', 'igrave', 'icircumflex', 'idieresis', 'ntilde', + 'oacute', 'ograve', 'ocircumflex', 'odieresis', 'otilde', 'uacute', 'ugrave', + 'ucircumflex', 'udieresis', 'dagger', 'degree', 'cent', 'sterling', 'section', + 'bullet', 'paragraph', 'germandbls', 'registered', 'copyright', 'trademark', + 'acute', 'dieresis', 'notequal', 'AE', 'Oslash', 'infinity', 'plusminus', + 'lessequal', 'greaterequal', 'yen', 'mu', 'partialdiff', 'summation', + 'product', 'pi', 'integral', 'ordfeminine', 'ordmasculine', 'Omega', 'ae', + 'oslash', 'questiondown', 'exclamdown', 'logicalnot', 'radical', 'florin', + 'approxequal', 'Delta', 'guillemotleft', 'guillemotright', 'ellipsis', + 'nbspace', 'Agrave', 'Atilde', 'Otilde', 'OE', 'oe', 'endash', 'emdash', + 'quotedblleft', 'quotedblright', 'quoteleft', 'quoteright', 'divide', 'lozenge', + 'ydieresis', 'Ydieresis', 'fraction', 'currency', 'guilsinglleft', + 'guilsinglright', 'fi', 'fl', 'daggerdbl', 'periodcentered', 'quotesinglbase', + 'quotedblbase', 'perthousand', 'Acircumflex', 'Ecircumflex', 'Aacute', + 'Edieresis', 'Egrave', 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Oacute', + 'Ocircumflex', 'apple', 'Ograve', 'Uacute', 'Ucircumflex', 'Ugrave', 'dotlessi', + 'circumflex', 'tilde', 'macron', 'breve', 'dotaccent', 'ring', 'cedilla', + 'hungarumlaut', 'ogonek', 'caron' + ] diff --git a/.venv/lib/python3.9/site-packages/fontTools/encodings/StandardEncoding.py 
b/.venv/lib/python3.9/site-packages/fontTools/encodings/StandardEncoding.py new file mode 100644 index 00000000..810b2a09 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/encodings/StandardEncoding.py @@ -0,0 +1,48 @@ +StandardEncoding = [ + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', 'space', 'exclam', 'quotedbl', + 'numbersign', 'dollar', 'percent', 'ampersand', + 'quoteright', 'parenleft', 'parenright', 'asterisk', 'plus', + 'comma', 'hyphen', 'period', 'slash', 'zero', 'one', 'two', + 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', + 'colon', 'semicolon', 'less', 'equal', 'greater', + 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', + 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', + 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', + 'bracketright', 'asciicircum', 'underscore', 'quoteleft', + 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', + 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', + 'y', 'z', 'braceleft', 'bar', 'braceright', 'asciitilde', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', 'exclamdown', + 'cent', 'sterling', 'fraction', 'yen', 'florin', 'section', + 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft', + 'guilsinglleft', 'guilsinglright', 'fi', 'fl', '.notdef', + 'endash', 'dagger', 'daggerdbl', 
'periodcentered', + '.notdef', 'paragraph', 'bullet', 'quotesinglbase', + 'quotedblbase', 'quotedblright', 'guillemotright', + 'ellipsis', 'perthousand', '.notdef', 'questiondown', + '.notdef', 'grave', 'acute', 'circumflex', 'tilde', + 'macron', 'breve', 'dotaccent', 'dieresis', '.notdef', + 'ring', 'cedilla', '.notdef', 'hungarumlaut', 'ogonek', + 'caron', 'emdash', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', + '.notdef', '.notdef', '.notdef', 'AE', '.notdef', + 'ordfeminine', '.notdef', '.notdef', '.notdef', '.notdef', + 'Lslash', 'Oslash', 'OE', 'ordmasculine', '.notdef', + '.notdef', '.notdef', '.notdef', '.notdef', 'ae', '.notdef', + '.notdef', '.notdef', 'dotlessi', '.notdef', '.notdef', + 'lslash', 'oslash', 'oe', 'germandbls', '.notdef', + '.notdef', '.notdef', '.notdef' + ] diff --git a/.venv/lib/python3.9/site-packages/fontTools/encodings/__init__.py b/.venv/lib/python3.9/site-packages/fontTools/encodings/__init__.py new file mode 100644 index 00000000..156cb232 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/encodings/__init__.py @@ -0,0 +1 @@ +"""Empty __init__.py file to signal Python this directory is a package.""" diff --git a/.venv/lib/python3.9/site-packages/fontTools/encodings/__pycache__/MacRoman.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/encodings/__pycache__/MacRoman.cpython-39.pyc new file mode 100644 index 00000000..7c90fbae Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/encodings/__pycache__/MacRoman.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/encodings/__pycache__/StandardEncoding.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/encodings/__pycache__/StandardEncoding.cpython-39.pyc new file mode 100644 index 00000000..aa390f1d Binary files /dev/null and 
b/.venv/lib/python3.9/site-packages/fontTools/encodings/__pycache__/StandardEncoding.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/encodings/__pycache__/__init__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/encodings/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..f9acf60c Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/encodings/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/encodings/__pycache__/codecs.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/encodings/__pycache__/codecs.cpython-39.pyc new file mode 100644 index 00000000..4584591c Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/encodings/__pycache__/codecs.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/encodings/codecs.py b/.venv/lib/python3.9/site-packages/fontTools/encodings/codecs.py new file mode 100644 index 00000000..3b1a8256 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/encodings/codecs.py @@ -0,0 +1,119 @@ +"""Extend the Python codecs module with a few encodings that are used in OpenType (name table) +but missing from Python. 
See https://github.com/fonttools/fonttools/issues/236 for details.""" + +import codecs +import encodings + +class ExtendCodec(codecs.Codec): + + def __init__(self, name, base_encoding, mapping): + self.name = name + self.base_encoding = base_encoding + self.mapping = mapping + self.reverse = {v:k for k,v in mapping.items()} + self.max_len = max(len(v) for v in mapping.values()) + self.info = codecs.CodecInfo(name=self.name, encode=self.encode, decode=self.decode) + codecs.register_error(name, self.error) + + def _map(self, mapper, output_type, exc_type, input, errors): + base_error_handler = codecs.lookup_error(errors) + length = len(input) + out = output_type() + while input: + # first try to use self.error as the error handler + try: + part = mapper(input, self.base_encoding, errors=self.name) + out += part + break # All converted + except exc_type as e: + # else convert the correct part, handle error as requested and continue + out += mapper(input[:e.start], self.base_encoding, self.name) + replacement, pos = base_error_handler(e) + out += replacement + input = input[pos:] + return out, length + + def encode(self, input, errors='strict'): + return self._map(codecs.encode, bytes, UnicodeEncodeError, input, errors) + + def decode(self, input, errors='strict'): + return self._map(codecs.decode, str, UnicodeDecodeError, input, errors) + + def error(self, e): + if isinstance(e, UnicodeDecodeError): + for end in range(e.start + 1, e.end + 1): + s = e.object[e.start:end] + if s in self.mapping: + return self.mapping[s], end + elif isinstance(e, UnicodeEncodeError): + for end in range(e.start + 1, e.start + self.max_len + 1): + s = e.object[e.start:end] + if s in self.reverse: + return self.reverse[s], end + e.encoding = self.name + raise e + + +_extended_encodings = { + "x_mac_japanese_ttx": ("shift_jis", { + b"\xFC": chr(0x007C), + b"\x7E": chr(0x007E), + b"\x80": chr(0x005C), + b"\xA0": chr(0x00A0), + b"\xFD": chr(0x00A9), + b"\xFE": chr(0x2122), + b"\xFF": 
chr(0x2026), + }), + "x_mac_trad_chinese_ttx": ("big5", { + b"\x80": chr(0x005C), + b"\xA0": chr(0x00A0), + b"\xFD": chr(0x00A9), + b"\xFE": chr(0x2122), + b"\xFF": chr(0x2026), + }), + "x_mac_korean_ttx": ("euc_kr", { + b"\x80": chr(0x00A0), + b"\x81": chr(0x20A9), + b"\x82": chr(0x2014), + b"\x83": chr(0x00A9), + b"\xFE": chr(0x2122), + b"\xFF": chr(0x2026), + }), + "x_mac_simp_chinese_ttx": ("gb2312", { + b"\x80": chr(0x00FC), + b"\xA0": chr(0x00A0), + b"\xFD": chr(0x00A9), + b"\xFE": chr(0x2122), + b"\xFF": chr(0x2026), + }), +} + +_cache = {} + +def search_function(name): + name = encodings.normalize_encoding(name) # Rather undocumented... + if name in _extended_encodings: + if name not in _cache: + base_encoding, mapping = _extended_encodings[name] + assert(name[-4:] == "_ttx") + # Python 2 didn't have any of the encodings that we are implementing + # in this file. Python 3 added aliases for the East Asian ones, mapping + # them "temporarily" to the same base encoding as us, with a comment + # suggesting that full implementation will appear some time later. + # As such, try the Python version of the x_mac_... first, if that is found, + # use *that* as our base encoding. This would make our encoding upgrade + # to the full encoding when and if Python finally implements that. 
+ # http://bugs.python.org/issue24041 + base_encodings = [name[:-4], base_encoding] + for base_encoding in base_encodings: + try: + codecs.lookup(base_encoding) + except LookupError: + continue + _cache[name] = ExtendCodec(name, base_encoding, mapping) + break + return _cache[name].info + + return None + +codecs.register(search_function) diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/__init__.py b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__init__.py new file mode 100644 index 00000000..ae532cd3 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__init__.py @@ -0,0 +1,4 @@ +"""fontTools.feaLib -- a package for dealing with OpenType feature files.""" + +# The structure of OpenType feature files is defined here: +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/__main__.py b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__main__.py new file mode 100644 index 00000000..99c64231 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__main__.py @@ -0,0 +1,77 @@ +from fontTools.ttLib import TTFont +from fontTools.feaLib.builder import addOpenTypeFeatures, Builder +from fontTools.feaLib.error import FeatureLibError +from fontTools import configLogger +from fontTools.misc.cliTools import makeOutputFileName +import sys +import argparse +import logging + + +log = logging.getLogger("fontTools.feaLib") + + +def main(args=None): + """Add features from a feature file (.fea) into a OTF font""" + parser = argparse.ArgumentParser( + description="Use fontTools to compile OpenType feature files (*.fea)." 
+ ) + parser.add_argument( + "input_fea", metavar="FEATURES", help="Path to the feature file" + ) + parser.add_argument( + "input_font", metavar="INPUT_FONT", help="Path to the input font" + ) + parser.add_argument( + "-o", + "--output", + dest="output_font", + metavar="OUTPUT_FONT", + help="Path to the output font.", + ) + parser.add_argument( + "-t", + "--tables", + metavar="TABLE_TAG", + choices=Builder.supportedTables, + nargs="+", + help="Specify the table(s) to be built.", + ) + parser.add_argument( + "-d", + "--debug", + action="store_true", + help="Add source-level debugging information to font.", + ) + parser.add_argument( + "-v", + "--verbose", + help="increase the logger verbosity. Multiple -v " "options are allowed.", + action="count", + default=0, + ) + parser.add_argument( + "--traceback", help="show traceback for exceptions.", action="store_true" + ) + options = parser.parse_args(args) + + levels = ["WARNING", "INFO", "DEBUG"] + configLogger(level=levels[min(len(levels) - 1, options.verbose)]) + + output_font = options.output_font or makeOutputFileName(options.input_font) + log.info("Compiling features to '%s'" % (output_font)) + + font = TTFont(options.input_font) + try: + addOpenTypeFeatures( + font, options.input_fea, tables=options.tables, debug=options.debug + ) + except FeatureLibError as e: + if options.traceback: + raise + log.error(e) + font.save(output_font) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/__init__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..e008b1bd Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/__main__.cpython-39.pyc 
b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/__main__.cpython-39.pyc new file mode 100644 index 00000000..2d9a628c Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/__main__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/ast.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/ast.cpython-39.pyc new file mode 100644 index 00000000..02754250 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/ast.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/builder.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/builder.cpython-39.pyc new file mode 100644 index 00000000..579b6b5d Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/builder.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/error.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/error.cpython-39.pyc new file mode 100644 index 00000000..d97a1ec2 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/error.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/lexer.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/lexer.cpython-39.pyc new file mode 100644 index 00000000..36f2fd17 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/lexer.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/location.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/location.cpython-39.pyc new file mode 100644 index 00000000..fc4876a0 Binary files /dev/null and 
b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/location.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/lookupDebugInfo.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/lookupDebugInfo.cpython-39.pyc new file mode 100644 index 00000000..937e41d2 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/lookupDebugInfo.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/parser.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/parser.cpython-39.pyc new file mode 100644 index 00000000..e5739b9e Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/parser.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/variableScalar.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/variableScalar.cpython-39.pyc new file mode 100644 index 00000000..bd979b91 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/feaLib/__pycache__/variableScalar.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/ast.py b/.venv/lib/python3.9/site-packages/fontTools/feaLib/ast.py new file mode 100644 index 00000000..1273343d --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/feaLib/ast.py @@ -0,0 +1,2128 @@ +from fontTools.feaLib.error import FeatureLibError +from fontTools.feaLib.location import FeatureLibLocation +from fontTools.misc.encodingTools import getEncoding +from fontTools.misc.textTools import byteord, tobytes +from collections import OrderedDict +import itertools + +SHIFT = " " * 4 + +__all__ = [ + "Element", + "FeatureFile", + "Comment", + "GlyphName", + "GlyphClass", + "GlyphClassName", + "MarkClassName", + "AnonymousBlock", + "Block", + "FeatureBlock", + "NestedBlock", + "LookupBlock", + 
"GlyphClassDefinition", + "GlyphClassDefStatement", + "MarkClass", + "MarkClassDefinition", + "AlternateSubstStatement", + "Anchor", + "AnchorDefinition", + "AttachStatement", + "AxisValueLocationStatement", + "BaseAxis", + "CVParametersNameStatement", + "ChainContextPosStatement", + "ChainContextSubstStatement", + "CharacterStatement", + "ConditionsetStatement", + "CursivePosStatement", + "ElidedFallbackName", + "ElidedFallbackNameID", + "Expression", + "FeatureNameStatement", + "FeatureReferenceStatement", + "FontRevisionStatement", + "HheaField", + "IgnorePosStatement", + "IgnoreSubstStatement", + "IncludeStatement", + "LanguageStatement", + "LanguageSystemStatement", + "LigatureCaretByIndexStatement", + "LigatureCaretByPosStatement", + "LigatureSubstStatement", + "LookupFlagStatement", + "LookupReferenceStatement", + "MarkBasePosStatement", + "MarkLigPosStatement", + "MarkMarkPosStatement", + "MultipleSubstStatement", + "NameRecord", + "OS2Field", + "PairPosStatement", + "ReverseChainSingleSubstStatement", + "ScriptStatement", + "SinglePosStatement", + "SingleSubstStatement", + "SizeParameters", + "Statement", + "STATAxisValueStatement", + "STATDesignAxisStatement", + "STATNameStatement", + "SubtableStatement", + "TableBlock", + "ValueRecord", + "ValueRecordDefinition", + "VheaField", +] + + +def deviceToString(device): + if device is None: + return "" + else: + return "" % ", ".join("%d %d" % t for t in device) + + +fea_keywords = set( + [ + "anchor", + "anchordef", + "anon", + "anonymous", + "by", + "contour", + "cursive", + "device", + "enum", + "enumerate", + "excludedflt", + "exclude_dflt", + "feature", + "from", + "ignore", + "ignorebaseglyphs", + "ignoreligatures", + "ignoremarks", + "include", + "includedflt", + "include_dflt", + "language", + "languagesystem", + "lookup", + "lookupflag", + "mark", + "markattachmenttype", + "markclass", + "nameid", + "null", + "parameters", + "pos", + "position", + "required", + "righttoleft", + "reversesub", + "rsub", 
+ "script", + "sub", + "substitute", + "subtable", + "table", + "usemarkfilteringset", + "useextension", + "valuerecorddef", + "base", + "gdef", + "head", + "hhea", + "name", + "vhea", + "vmtx", + ] +) + + +def asFea(g): + if hasattr(g, "asFea"): + return g.asFea() + elif isinstance(g, tuple) and len(g) == 2: + return asFea(g[0]) + " - " + asFea(g[1]) # a range + elif g.lower() in fea_keywords: + return "\\" + g + else: + return g + + +class Element(object): + """A base class representing "something" in a feature file.""" + + def __init__(self, location=None): + #: location of this element as a `FeatureLibLocation` object. + if location and not isinstance(location, FeatureLibLocation): + location = FeatureLibLocation(*location) + self.location = location + + def build(self, builder): + pass + + def asFea(self, indent=""): + """Returns this element as a string of feature code. For block-type + elements (such as :class:`FeatureBlock`), the `indent` string is + added to the start of each line in the output.""" + raise NotImplementedError + + def __str__(self): + return self.asFea() + + +class Statement(Element): + pass + + +class Expression(Element): + pass + + +class Comment(Element): + """A comment in a feature file.""" + + def __init__(self, text, location=None): + super(Comment, self).__init__(location) + #: Text of the comment + self.text = text + + def asFea(self, indent=""): + return self.text + + +class NullGlyph(Expression): + """The NULL glyph, used in glyph deletion substitutions.""" + + def __init__(self, location=None): + Expression.__init__(self, location) + #: The name itself as a string + + def glyphSet(self): + """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" + return () + + def asFea(self, indent=""): + return "NULL" + + +class GlyphName(Expression): + """A single glyph name, such as ``cedilla``.""" + + def __init__(self, glyph, location=None): + Expression.__init__(self, location) + #: The name itself as a string + self.glyph 
= glyph + + def glyphSet(self): + """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" + return (self.glyph,) + + def asFea(self, indent=""): + return asFea(self.glyph) + + +class GlyphClass(Expression): + """A glyph class, such as ``[acute cedilla grave]``.""" + + def __init__(self, glyphs=None, location=None): + Expression.__init__(self, location) + #: The list of glyphs in this class, as :class:`GlyphName` objects. + self.glyphs = glyphs if glyphs is not None else [] + self.original = [] + self.curr = 0 + + def glyphSet(self): + """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" + return tuple(self.glyphs) + + def asFea(self, indent=""): + if len(self.original): + if self.curr < len(self.glyphs): + self.original.extend(self.glyphs[self.curr :]) + self.curr = len(self.glyphs) + return "[" + " ".join(map(asFea, self.original)) + "]" + else: + return "[" + " ".join(map(asFea, self.glyphs)) + "]" + + def extend(self, glyphs): + """Add a list of :class:`GlyphName` objects to the class.""" + self.glyphs.extend(glyphs) + + def append(self, glyph): + """Add a single :class:`GlyphName` object to the class.""" + self.glyphs.append(glyph) + + def add_range(self, start, end, glyphs): + """Add a range (e.g. ``A-Z``) to the class. ``start`` and ``end`` + are either :class:`GlyphName` objects or strings representing the + start and end glyphs in the class, and ``glyphs`` is the full list of + :class:`GlyphName` objects in the range.""" + if self.curr < len(self.glyphs): + self.original.extend(self.glyphs[self.curr :]) + self.original.append((start, end)) + self.glyphs.extend(glyphs) + self.curr = len(self.glyphs) + + def add_cid_range(self, start, end, glyphs): + """Add a range to the class by glyph ID. 
``start`` and ``end`` are the + initial and final IDs, and ``glyphs`` is the full list of + :class:`GlyphName` objects in the range.""" + if self.curr < len(self.glyphs): + self.original.extend(self.glyphs[self.curr :]) + self.original.append(("\\{}".format(start), "\\{}".format(end))) + self.glyphs.extend(glyphs) + self.curr = len(self.glyphs) + + def add_class(self, gc): + """Add glyphs from the given :class:`GlyphClassName` object to the + class.""" + if self.curr < len(self.glyphs): + self.original.extend(self.glyphs[self.curr :]) + self.original.append(gc) + self.glyphs.extend(gc.glyphSet()) + self.curr = len(self.glyphs) + + +class GlyphClassName(Expression): + """A glyph class name, such as ``@FRENCH_MARKS``. This must be instantiated + with a :class:`GlyphClassDefinition` object.""" + + def __init__(self, glyphclass, location=None): + Expression.__init__(self, location) + assert isinstance(glyphclass, GlyphClassDefinition) + self.glyphclass = glyphclass + + def glyphSet(self): + """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" + return tuple(self.glyphclass.glyphSet()) + + def asFea(self, indent=""): + return "@" + self.glyphclass.name + + +class MarkClassName(Expression): + """A mark class name, such as ``@FRENCH_MARKS`` defined with ``markClass``. 
+ This must be instantiated with a :class:`MarkClass` object.""" + + def __init__(self, markClass, location=None): + Expression.__init__(self, location) + assert isinstance(markClass, MarkClass) + self.markClass = markClass + + def glyphSet(self): + """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" + return self.markClass.glyphSet() + + def asFea(self, indent=""): + return "@" + self.markClass.name + + +class AnonymousBlock(Statement): + """An anonymous data block.""" + + def __init__(self, tag, content, location=None): + Statement.__init__(self, location) + self.tag = tag #: string containing the block's "tag" + self.content = content #: block data as string + + def asFea(self, indent=""): + res = "anon {} {{\n".format(self.tag) + res += self.content + res += "}} {};\n\n".format(self.tag) + return res + + +class Block(Statement): + """A block of statements: feature, lookup, etc.""" + + def __init__(self, location=None): + Statement.__init__(self, location) + self.statements = [] #: Statements contained in the block + + def build(self, builder): + """When handed a 'builder' object of comparable interface to + :class:`fontTools.feaLib.builder`, walks the statements in this + block, calling the builder callbacks.""" + for s in self.statements: + s.build(builder) + + def asFea(self, indent=""): + indent += SHIFT + return ( + indent + + ("\n" + indent).join([s.asFea(indent=indent) for s in self.statements]) + + "\n" + ) + + +class FeatureFile(Block): + """The top-level element of the syntax tree, containing the whole feature + file in its ``statements`` attribute.""" + + def __init__(self): + Block.__init__(self, location=None) + self.markClasses = {} # name --> ast.MarkClass + + def asFea(self, indent=""): + return "\n".join(s.asFea(indent=indent) for s in self.statements) + + +class FeatureBlock(Block): + """A named feature block.""" + + def __init__(self, name, use_extension=False, location=None): + Block.__init__(self, location) + self.name, 
self.use_extension = name, use_extension + + def build(self, builder): + """Call the ``start_feature`` callback on the builder object, visit + all the statements in this feature, and then call ``end_feature``.""" + # TODO(sascha): Handle use_extension. + builder.start_feature(self.location, self.name) + # language exclude_dflt statements modify builder.features_ + # limit them to this block with temporary builder.features_ + features = builder.features_ + builder.features_ = {} + Block.build(self, builder) + for key, value in builder.features_.items(): + features.setdefault(key, []).extend(value) + builder.features_ = features + builder.end_feature() + + def asFea(self, indent=""): + res = indent + "feature %s " % self.name.strip() + if self.use_extension: + res += "useExtension " + res += "{\n" + res += Block.asFea(self, indent=indent) + res += indent + "} %s;\n" % self.name.strip() + return res + + +class NestedBlock(Block): + """A block inside another block, for example when found inside a + ``cvParameters`` block.""" + + def __init__(self, tag, block_name, location=None): + Block.__init__(self, location) + self.tag = tag + self.block_name = block_name + + def build(self, builder): + Block.build(self, builder) + if self.block_name == "ParamUILabelNameID": + builder.add_to_cv_num_named_params(self.tag) + + def asFea(self, indent=""): + res = "{}{} {{\n".format(indent, self.block_name) + res += Block.asFea(self, indent=indent) + res += "{}}};\n".format(indent) + return res + + +class LookupBlock(Block): + """A named lookup, containing ``statements``.""" + + def __init__(self, name, use_extension=False, location=None): + Block.__init__(self, location) + self.name, self.use_extension = name, use_extension + + def build(self, builder): + # TODO(sascha): Handle use_extension. 
+ builder.start_lookup_block(self.location, self.name) + Block.build(self, builder) + builder.end_lookup_block() + + def asFea(self, indent=""): + res = "lookup {} ".format(self.name) + if self.use_extension: + res += "useExtension " + res += "{\n" + res += Block.asFea(self, indent=indent) + res += "{}}} {};\n".format(indent, self.name) + return res + + +class TableBlock(Block): + """A ``table ... { }`` block.""" + + def __init__(self, name, location=None): + Block.__init__(self, location) + self.name = name + + def asFea(self, indent=""): + res = "table {} {{\n".format(self.name.strip()) + res += super(TableBlock, self).asFea(indent=indent) + res += "}} {};\n".format(self.name.strip()) + return res + + +class GlyphClassDefinition(Statement): + """Example: ``@UPPERCASE = [A-Z];``.""" + + def __init__(self, name, glyphs, location=None): + Statement.__init__(self, location) + self.name = name #: class name as a string, without initial ``@`` + self.glyphs = glyphs #: a :class:`GlyphClass` object + + def glyphSet(self): + """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" + return tuple(self.glyphs.glyphSet()) + + def asFea(self, indent=""): + return "@" + self.name + " = " + self.glyphs.asFea() + ";" + + +class GlyphClassDefStatement(Statement): + """Example: ``GlyphClassDef @UPPERCASE, [B], [C], [D];``. 
The parameters + must be either :class:`GlyphClass` or :class:`GlyphClassName` objects, or + ``None``.""" + + def __init__( + self, baseGlyphs, markGlyphs, ligatureGlyphs, componentGlyphs, location=None + ): + Statement.__init__(self, location) + self.baseGlyphs, self.markGlyphs = (baseGlyphs, markGlyphs) + self.ligatureGlyphs = ligatureGlyphs + self.componentGlyphs = componentGlyphs + + def build(self, builder): + """Calls the builder's ``add_glyphClassDef`` callback.""" + base = self.baseGlyphs.glyphSet() if self.baseGlyphs else tuple() + liga = self.ligatureGlyphs.glyphSet() if self.ligatureGlyphs else tuple() + mark = self.markGlyphs.glyphSet() if self.markGlyphs else tuple() + comp = self.componentGlyphs.glyphSet() if self.componentGlyphs else tuple() + builder.add_glyphClassDef(self.location, base, liga, mark, comp) + + def asFea(self, indent=""): + return "GlyphClassDef {}, {}, {}, {};".format( + self.baseGlyphs.asFea() if self.baseGlyphs else "", + self.ligatureGlyphs.asFea() if self.ligatureGlyphs else "", + self.markGlyphs.asFea() if self.markGlyphs else "", + self.componentGlyphs.asFea() if self.componentGlyphs else "", + ) + + +class MarkClass(object): + """One `or more` ``markClass`` statements for the same mark class. + + While glyph classes can be defined only once, the feature file format + allows expanding mark classes with multiple definitions, each using + different glyphs and anchors. The following are two ``MarkClassDefinitions`` + for the same ``MarkClass``:: + + markClass [acute grave] @FRENCH_ACCENTS; + markClass [cedilla] @FRENCH_ACCENTS; + + The ``MarkClass`` object is therefore just a container for a list of + :class:`MarkClassDefinition` statements. 
+ """ + + def __init__(self, name): + self.name = name + self.definitions = [] + self.glyphs = OrderedDict() # glyph --> ast.MarkClassDefinitions + + def addDefinition(self, definition): + """Add a :class:`MarkClassDefinition` statement to this mark class.""" + assert isinstance(definition, MarkClassDefinition) + self.definitions.append(definition) + for glyph in definition.glyphSet(): + if glyph in self.glyphs: + otherLoc = self.glyphs[glyph].location + if otherLoc is None: + end = "" + else: + end = f" at {otherLoc}" + raise FeatureLibError( + "Glyph %s already defined%s" % (glyph, end), definition.location + ) + self.glyphs[glyph] = definition + + def glyphSet(self): + """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" + return tuple(self.glyphs.keys()) + + def asFea(self, indent=""): + res = "\n".join(d.asFea() for d in self.definitions) + return res + + +class MarkClassDefinition(Statement): + """A single ``markClass`` statement. The ``markClass`` should be a + :class:`MarkClass` object, the ``anchor`` an :class:`Anchor` object, + and the ``glyphs`` parameter should be a `glyph-containing object`_ . + + Example: + + .. 
code:: python + + mc = MarkClass("FRENCH_ACCENTS") + mc.addDefinition( MarkClassDefinition(mc, Anchor(350, 800), + GlyphClass([ GlyphName("acute"), GlyphName("grave") ]) + ) ) + mc.addDefinition( MarkClassDefinition(mc, Anchor(350, -200), + GlyphClass([ GlyphName("cedilla") ]) + ) ) + + mc.asFea() + # markClass [acute grave] @FRENCH_ACCENTS; + # markClass [cedilla] @FRENCH_ACCENTS; + + """ + + def __init__(self, markClass, anchor, glyphs, location=None): + Statement.__init__(self, location) + assert isinstance(markClass, MarkClass) + assert isinstance(anchor, Anchor) and isinstance(glyphs, Expression) + self.markClass, self.anchor, self.glyphs = markClass, anchor, glyphs + + def glyphSet(self): + """The glyphs in this class as a tuple of :class:`GlyphName` objects.""" + return self.glyphs.glyphSet() + + def asFea(self, indent=""): + return "markClass {} {} @{};".format( + self.glyphs.asFea(), self.anchor.asFea(), self.markClass.name + ) + + +class AlternateSubstStatement(Statement): + """A ``sub ... from ...`` statement. + + ``prefix``, ``glyph``, ``suffix`` and ``replacement`` should be lists of + `glyph-containing objects`_. 
``glyph`` should be a `one element list`.""" + + def __init__(self, prefix, glyph, suffix, replacement, location=None): + Statement.__init__(self, location) + self.prefix, self.glyph, self.suffix = (prefix, glyph, suffix) + self.replacement = replacement + + def build(self, builder): + """Calls the builder's ``add_alternate_subst`` callback.""" + glyph = self.glyph.glyphSet() + assert len(glyph) == 1, glyph + glyph = list(glyph)[0] + prefix = [p.glyphSet() for p in self.prefix] + suffix = [s.glyphSet() for s in self.suffix] + replacement = self.replacement.glyphSet() + builder.add_alternate_subst(self.location, prefix, glyph, suffix, replacement) + + def asFea(self, indent=""): + res = "sub " + if len(self.prefix) or len(self.suffix): + if len(self.prefix): + res += " ".join(map(asFea, self.prefix)) + " " + res += asFea(self.glyph) + "'" # even though we really only use 1 + if len(self.suffix): + res += " " + " ".join(map(asFea, self.suffix)) + else: + res += asFea(self.glyph) + res += " from " + res += asFea(self.replacement) + res += ";" + return res + + +class Anchor(Expression): + """An ``Anchor`` element, used inside a ``pos`` rule. + + If a ``name`` is given, this will be used in preference to the coordinates. + Other values should be integer. 
+ """ + + def __init__( + self, + x, + y, + name=None, + contourpoint=None, + xDeviceTable=None, + yDeviceTable=None, + location=None, + ): + Expression.__init__(self, location) + self.name = name + self.x, self.y, self.contourpoint = x, y, contourpoint + self.xDeviceTable, self.yDeviceTable = xDeviceTable, yDeviceTable + + def asFea(self, indent=""): + if self.name is not None: + return "".format(self.name) + res = "" + exit = self.exitAnchor.asFea() if self.exitAnchor else "" + return "pos cursive {} {} {};".format(self.glyphclass.asFea(), entry, exit) + + +class FeatureReferenceStatement(Statement): + """Example: ``feature salt;``""" + + def __init__(self, featureName, location=None): + Statement.__init__(self, location) + self.location, self.featureName = (location, featureName) + + def build(self, builder): + """Calls the builder object's ``add_feature_reference`` callback.""" + builder.add_feature_reference(self.location, self.featureName) + + def asFea(self, indent=""): + return "feature {};".format(self.featureName) + + +class IgnorePosStatement(Statement): + """An ``ignore pos`` statement, containing `one or more` contexts to ignore. 
+ + ``chainContexts`` should be a list of ``(prefix, glyphs, suffix)`` tuples, + with each of ``prefix``, ``glyphs`` and ``suffix`` being + `glyph-containing objects`_ .""" + + def __init__(self, chainContexts, location=None): + Statement.__init__(self, location) + self.chainContexts = chainContexts + + def build(self, builder): + """Calls the builder object's ``add_chain_context_pos`` callback on each + rule context.""" + for prefix, glyphs, suffix in self.chainContexts: + prefix = [p.glyphSet() for p in prefix] + glyphs = [g.glyphSet() for g in glyphs] + suffix = [s.glyphSet() for s in suffix] + builder.add_chain_context_pos(self.location, prefix, glyphs, suffix, []) + + def asFea(self, indent=""): + contexts = [] + for prefix, glyphs, suffix in self.chainContexts: + res = "" + if len(prefix) or len(suffix): + if len(prefix): + res += " ".join(map(asFea, prefix)) + " " + res += " ".join(g.asFea() + "'" for g in glyphs) + if len(suffix): + res += " " + " ".join(map(asFea, suffix)) + else: + res += " ".join(map(asFea, glyphs)) + contexts.append(res) + return "ignore pos " + ", ".join(contexts) + ";" + + +class IgnoreSubstStatement(Statement): + """An ``ignore sub`` statement, containing `one or more` contexts to ignore. 
+ + ``chainContexts`` should be a list of ``(prefix, glyphs, suffix)`` tuples, + with each of ``prefix``, ``glyphs`` and ``suffix`` being + `glyph-containing objects`_ .""" + + def __init__(self, chainContexts, location=None): + Statement.__init__(self, location) + self.chainContexts = chainContexts + + def build(self, builder): + """Calls the builder object's ``add_chain_context_subst`` callback on + each rule context.""" + for prefix, glyphs, suffix in self.chainContexts: + prefix = [p.glyphSet() for p in prefix] + glyphs = [g.glyphSet() for g in glyphs] + suffix = [s.glyphSet() for s in suffix] + builder.add_chain_context_subst(self.location, prefix, glyphs, suffix, []) + + def asFea(self, indent=""): + contexts = [] + for prefix, glyphs, suffix in self.chainContexts: + res = "" + if len(prefix) or len(suffix): + if len(prefix): + res += " ".join(map(asFea, prefix)) + " " + res += " ".join(g.asFea() + "'" for g in glyphs) + if len(suffix): + res += " " + " ".join(map(asFea, suffix)) + else: + res += " ".join(map(asFea, glyphs)) + contexts.append(res) + return "ignore sub " + ", ".join(contexts) + ";" + + +class IncludeStatement(Statement): + """An ``include()`` statement.""" + + def __init__(self, filename, location=None): + super(IncludeStatement, self).__init__(location) + self.filename = filename #: String containing name of file to include + + def build(self): + # TODO: consider lazy-loading the including parser/lexer? + raise FeatureLibError( + "Building an include statement is not implemented yet. 
" + "Instead, use Parser(..., followIncludes=True) for building.", + self.location, + ) + + def asFea(self, indent=""): + return indent + "include(%s);" % self.filename + + +class LanguageStatement(Statement): + """A ``language`` statement within a feature.""" + + def __init__(self, language, include_default=True, required=False, location=None): + Statement.__init__(self, location) + assert len(language) == 4 + self.language = language #: A four-character language tag + self.include_default = include_default #: If false, "exclude_dflt" + self.required = required + + def build(self, builder): + """Call the builder object's ``set_language`` callback.""" + builder.set_language( + location=self.location, + language=self.language, + include_default=self.include_default, + required=self.required, + ) + + def asFea(self, indent=""): + res = "language {}".format(self.language.strip()) + if not self.include_default: + res += " exclude_dflt" + if self.required: + res += " required" + res += ";" + return res + + +class LanguageSystemStatement(Statement): + """A top-level ``languagesystem`` statement.""" + + def __init__(self, script, language, location=None): + Statement.__init__(self, location) + self.script, self.language = (script, language) + + def build(self, builder): + """Calls the builder object's ``add_language_system`` callback.""" + builder.add_language_system(self.location, self.script, self.language) + + def asFea(self, indent=""): + return "languagesystem {} {};".format(self.script, self.language.strip()) + + +class FontRevisionStatement(Statement): + """A ``head`` table ``FontRevision`` statement. 
``revision`` should be a + number, and will be formatted to three significant decimal places.""" + + def __init__(self, revision, location=None): + Statement.__init__(self, location) + self.revision = revision + + def build(self, builder): + builder.set_font_revision(self.location, self.revision) + + def asFea(self, indent=""): + return "FontRevision {:.3f};".format(self.revision) + + +class LigatureCaretByIndexStatement(Statement): + """A ``GDEF`` table ``LigatureCaretByIndex`` statement. ``glyphs`` should be + a `glyph-containing object`_, and ``carets`` should be a list of integers.""" + + def __init__(self, glyphs, carets, location=None): + Statement.__init__(self, location) + self.glyphs, self.carets = (glyphs, carets) + + def build(self, builder): + """Calls the builder object's ``add_ligatureCaretByIndex_`` callback.""" + glyphs = self.glyphs.glyphSet() + builder.add_ligatureCaretByIndex_(self.location, glyphs, set(self.carets)) + + def asFea(self, indent=""): + return "LigatureCaretByIndex {} {};".format( + self.glyphs.asFea(), " ".join(str(x) for x in self.carets) + ) + + +class LigatureCaretByPosStatement(Statement): + """A ``GDEF`` table ``LigatureCaretByPos`` statement. ``glyphs`` should be + a `glyph-containing object`_, and ``carets`` should be a list of integers.""" + + def __init__(self, glyphs, carets, location=None): + Statement.__init__(self, location) + self.glyphs, self.carets = (glyphs, carets) + + def build(self, builder): + """Calls the builder object's ``add_ligatureCaretByPos_`` callback.""" + glyphs = self.glyphs.glyphSet() + builder.add_ligatureCaretByPos_(self.location, glyphs, set(self.carets)) + + def asFea(self, indent=""): + return "LigatureCaretByPos {} {};".format( + self.glyphs.asFea(), " ".join(str(x) for x in self.carets) + ) + + +class LigatureSubstStatement(Statement): + """A chained contextual substitution statement. 
+ + ``prefix``, ``glyphs``, and ``suffix`` should be lists of + `glyph-containing objects`_; ``replacement`` should be a single + `glyph-containing object`_. + + If ``forceChain`` is True, this is expressed as a chaining rule + (e.g. ``sub f' i' by f_i``) even when no context is given.""" + + def __init__(self, prefix, glyphs, suffix, replacement, forceChain, location=None): + Statement.__init__(self, location) + self.prefix, self.glyphs, self.suffix = (prefix, glyphs, suffix) + self.replacement, self.forceChain = replacement, forceChain + + def build(self, builder): + prefix = [p.glyphSet() for p in self.prefix] + glyphs = [g.glyphSet() for g in self.glyphs] + suffix = [s.glyphSet() for s in self.suffix] + builder.add_ligature_subst( + self.location, prefix, glyphs, suffix, self.replacement, self.forceChain + ) + + def asFea(self, indent=""): + res = "sub " + if len(self.prefix) or len(self.suffix) or self.forceChain: + if len(self.prefix): + res += " ".join(g.asFea() for g in self.prefix) + " " + res += " ".join(g.asFea() + "'" for g in self.glyphs) + if len(self.suffix): + res += " " + " ".join(g.asFea() for g in self.suffix) + else: + res += " ".join(g.asFea() for g in self.glyphs) + res += " by " + res += asFea(self.replacement) + res += ";" + return res + + +class LookupFlagStatement(Statement): + """A ``lookupflag`` statement. 
The ``value`` should be an integer value + representing the flags in use, but not including the ``markAttachment`` + class and ``markFilteringSet`` values, which must be specified as + glyph-containing objects.""" + + def __init__( + self, value=0, markAttachment=None, markFilteringSet=None, location=None + ): + Statement.__init__(self, location) + self.value = value + self.markAttachment = markAttachment + self.markFilteringSet = markFilteringSet + + def build(self, builder): + """Calls the builder object's ``set_lookup_flag`` callback.""" + markAttach = None + if self.markAttachment is not None: + markAttach = self.markAttachment.glyphSet() + markFilter = None + if self.markFilteringSet is not None: + markFilter = self.markFilteringSet.glyphSet() + builder.set_lookup_flag(self.location, self.value, markAttach, markFilter) + + def asFea(self, indent=""): + res = [] + flags = ["RightToLeft", "IgnoreBaseGlyphs", "IgnoreLigatures", "IgnoreMarks"] + curr = 1 + for i in range(len(flags)): + if self.value & curr != 0: + res.append(flags[i]) + curr = curr << 1 + if self.markAttachment is not None: + res.append("MarkAttachmentType {}".format(self.markAttachment.asFea())) + if self.markFilteringSet is not None: + res.append("UseMarkFilteringSet {}".format(self.markFilteringSet.asFea())) + if not res: + res = ["0"] + return "lookupflag {};".format(" ".join(res)) + + +class LookupReferenceStatement(Statement): + """Represents a ``lookup ...;`` statement to include a lookup in a feature. + + The ``lookup`` should be a :class:`LookupBlock` object.""" + + def __init__(self, lookup, location=None): + Statement.__init__(self, location) + self.location, self.lookup = (location, lookup) + + def build(self, builder): + """Calls the builder object's ``add_lookup_call`` callback.""" + builder.add_lookup_call(self.lookup.name) + + def asFea(self, indent=""): + return "lookup {};".format(self.lookup.name) + + +class MarkBasePosStatement(Statement): + """A mark-to-base positioning rule. 
The ``base`` should be a + `glyph-containing object`_. The ``marks`` should be a list of + (:class:`Anchor`, :class:`MarkClass`) tuples.""" + + def __init__(self, base, marks, location=None): + Statement.__init__(self, location) + self.base, self.marks = base, marks + + def build(self, builder): + """Calls the builder object's ``add_mark_base_pos`` callback.""" + builder.add_mark_base_pos(self.location, self.base.glyphSet(), self.marks) + + def asFea(self, indent=""): + res = "pos base {}".format(self.base.asFea()) + for a, m in self.marks: + res += "\n" + indent + SHIFT + "{} mark @{}".format(a.asFea(), m.name) + res += ";" + return res + + +class MarkLigPosStatement(Statement): + """A mark-to-ligature positioning rule. The ``ligatures`` must be a + `glyph-containing object`_. The ``marks`` should be a list of lists: each + element in the top-level list represents a component glyph, and is made + up of a list of (:class:`Anchor`, :class:`MarkClass`) tuples representing + mark attachment points for that position. + + Example:: + + m1 = MarkClass("TOP_MARKS") + m2 = MarkClass("BOTTOM_MARKS") + # ... add definitions to mark classes... 
+ + glyph = GlyphName("lam_meem_jeem") + marks = [ + [ (Anchor(625,1800), m1) ], # Attachments on 1st component (lam) + [ (Anchor(376,-378), m2) ], # Attachments on 2nd component (meem) + [ ] # No attachments on the jeem + ] + mlp = MarkLigPosStatement(glyph, marks) + + mlp.asFea() + # pos ligature lam_meem_jeem mark @TOP_MARKS + # ligComponent mark @BOTTOM_MARKS; + + """ + + def __init__(self, ligatures, marks, location=None): + Statement.__init__(self, location) + self.ligatures, self.marks = ligatures, marks + + def build(self, builder): + """Calls the builder object's ``add_mark_lig_pos`` callback.""" + builder.add_mark_lig_pos(self.location, self.ligatures.glyphSet(), self.marks) + + def asFea(self, indent=""): + res = "pos ligature {}".format(self.ligatures.asFea()) + ligs = [] + for l in self.marks: + temp = "" + if l is None or not len(l): + temp = "\n" + indent + SHIFT * 2 + "" + else: + for a, m in l: + temp += ( + "\n" + + indent + + SHIFT * 2 + + "{} mark @{}".format(a.asFea(), m.name) + ) + ligs.append(temp) + res += ("\n" + indent + SHIFT + "ligComponent").join(ligs) + res += ";" + return res + + +class MarkMarkPosStatement(Statement): + """A mark-to-mark positioning rule. The ``baseMarks`` must be a + `glyph-containing object`_. The ``marks`` should be a list of + (:class:`Anchor`, :class:`MarkClass`) tuples.""" + + def __init__(self, baseMarks, marks, location=None): + Statement.__init__(self, location) + self.baseMarks, self.marks = baseMarks, marks + + def build(self, builder): + """Calls the builder object's ``add_mark_mark_pos`` callback.""" + builder.add_mark_mark_pos(self.location, self.baseMarks.glyphSet(), self.marks) + + def asFea(self, indent=""): + res = "pos mark {}".format(self.baseMarks.asFea()) + for a, m in self.marks: + res += "\n" + indent + SHIFT + "{} mark @{}".format(a.asFea(), m.name) + res += ";" + return res + + +class MultipleSubstStatement(Statement): + """A multiple substitution statement. 
+ + Args: + prefix: a list of `glyph-containing objects`_. + glyph: a single glyph-containing object. + suffix: a list of glyph-containing objects. + replacement: a list of glyph-containing objects. + forceChain: If true, the statement is expressed as a chaining rule + (e.g. ``sub f' i' by f_i``) even when no context is given. + """ + + def __init__( + self, prefix, glyph, suffix, replacement, forceChain=False, location=None + ): + Statement.__init__(self, location) + self.prefix, self.glyph, self.suffix = prefix, glyph, suffix + self.replacement = replacement + self.forceChain = forceChain + + def build(self, builder): + """Calls the builder object's ``add_multiple_subst`` callback.""" + prefix = [p.glyphSet() for p in self.prefix] + suffix = [s.glyphSet() for s in self.suffix] + if not self.replacement and hasattr(self.glyph, "glyphSet"): + for glyph in self.glyph.glyphSet(): + builder.add_multiple_subst( + self.location, + prefix, + glyph, + suffix, + self.replacement, + self.forceChain, + ) + else: + builder.add_multiple_subst( + self.location, + prefix, + self.glyph, + suffix, + self.replacement, + self.forceChain, + ) + + def asFea(self, indent=""): + res = "sub " + if len(self.prefix) or len(self.suffix) or self.forceChain: + if len(self.prefix): + res += " ".join(map(asFea, self.prefix)) + " " + res += asFea(self.glyph) + "'" + if len(self.suffix): + res += " " + " ".join(map(asFea, self.suffix)) + else: + res += asFea(self.glyph) + replacement = self.replacement or [NullGlyph()] + res += " by " + res += " ".join(map(asFea, replacement)) + res += ";" + return res + + +class PairPosStatement(Statement): + """A pair positioning statement. + + ``glyphs1`` and ``glyphs2`` should be `glyph-containing objects`_. + ``valuerecord1`` should be a :class:`ValueRecord` object; + ``valuerecord2`` should be either a :class:`ValueRecord` object or ``None``. + If ``enumerated`` is true, then this is expressed as an + `enumerated pair `_. 
+ """ + + def __init__( + self, + glyphs1, + valuerecord1, + glyphs2, + valuerecord2, + enumerated=False, + location=None, + ): + Statement.__init__(self, location) + self.enumerated = enumerated + self.glyphs1, self.valuerecord1 = glyphs1, valuerecord1 + self.glyphs2, self.valuerecord2 = glyphs2, valuerecord2 + + def build(self, builder): + """Calls a callback on the builder object: + + * If the rule is enumerated, calls ``add_specific_pair_pos`` on each + combination of first and second glyphs. + * If the glyphs are both single :class:`GlyphName` objects, calls + ``add_specific_pair_pos``. + * Else, calls ``add_class_pair_pos``. + """ + if self.enumerated: + g = [self.glyphs1.glyphSet(), self.glyphs2.glyphSet()] + seen_pair = False + for glyph1, glyph2 in itertools.product(*g): + seen_pair = True + builder.add_specific_pair_pos( + self.location, glyph1, self.valuerecord1, glyph2, self.valuerecord2 + ) + if not seen_pair: + raise FeatureLibError( + "Empty glyph class in positioning rule", self.location + ) + return + + is_specific = isinstance(self.glyphs1, GlyphName) and isinstance( + self.glyphs2, GlyphName + ) + if is_specific: + builder.add_specific_pair_pos( + self.location, + self.glyphs1.glyph, + self.valuerecord1, + self.glyphs2.glyph, + self.valuerecord2, + ) + else: + builder.add_class_pair_pos( + self.location, + self.glyphs1.glyphSet(), + self.valuerecord1, + self.glyphs2.glyphSet(), + self.valuerecord2, + ) + + def asFea(self, indent=""): + res = "enum " if self.enumerated else "" + if self.valuerecord2: + res += "pos {} {} {} {};".format( + self.glyphs1.asFea(), + self.valuerecord1.asFea(), + self.glyphs2.asFea(), + self.valuerecord2.asFea(), + ) + else: + res += "pos {} {} {};".format( + self.glyphs1.asFea(), self.glyphs2.asFea(), self.valuerecord1.asFea() + ) + return res + + +class ReverseChainSingleSubstStatement(Statement): + """A reverse chaining substitution statement. You don't see those every day. 
+ + Note the unusual argument order: ``suffix`` comes `before` ``glyphs``. + ``old_prefix``, ``old_suffix``, ``glyphs`` and ``replacements`` should be + lists of `glyph-containing objects`_. ``glyphs`` and ``replacements`` should + be one-item lists. + """ + + def __init__(self, old_prefix, old_suffix, glyphs, replacements, location=None): + Statement.__init__(self, location) + self.old_prefix, self.old_suffix = old_prefix, old_suffix + self.glyphs = glyphs + self.replacements = replacements + + def build(self, builder): + prefix = [p.glyphSet() for p in self.old_prefix] + suffix = [s.glyphSet() for s in self.old_suffix] + originals = self.glyphs[0].glyphSet() + replaces = self.replacements[0].glyphSet() + if len(replaces) == 1: + replaces = replaces * len(originals) + builder.add_reverse_chain_single_subst( + self.location, prefix, suffix, dict(zip(originals, replaces)) + ) + + def asFea(self, indent=""): + res = "rsub " + if len(self.old_prefix) or len(self.old_suffix): + if len(self.old_prefix): + res += " ".join(asFea(g) for g in self.old_prefix) + " " + res += " ".join(asFea(g) + "'" for g in self.glyphs) + if len(self.old_suffix): + res += " " + " ".join(asFea(g) for g in self.old_suffix) + else: + res += " ".join(map(asFea, self.glyphs)) + res += " by {};".format(" ".join(asFea(g) for g in self.replacements)) + return res + + +class SingleSubstStatement(Statement): + """A single substitution statement. + + Note the unusual argument order: ``prefix`` and suffix come `after` + the replacement ``glyphs``. ``prefix``, ``suffix``, ``glyphs`` and + ``replace`` should be lists of `glyph-containing objects`_. ``glyphs`` and + ``replace`` should be one-item lists. 
+ """ + + def __init__(self, glyphs, replace, prefix, suffix, forceChain, location=None): + Statement.__init__(self, location) + self.prefix, self.suffix = prefix, suffix + self.forceChain = forceChain + self.glyphs = glyphs + self.replacements = replace + + def build(self, builder): + """Calls the builder object's ``add_single_subst`` callback.""" + prefix = [p.glyphSet() for p in self.prefix] + suffix = [s.glyphSet() for s in self.suffix] + originals = self.glyphs[0].glyphSet() + replaces = self.replacements[0].glyphSet() + if len(replaces) == 1: + replaces = replaces * len(originals) + builder.add_single_subst( + self.location, + prefix, + suffix, + OrderedDict(zip(originals, replaces)), + self.forceChain, + ) + + def asFea(self, indent=""): + res = "sub " + if len(self.prefix) or len(self.suffix) or self.forceChain: + if len(self.prefix): + res += " ".join(asFea(g) for g in self.prefix) + " " + res += " ".join(asFea(g) + "'" for g in self.glyphs) + if len(self.suffix): + res += " " + " ".join(asFea(g) for g in self.suffix) + else: + res += " ".join(asFea(g) for g in self.glyphs) + res += " by {};".format(" ".join(asFea(g) for g in self.replacements)) + return res + + +class ScriptStatement(Statement): + """A ``script`` statement.""" + + def __init__(self, script, location=None): + Statement.__init__(self, location) + self.script = script #: the script code + + def build(self, builder): + """Calls the builder's ``set_script`` callback.""" + builder.set_script(self.location, self.script) + + def asFea(self, indent=""): + return "script {};".format(self.script.strip()) + + +class SinglePosStatement(Statement): + """A single position statement. ``prefix`` and ``suffix`` should be + lists of `glyph-containing objects`_. 
+ + ``pos`` should be a one-element list containing a (`glyph-containing object`_, + :class:`ValueRecord`) tuple.""" + + def __init__(self, pos, prefix, suffix, forceChain, location=None): + Statement.__init__(self, location) + self.pos, self.prefix, self.suffix = pos, prefix, suffix + self.forceChain = forceChain + + def build(self, builder): + """Calls the builder object's ``add_single_pos`` callback.""" + prefix = [p.glyphSet() for p in self.prefix] + suffix = [s.glyphSet() for s in self.suffix] + pos = [(g.glyphSet(), value) for g, value in self.pos] + builder.add_single_pos(self.location, prefix, suffix, pos, self.forceChain) + + def asFea(self, indent=""): + res = "pos " + if len(self.prefix) or len(self.suffix) or self.forceChain: + if len(self.prefix): + res += " ".join(map(asFea, self.prefix)) + " " + res += " ".join( + [ + asFea(x[0]) + "'" + ((" " + x[1].asFea()) if x[1] else "") + for x in self.pos + ] + ) + if len(self.suffix): + res += " " + " ".join(map(asFea, self.suffix)) + else: + res += " ".join( + [asFea(x[0]) + " " + (x[1].asFea() if x[1] else "") for x in self.pos] + ) + res += ";" + return res + + +class SubtableStatement(Statement): + """Represents a subtable break.""" + + def __init__(self, location=None): + Statement.__init__(self, location) + + def build(self, builder): + """Calls the builder objects's ``add_subtable_break`` callback.""" + builder.add_subtable_break(self.location) + + def asFea(self, indent=""): + return "subtable;" + + +class ValueRecord(Expression): + """Represents a value record.""" + + def __init__( + self, + xPlacement=None, + yPlacement=None, + xAdvance=None, + yAdvance=None, + xPlaDevice=None, + yPlaDevice=None, + xAdvDevice=None, + yAdvDevice=None, + vertical=False, + location=None, + ): + Expression.__init__(self, location) + self.xPlacement, self.yPlacement = (xPlacement, yPlacement) + self.xAdvance, self.yAdvance = (xAdvance, yAdvance) + self.xPlaDevice, self.yPlaDevice = (xPlaDevice, yPlaDevice) + 
self.xAdvDevice, self.yAdvDevice = (xAdvDevice, yAdvDevice) + self.vertical = vertical + + def __eq__(self, other): + return ( + self.xPlacement == other.xPlacement + and self.yPlacement == other.yPlacement + and self.xAdvance == other.xAdvance + and self.yAdvance == other.yAdvance + and self.xPlaDevice == other.xPlaDevice + and self.xAdvDevice == other.xAdvDevice + ) + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return ( + hash(self.xPlacement) + ^ hash(self.yPlacement) + ^ hash(self.xAdvance) + ^ hash(self.yAdvance) + ^ hash(self.xPlaDevice) + ^ hash(self.yPlaDevice) + ^ hash(self.xAdvDevice) + ^ hash(self.yAdvDevice) + ) + + def asFea(self, indent=""): + if not self: + return "" + + x, y = self.xPlacement, self.yPlacement + xAdvance, yAdvance = self.xAdvance, self.yAdvance + xPlaDevice, yPlaDevice = self.xPlaDevice, self.yPlaDevice + xAdvDevice, yAdvDevice = self.xAdvDevice, self.yAdvDevice + vertical = self.vertical + + # Try format A, if possible. + if x is None and y is None: + if xAdvance is None and vertical: + return str(yAdvance) + elif yAdvance is None and not vertical: + return str(xAdvance) + + # Make any remaining None value 0 to avoid generating invalid records. + x = x or 0 + y = y or 0 + xAdvance = xAdvance or 0 + yAdvance = yAdvance or 0 + + # Try format B, if possible. + if ( + xPlaDevice is None + and yPlaDevice is None + and xAdvDevice is None + and yAdvDevice is None + ): + return "<%s %s %s %s>" % (x, y, xAdvance, yAdvance) + + # Last resort is format C. 
+ return "<%s %s %s %s %s %s %s %s>" % ( + x, + y, + xAdvance, + yAdvance, + deviceToString(xPlaDevice), + deviceToString(yPlaDevice), + deviceToString(xAdvDevice), + deviceToString(yAdvDevice), + ) + + def __bool__(self): + return any( + getattr(self, v) is not None + for v in [ + "xPlacement", + "yPlacement", + "xAdvance", + "yAdvance", + "xPlaDevice", + "yPlaDevice", + "xAdvDevice", + "yAdvDevice", + ] + ) + + __nonzero__ = __bool__ + + +class ValueRecordDefinition(Statement): + """Represents a named value record definition.""" + + def __init__(self, name, value, location=None): + Statement.__init__(self, location) + self.name = name #: Value record name as string + self.value = value #: :class:`ValueRecord` object + + def asFea(self, indent=""): + return "valueRecordDef {} {};".format(self.value.asFea(), self.name) + + +def simplify_name_attributes(pid, eid, lid): + if pid == 3 and eid == 1 and lid == 1033: + return "" + elif pid == 1 and eid == 0 and lid == 0: + return "1" + else: + return "{} {} {}".format(pid, eid, lid) + + +class NameRecord(Statement): + """Represents a name record. (`Section 9.e. `_)""" + + def __init__(self, nameID, platformID, platEncID, langID, string, location=None): + Statement.__init__(self, location) + self.nameID = nameID #: Name ID as integer (e.g. 
9 for designer's name) + self.platformID = platformID #: Platform ID as integer + self.platEncID = platEncID #: Platform encoding ID as integer + self.langID = langID #: Language ID as integer + self.string = string #: Name record value + + def build(self, builder): + """Calls the builder object's ``add_name_record`` callback.""" + builder.add_name_record( + self.location, + self.nameID, + self.platformID, + self.platEncID, + self.langID, + self.string, + ) + + def asFea(self, indent=""): + def escape(c, escape_pattern): + # Also escape U+0022 QUOTATION MARK and U+005C REVERSE SOLIDUS + if c >= 0x20 and c <= 0x7E and c not in (0x22, 0x5C): + return chr(c) + else: + return escape_pattern % c + + encoding = getEncoding(self.platformID, self.platEncID, self.langID) + if encoding is None: + raise FeatureLibError("Unsupported encoding", self.location) + s = tobytes(self.string, encoding=encoding) + if encoding == "utf_16_be": + escaped_string = "".join( + [ + escape(byteord(s[i]) * 256 + byteord(s[i + 1]), r"\%04x") + for i in range(0, len(s), 2) + ] + ) + else: + escaped_string = "".join([escape(byteord(b), r"\%02x") for b in s]) + plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID) + if plat != "": + plat += " " + return 'nameid {} {}"{}";'.format(self.nameID, plat, escaped_string) + + +class FeatureNameStatement(NameRecord): + """Represents a ``sizemenuname`` or ``name`` statement.""" + + def build(self, builder): + """Calls the builder object's ``add_featureName`` callback.""" + NameRecord.build(self, builder) + builder.add_featureName(self.nameID) + + def asFea(self, indent=""): + if self.nameID == "size": + tag = "sizemenuname" + else: + tag = "name" + plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID) + if plat != "": + plat += " " + return '{} {}"{}";'.format(tag, plat, self.string) + + +class STATNameStatement(NameRecord): + """Represents a STAT table ``name`` statement.""" + + def asFea(self, indent=""): + 
plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID) + if plat != "": + plat += " " + return 'name {}"{}";'.format(plat, self.string) + + +class SizeParameters(Statement): + """A ``parameters`` statement.""" + + def __init__(self, DesignSize, SubfamilyID, RangeStart, RangeEnd, location=None): + Statement.__init__(self, location) + self.DesignSize = DesignSize + self.SubfamilyID = SubfamilyID + self.RangeStart = RangeStart + self.RangeEnd = RangeEnd + + def build(self, builder): + """Calls the builder object's ``set_size_parameters`` callback.""" + builder.set_size_parameters( + self.location, + self.DesignSize, + self.SubfamilyID, + self.RangeStart, + self.RangeEnd, + ) + + def asFea(self, indent=""): + res = "parameters {:.1f} {}".format(self.DesignSize, self.SubfamilyID) + if self.RangeStart != 0 or self.RangeEnd != 0: + res += " {} {}".format(int(self.RangeStart * 10), int(self.RangeEnd * 10)) + return res + ";" + + +class CVParametersNameStatement(NameRecord): + """Represent a name statement inside a ``cvParameters`` block.""" + + def __init__( + self, nameID, platformID, platEncID, langID, string, block_name, location=None + ): + NameRecord.__init__( + self, nameID, platformID, platEncID, langID, string, location=location + ) + self.block_name = block_name + + def build(self, builder): + """Calls the builder object's ``add_cv_parameter`` callback.""" + item = "" + if self.block_name == "ParamUILabelNameID": + item = "_{}".format(builder.cv_num_named_params_.get(self.nameID, 0)) + builder.add_cv_parameter(self.nameID) + self.nameID = (self.nameID, self.block_name + item) + NameRecord.build(self, builder) + + def asFea(self, indent=""): + plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID) + if plat != "": + plat += " " + return 'name {}"{}";'.format(plat, self.string) + + +class CharacterStatement(Statement): + """ + Statement used in cvParameters blocks of Character Variant features (cvXX). 
+ The Unicode value may be written with either decimal or hexadecimal + notation. The value must be preceded by '0x' if it is a hexadecimal value. + The largest Unicode value allowed is 0xFFFFFF. + """ + + def __init__(self, character, tag, location=None): + Statement.__init__(self, location) + self.character = character + self.tag = tag + + def build(self, builder): + """Calls the builder object's ``add_cv_character`` callback.""" + builder.add_cv_character(self.character, self.tag) + + def asFea(self, indent=""): + return "Character {:#x};".format(self.character) + + +class BaseAxis(Statement): + """An axis definition, being either a ``VertAxis.BaseTagList/BaseScriptList`` + pair or a ``HorizAxis.BaseTagList/BaseScriptList`` pair.""" + + def __init__(self, bases, scripts, vertical, location=None): + Statement.__init__(self, location) + self.bases = bases #: A list of baseline tag names as strings + self.scripts = scripts #: A list of script record tuplets (script tag, default baseline tag, base coordinate) + self.vertical = vertical #: Boolean; VertAxis if True, HorizAxis if False + + def build(self, builder): + """Calls the builder object's ``set_base_axis`` callback.""" + builder.set_base_axis(self.bases, self.scripts, self.vertical) + + def asFea(self, indent=""): + direction = "Vert" if self.vertical else "Horiz" + scripts = [ + "{} {} {}".format(a[0], a[1], " ".join(map(str, a[2]))) + for a in self.scripts + ] + return "{}Axis.BaseTagList {};\n{}{}Axis.BaseScriptList {};".format( + direction, " ".join(self.bases), indent, direction, ", ".join(scripts) + ) + + +class OS2Field(Statement): + """An entry in the ``OS/2`` table. 
Most ``values`` should be numbers or + strings, apart from when the key is ``UnicodeRange``, ``CodePageRange`` + or ``Panose``, in which case it should be an array of integers.""" + + def __init__(self, key, value, location=None): + Statement.__init__(self, location) + self.key = key + self.value = value + + def build(self, builder): + """Calls the builder object's ``add_os2_field`` callback.""" + builder.add_os2_field(self.key, self.value) + + def asFea(self, indent=""): + def intarr2str(x): + return " ".join(map(str, x)) + + numbers = ( + "FSType", + "TypoAscender", + "TypoDescender", + "TypoLineGap", + "winAscent", + "winDescent", + "XHeight", + "CapHeight", + "WeightClass", + "WidthClass", + "LowerOpSize", + "UpperOpSize", + ) + ranges = ("UnicodeRange", "CodePageRange") + keywords = dict([(x.lower(), [x, str]) for x in numbers]) + keywords.update([(x.lower(), [x, intarr2str]) for x in ranges]) + keywords["panose"] = ["Panose", intarr2str] + keywords["vendor"] = ["Vendor", lambda y: '"{}"'.format(y)] + if self.key in keywords: + return "{} {};".format( + keywords[self.key][0], keywords[self.key][1](self.value) + ) + return "" # should raise exception + + +class HheaField(Statement): + """An entry in the ``hhea`` table.""" + + def __init__(self, key, value, location=None): + Statement.__init__(self, location) + self.key = key + self.value = value + + def build(self, builder): + """Calls the builder object's ``add_hhea_field`` callback.""" + builder.add_hhea_field(self.key, self.value) + + def asFea(self, indent=""): + fields = ("CaretOffset", "Ascender", "Descender", "LineGap") + keywords = dict([(x.lower(), x) for x in fields]) + return "{} {};".format(keywords[self.key], self.value) + + +class VheaField(Statement): + """An entry in the ``vhea`` table.""" + + def __init__(self, key, value, location=None): + Statement.__init__(self, location) + self.key = key + self.value = value + + def build(self, builder): + """Calls the builder object's ``add_vhea_field`` 
callback.""" + builder.add_vhea_field(self.key, self.value) + + def asFea(self, indent=""): + fields = ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap") + keywords = dict([(x.lower(), x) for x in fields]) + return "{} {};".format(keywords[self.key], self.value) + + +class STATDesignAxisStatement(Statement): + """A STAT table Design Axis + + Args: + tag (str): a 4 letter axis tag + axisOrder (int): an int + names (list): a list of :class:`STATNameStatement` objects + """ + + def __init__(self, tag, axisOrder, names, location=None): + Statement.__init__(self, location) + self.tag = tag + self.axisOrder = axisOrder + self.names = names + self.location = location + + def build(self, builder): + builder.addDesignAxis(self, self.location) + + def asFea(self, indent=""): + indent += SHIFT + res = f"DesignAxis {self.tag} {self.axisOrder} {{ \n" + res += ("\n" + indent).join([s.asFea(indent=indent) for s in self.names]) + "\n" + res += "};" + return res + + +class ElidedFallbackName(Statement): + """STAT table ElidedFallbackName + + Args: + names: a list of :class:`STATNameStatement` objects + """ + + def __init__(self, names, location=None): + Statement.__init__(self, location) + self.names = names + self.location = location + + def build(self, builder): + builder.setElidedFallbackName(self.names, self.location) + + def asFea(self, indent=""): + indent += SHIFT + res = "ElidedFallbackName { \n" + res += ("\n" + indent).join([s.asFea(indent=indent) for s in self.names]) + "\n" + res += "};" + return res + + +class ElidedFallbackNameID(Statement): + """STAT table ElidedFallbackNameID + + Args: + value: an int pointing to an existing name table name ID + """ + + def __init__(self, value, location=None): + Statement.__init__(self, location) + self.value = value + self.location = location + + def build(self, builder): + builder.setElidedFallbackName(self.value, self.location) + + def asFea(self, indent=""): + return f"ElidedFallbackNameID {self.value};" + + +class 
STATAxisValueStatement(Statement): + """A STAT table Axis Value Record + + Args: + names (list): a list of :class:`STATNameStatement` objects + locations (list): a list of :class:`AxisValueLocationStatement` objects + flags (int): an int + """ + + def __init__(self, names, locations, flags, location=None): + Statement.__init__(self, location) + self.names = names + self.locations = locations + self.flags = flags + + def build(self, builder): + builder.addAxisValueRecord(self, self.location) + + def asFea(self, indent=""): + res = "AxisValue {\n" + for location in self.locations: + res += location.asFea() + + for nameRecord in self.names: + res += nameRecord.asFea() + res += "\n" + + if self.flags: + flags = ["OlderSiblingFontAttribute", "ElidableAxisValueName"] + flagStrings = [] + curr = 1 + for i in range(len(flags)): + if self.flags & curr != 0: + flagStrings.append(flags[i]) + curr = curr << 1 + res += f"flag {' '.join(flagStrings)};\n" + res += "};" + return res + + +class AxisValueLocationStatement(Statement): + """ + A STAT table Axis Value Location + + Args: + tag (str): a 4 letter axis tag + values (list): a list of ints and/or floats + """ + + def __init__(self, tag, values, location=None): + Statement.__init__(self, location) + self.tag = tag + self.values = values + + def asFea(self, res=""): + res += f"location {self.tag} " + res += f"{' '.join(str(i) for i in self.values)};\n" + return res + + +class ConditionsetStatement(Statement): + """ + A variable layout conditionset + + Args: + name (str): the name of this conditionset + conditions (dict): a dictionary mapping axis tags to a + tuple of (min,max) userspace coordinates. 
+ """ + + def __init__(self, name, conditions, location=None): + Statement.__init__(self, location) + self.name = name + self.conditions = conditions + + def build(self, builder): + builder.add_conditionset(self.name, self.conditions) + + def asFea(self, res="", indent=""): + res += indent + f"conditionset {self.name} " + "{\n" + for tag, (minvalue, maxvalue) in self.conditions.items(): + res += indent + SHIFT + f"{tag} {minvalue} {maxvalue};\n" + res += indent + "}" + f" {self.name};\n" + return res + + +class VariationBlock(Block): + """A variation feature block, applicable in a given set of conditions.""" + + def __init__(self, name, conditionset, use_extension=False, location=None): + Block.__init__(self, location) + self.name, self.conditionset, self.use_extension = ( + name, + conditionset, + use_extension, + ) + + def build(self, builder): + """Call the ``start_feature`` callback on the builder object, visit + all the statements in this feature, and then call ``end_feature``.""" + builder.start_feature(self.location, self.name) + if ( + self.conditionset != "NULL" + and self.conditionset not in builder.conditionsets_ + ): + raise FeatureLibError( + f"variation block used undefined conditionset {self.conditionset}", + self.location, + ) + + # language exclude_dflt statements modify builder.features_ + # limit them to this block with temporary builder.features_ + features = builder.features_ + builder.features_ = {} + Block.build(self, builder) + for key, value in builder.features_.items(): + items = builder.feature_variations_.setdefault(key, {}).setdefault( + self.conditionset, [] + ) + items.extend(value) + if key not in features: + features[key] = [] # Ensure we make a feature record + builder.features_ = features + builder.end_feature() + + def asFea(self, indent=""): + res = indent + "variation %s " % self.name.strip() + res += self.conditionset + " " + if self.use_extension: + res += "useExtension " + res += "{\n" + res += Block.asFea(self, 
indent=indent) + res += indent + "} %s;\n" % self.name.strip() + return res diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/builder.py b/.venv/lib/python3.9/site-packages/fontTools/feaLib/builder.py new file mode 100644 index 00000000..a1644875 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/feaLib/builder.py @@ -0,0 +1,1654 @@ +from fontTools.misc import sstruct +from fontTools.misc.textTools import Tag, tostr, binary2num, safeEval +from fontTools.feaLib.error import FeatureLibError +from fontTools.feaLib.lookupDebugInfo import ( + LookupDebugInfo, + LOOKUP_DEBUG_INFO_KEY, + LOOKUP_DEBUG_ENV_VAR, +) +from fontTools.feaLib.parser import Parser +from fontTools.feaLib.ast import FeatureFile +from fontTools.feaLib.variableScalar import VariableScalar +from fontTools.otlLib import builder as otl +from fontTools.otlLib.maxContextCalc import maxCtxFont +from fontTools.ttLib import newTable, getTableModule +from fontTools.ttLib.tables import otBase, otTables +from fontTools.otlLib.builder import ( + AlternateSubstBuilder, + ChainContextPosBuilder, + ChainContextSubstBuilder, + LigatureSubstBuilder, + MultipleSubstBuilder, + CursivePosBuilder, + MarkBasePosBuilder, + MarkLigPosBuilder, + MarkMarkPosBuilder, + ReverseChainSingleSubstBuilder, + SingleSubstBuilder, + ClassPairPosSubtableBuilder, + PairPosBuilder, + SinglePosBuilder, + ChainContextualRule, +) +from fontTools.otlLib.error import OpenTypeLibError +from fontTools.varLib.varStore import OnlineVarStoreBuilder +from fontTools.varLib.builder import buildVarDevTable +from fontTools.varLib.featureVars import addFeatureVariationsRaw +from fontTools.varLib.models import normalizeValue +from collections import defaultdict +import itertools +from io import StringIO +import logging +import warnings +import os + + +log = logging.getLogger(__name__) + + +def addOpenTypeFeatures(font, featurefile, tables=None, debug=False): + """Add features from a file to a font. 
Note that this replaces any features + currently present. + + Args: + font (feaLib.ttLib.TTFont): The font object. + featurefile: Either a path or file object (in which case we + parse it into an AST), or a pre-parsed AST instance. + tables: If passed, restrict the set of affected tables to those in the + list. + debug: Whether to add source debugging information to the font in the + ``Debg`` table + + """ + builder = Builder(font, featurefile) + builder.build(tables=tables, debug=debug) + + +def addOpenTypeFeaturesFromString( + font, features, filename=None, tables=None, debug=False +): + """Add features from a string to a font. Note that this replaces any + features currently present. + + Args: + font (feaLib.ttLib.TTFont): The font object. + features: A string containing feature code. + filename: The directory containing ``filename`` is used as the root of + relative ``include()`` paths; if ``None`` is provided, the current + directory is assumed. + tables: If passed, restrict the set of affected tables to those in the + list. 
+ debug: Whether to add source debugging information to the font in the + ``Debg`` table + + """ + + featurefile = StringIO(tostr(features)) + if filename: + featurefile.name = filename + addOpenTypeFeatures(font, featurefile, tables=tables, debug=debug) + + +class Builder(object): + + supportedTables = frozenset( + Tag(tag) + for tag in [ + "BASE", + "GDEF", + "GPOS", + "GSUB", + "OS/2", + "head", + "hhea", + "name", + "vhea", + "STAT", + ] + ) + + def __init__(self, font, featurefile): + self.font = font + # 'featurefile' can be either a path or file object (in which case we + # parse it into an AST), or a pre-parsed AST instance + if isinstance(featurefile, FeatureFile): + self.parseTree, self.file = featurefile, None + else: + self.parseTree, self.file = None, featurefile + self.glyphMap = font.getReverseGlyphMap() + self.varstorebuilder = None + if "fvar" in font: + self.axes = font["fvar"].axes + self.varstorebuilder = OnlineVarStoreBuilder( + [ax.axisTag for ax in self.axes] + ) + self.default_language_systems_ = set() + self.script_ = None + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + self.language_systems = set() + self.seen_non_DFLT_script_ = False + self.named_lookups_ = {} + self.cur_lookup_ = None + self.cur_lookup_name_ = None + self.cur_feature_name_ = None + self.lookups_ = [] + self.lookup_locations = {"GSUB": {}, "GPOS": {}} + self.features_ = {} # ('latn', 'DEU ', 'smcp') --> [LookupBuilder*] + self.required_features_ = {} # ('latn', 'DEU ') --> 'scmp' + self.feature_variations_ = {} + # for feature 'aalt' + self.aalt_features_ = [] # [(location, featureName)*], for 'aalt' + self.aalt_location_ = None + self.aalt_alternates_ = {} + # for 'featureNames' + self.featureNames_ = set() + self.featureNames_ids_ = {} + # for 'cvParameters' + self.cv_parameters_ = set() + self.cv_parameters_ids_ = {} + self.cv_num_named_params_ = {} + self.cv_characters_ = defaultdict(list) + # for feature 'size' + self.size_parameters_ = None + # for 
table 'head' + self.fontRevision_ = None # 2.71 + # for table 'name' + self.names_ = [] + # for table 'BASE' + self.base_horiz_axis_ = None + self.base_vert_axis_ = None + # for table 'GDEF' + self.attachPoints_ = {} # "a" --> {3, 7} + self.ligCaretCoords_ = {} # "f_f_i" --> {300, 600} + self.ligCaretPoints_ = {} # "f_f_i" --> {3, 7} + self.glyphClassDefs_ = {} # "fi" --> (2, (file, line, column)) + self.markAttach_ = {} # "acute" --> (4, (file, line, column)) + self.markAttachClassID_ = {} # frozenset({"acute", "grave"}) --> 4 + self.markFilterSets_ = {} # frozenset({"acute", "grave"}) --> 4 + # for table 'OS/2' + self.os2_ = {} + # for table 'hhea' + self.hhea_ = {} + # for table 'vhea' + self.vhea_ = {} + # for table 'STAT' + self.stat_ = {} + # for conditionsets + self.conditionsets_ = {} + + def build(self, tables=None, debug=False): + if self.parseTree is None: + self.parseTree = Parser(self.file, self.glyphMap).parse() + self.parseTree.build(self) + # by default, build all the supported tables + if tables is None: + tables = self.supportedTables + else: + tables = frozenset(tables) + unsupported = tables - self.supportedTables + if unsupported: + unsupported_string = ", ".join(sorted(unsupported)) + raise NotImplementedError( + "The following tables were requested but are unsupported: " + f"{unsupported_string}." 
+ ) + if "GSUB" in tables: + self.build_feature_aalt_() + if "head" in tables: + self.build_head() + if "hhea" in tables: + self.build_hhea() + if "vhea" in tables: + self.build_vhea() + if "name" in tables: + self.build_name() + if "OS/2" in tables: + self.build_OS_2() + if "STAT" in tables: + self.build_STAT() + for tag in ("GPOS", "GSUB"): + if tag not in tables: + continue + table = self.makeTable(tag) + if self.feature_variations_: + self.makeFeatureVariations(table, tag) + if ( + table.ScriptList.ScriptCount > 0 + or table.FeatureList.FeatureCount > 0 + or table.LookupList.LookupCount > 0 + ): + fontTable = self.font[tag] = newTable(tag) + fontTable.table = table + elif tag in self.font: + del self.font[tag] + if any(tag in self.font for tag in ("GPOS", "GSUB")) and "OS/2" in self.font: + self.font["OS/2"].usMaxContext = maxCtxFont(self.font) + if "GDEF" in tables: + gdef = self.buildGDEF() + if gdef: + self.font["GDEF"] = gdef + elif "GDEF" in self.font: + del self.font["GDEF"] + elif self.varstorebuilder: + raise FeatureLibError("Must save GDEF when compiling a variable font") + if "BASE" in tables: + base = self.buildBASE() + if base: + self.font["BASE"] = base + elif "BASE" in self.font: + del self.font["BASE"] + if debug or os.environ.get(LOOKUP_DEBUG_ENV_VAR): + self.buildDebg() + + def get_chained_lookup_(self, location, builder_class): + result = builder_class(self.font, location) + result.lookupflag = self.lookupflag_ + result.markFilterSet = self.lookupflag_markFilterSet_ + self.lookups_.append(result) + return result + + def add_lookup_to_feature_(self, lookup, feature_name): + for script, lang in self.language_systems: + key = (script, lang, feature_name) + self.features_.setdefault(key, []).append(lookup) + + def get_lookup_(self, location, builder_class): + if ( + self.cur_lookup_ + and type(self.cur_lookup_) == builder_class + and self.cur_lookup_.lookupflag == self.lookupflag_ + and self.cur_lookup_.markFilterSet == 
self.lookupflag_markFilterSet_ + ): + return self.cur_lookup_ + if self.cur_lookup_name_ and self.cur_lookup_: + raise FeatureLibError( + "Within a named lookup block, all rules must be of " + "the same lookup type and flag", + location, + ) + self.cur_lookup_ = builder_class(self.font, location) + self.cur_lookup_.lookupflag = self.lookupflag_ + self.cur_lookup_.markFilterSet = self.lookupflag_markFilterSet_ + self.lookups_.append(self.cur_lookup_) + if self.cur_lookup_name_: + # We are starting a lookup rule inside a named lookup block. + self.named_lookups_[self.cur_lookup_name_] = self.cur_lookup_ + if self.cur_feature_name_: + # We are starting a lookup rule inside a feature. This includes + # lookup rules inside named lookups inside features. + self.add_lookup_to_feature_(self.cur_lookup_, self.cur_feature_name_) + return self.cur_lookup_ + + def build_feature_aalt_(self): + if not self.aalt_features_ and not self.aalt_alternates_: + return + alternates = {g: set(a) for g, a in self.aalt_alternates_.items()} + for location, name in self.aalt_features_ + [(None, "aalt")]: + feature = [ + (script, lang, feature, lookups) + for (script, lang, feature), lookups in self.features_.items() + if feature == name + ] + # "aalt" does not have to specify its own lookups, but it might. + if not feature and name != "aalt": + raise FeatureLibError( + "Feature %s has not been defined" % name, location + ) + for script, lang, feature, lookups in feature: + for lookuplist in lookups: + if not isinstance(lookuplist, list): + lookuplist = [lookuplist] + for lookup in lookuplist: + for glyph, alts in lookup.getAlternateGlyphs().items(): + alternates.setdefault(glyph, set()).update(alts) + single = { + glyph: list(repl)[0] for glyph, repl in alternates.items() if len(repl) == 1 + } + # TODO: Figure out the glyph alternate ordering used by makeotf. 
+ # https://github.com/fonttools/fonttools/issues/836 + multi = { + glyph: sorted(repl, key=self.font.getGlyphID) + for glyph, repl in alternates.items() + if len(repl) > 1 + } + if not single and not multi: + return + self.features_ = { + (script, lang, feature): lookups + for (script, lang, feature), lookups in self.features_.items() + if feature != "aalt" + } + old_lookups = self.lookups_ + self.lookups_ = [] + self.start_feature(self.aalt_location_, "aalt") + if single: + single_lookup = self.get_lookup_(location, SingleSubstBuilder) + single_lookup.mapping = single + if multi: + multi_lookup = self.get_lookup_(location, AlternateSubstBuilder) + multi_lookup.alternates = multi + self.end_feature() + self.lookups_.extend(old_lookups) + + def build_head(self): + if not self.fontRevision_: + return + table = self.font.get("head") + if not table: # this only happens for unit tests + table = self.font["head"] = newTable("head") + table.decompile(b"\0" * 54, self.font) + table.tableVersion = 1.0 + table.created = table.modified = 3406620153 # 2011-12-13 11:22:33 + table.fontRevision = self.fontRevision_ + + def build_hhea(self): + if not self.hhea_: + return + table = self.font.get("hhea") + if not table: # this only happens for unit tests + table = self.font["hhea"] = newTable("hhea") + table.decompile(b"\0" * 36, self.font) + table.tableVersion = 0x00010000 + if "caretoffset" in self.hhea_: + table.caretOffset = self.hhea_["caretoffset"] + if "ascender" in self.hhea_: + table.ascent = self.hhea_["ascender"] + if "descender" in self.hhea_: + table.descent = self.hhea_["descender"] + if "linegap" in self.hhea_: + table.lineGap = self.hhea_["linegap"] + + def build_vhea(self): + if not self.vhea_: + return + table = self.font.get("vhea") + if not table: # this only happens for unit tests + table = self.font["vhea"] = newTable("vhea") + table.decompile(b"\0" * 36, self.font) + table.tableVersion = 0x00011000 + if "verttypoascender" in self.vhea_: + table.ascent = 
self.vhea_["verttypoascender"] + if "verttypodescender" in self.vhea_: + table.descent = self.vhea_["verttypodescender"] + if "verttypolinegap" in self.vhea_: + table.lineGap = self.vhea_["verttypolinegap"] + + def get_user_name_id(self, table): + # Try to find first unused font-specific name id + nameIDs = [name.nameID for name in table.names] + for user_name_id in range(256, 32767): + if user_name_id not in nameIDs: + return user_name_id + + def buildFeatureParams(self, tag): + params = None + if tag == "size": + params = otTables.FeatureParamsSize() + ( + params.DesignSize, + params.SubfamilyID, + params.RangeStart, + params.RangeEnd, + ) = self.size_parameters_ + if tag in self.featureNames_ids_: + params.SubfamilyNameID = self.featureNames_ids_[tag] + else: + params.SubfamilyNameID = 0 + elif tag in self.featureNames_: + if not self.featureNames_ids_: + # name table wasn't selected among the tables to build; skip + pass + else: + assert tag in self.featureNames_ids_ + params = otTables.FeatureParamsStylisticSet() + params.Version = 0 + params.UINameID = self.featureNames_ids_[tag] + elif tag in self.cv_parameters_: + params = otTables.FeatureParamsCharacterVariants() + params.Format = 0 + params.FeatUILabelNameID = self.cv_parameters_ids_.get( + (tag, "FeatUILabelNameID"), 0 + ) + params.FeatUITooltipTextNameID = self.cv_parameters_ids_.get( + (tag, "FeatUITooltipTextNameID"), 0 + ) + params.SampleTextNameID = self.cv_parameters_ids_.get( + (tag, "SampleTextNameID"), 0 + ) + params.NumNamedParameters = self.cv_num_named_params_.get(tag, 0) + params.FirstParamUILabelNameID = self.cv_parameters_ids_.get( + (tag, "ParamUILabelNameID_0"), 0 + ) + params.CharCount = len(self.cv_characters_[tag]) + params.Character = self.cv_characters_[tag] + return params + + def build_name(self): + if not self.names_: + return + table = self.font.get("name") + if not table: # this only happens for unit tests + table = self.font["name"] = newTable("name") + table.names = [] + for 
name in self.names_: + nameID, platformID, platEncID, langID, string = name + # For featureNames block, nameID is 'feature tag' + # For cvParameters blocks, nameID is ('feature tag', 'block name') + if not isinstance(nameID, int): + tag = nameID + if tag in self.featureNames_: + if tag not in self.featureNames_ids_: + self.featureNames_ids_[tag] = self.get_user_name_id(table) + assert self.featureNames_ids_[tag] is not None + nameID = self.featureNames_ids_[tag] + elif tag[0] in self.cv_parameters_: + if tag not in self.cv_parameters_ids_: + self.cv_parameters_ids_[tag] = self.get_user_name_id(table) + assert self.cv_parameters_ids_[tag] is not None + nameID = self.cv_parameters_ids_[tag] + table.setName(string, nameID, platformID, platEncID, langID) + + def build_OS_2(self): + if not self.os2_: + return + table = self.font.get("OS/2") + if not table: # this only happens for unit tests + table = self.font["OS/2"] = newTable("OS/2") + data = b"\0" * sstruct.calcsize(getTableModule("OS/2").OS2_format_0) + table.decompile(data, self.font) + version = 0 + if "fstype" in self.os2_: + table.fsType = self.os2_["fstype"] + if "panose" in self.os2_: + panose = getTableModule("OS/2").Panose() + ( + panose.bFamilyType, + panose.bSerifStyle, + panose.bWeight, + panose.bProportion, + panose.bContrast, + panose.bStrokeVariation, + panose.bArmStyle, + panose.bLetterForm, + panose.bMidline, + panose.bXHeight, + ) = self.os2_["panose"] + table.panose = panose + if "typoascender" in self.os2_: + table.sTypoAscender = self.os2_["typoascender"] + if "typodescender" in self.os2_: + table.sTypoDescender = self.os2_["typodescender"] + if "typolinegap" in self.os2_: + table.sTypoLineGap = self.os2_["typolinegap"] + if "winascent" in self.os2_: + table.usWinAscent = self.os2_["winascent"] + if "windescent" in self.os2_: + table.usWinDescent = self.os2_["windescent"] + if "vendor" in self.os2_: + table.achVendID = safeEval("'''" + self.os2_["vendor"] + "'''") + if "weightclass" in 
self.os2_: + table.usWeightClass = self.os2_["weightclass"] + if "widthclass" in self.os2_: + table.usWidthClass = self.os2_["widthclass"] + if "unicoderange" in self.os2_: + table.setUnicodeRanges(self.os2_["unicoderange"]) + if "codepagerange" in self.os2_: + pages = self.build_codepages_(self.os2_["codepagerange"]) + table.ulCodePageRange1, table.ulCodePageRange2 = pages + version = 1 + if "xheight" in self.os2_: + table.sxHeight = self.os2_["xheight"] + version = 2 + if "capheight" in self.os2_: + table.sCapHeight = self.os2_["capheight"] + version = 2 + if "loweropsize" in self.os2_: + table.usLowerOpticalPointSize = self.os2_["loweropsize"] + version = 5 + if "upperopsize" in self.os2_: + table.usUpperOpticalPointSize = self.os2_["upperopsize"] + version = 5 + + def checkattr(table, attrs): + for attr in attrs: + if not hasattr(table, attr): + setattr(table, attr, 0) + + table.version = max(version, table.version) + # this only happens for unit tests + if version >= 1: + checkattr(table, ("ulCodePageRange1", "ulCodePageRange2")) + if version >= 2: + checkattr( + table, + ( + "sxHeight", + "sCapHeight", + "usDefaultChar", + "usBreakChar", + "usMaxContext", + ), + ) + if version >= 5: + checkattr(table, ("usLowerOpticalPointSize", "usUpperOpticalPointSize")) + + def setElidedFallbackName(self, value, location): + # ElidedFallbackName is a convenience method for setting + # ElidedFallbackNameID so only one can be allowed + for token in ("ElidedFallbackName", "ElidedFallbackNameID"): + if token in self.stat_: + raise FeatureLibError( + f"{token} is already set.", + location, + ) + if isinstance(value, int): + self.stat_["ElidedFallbackNameID"] = value + elif isinstance(value, list): + self.stat_["ElidedFallbackName"] = value + else: + raise AssertionError(value) + + def addDesignAxis(self, designAxis, location): + if "DesignAxes" not in self.stat_: + self.stat_["DesignAxes"] = [] + if designAxis.tag in (r.tag for r in self.stat_["DesignAxes"]): + raise 
FeatureLibError( + f'DesignAxis already defined for tag "{designAxis.tag}".', + location, + ) + if designAxis.axisOrder in (r.axisOrder for r in self.stat_["DesignAxes"]): + raise FeatureLibError( + f"DesignAxis already defined for axis number {designAxis.axisOrder}.", + location, + ) + self.stat_["DesignAxes"].append(designAxis) + + def addAxisValueRecord(self, axisValueRecord, location): + if "AxisValueRecords" not in self.stat_: + self.stat_["AxisValueRecords"] = [] + # Check for duplicate AxisValueRecords + for record_ in self.stat_["AxisValueRecords"]: + if ( + {n.asFea() for n in record_.names} + == {n.asFea() for n in axisValueRecord.names} + and {n.asFea() for n in record_.locations} + == {n.asFea() for n in axisValueRecord.locations} + and record_.flags == axisValueRecord.flags + ): + raise FeatureLibError( + "An AxisValueRecord with these values is already defined.", + location, + ) + self.stat_["AxisValueRecords"].append(axisValueRecord) + + def build_STAT(self): + if not self.stat_: + return + + axes = self.stat_.get("DesignAxes") + if not axes: + raise FeatureLibError("DesignAxes not defined", None) + axisValueRecords = self.stat_.get("AxisValueRecords") + axisValues = {} + format4_locations = [] + for tag in axes: + axisValues[tag.tag] = [] + if axisValueRecords is not None: + for avr in axisValueRecords: + valuesDict = {} + if avr.flags > 0: + valuesDict["flags"] = avr.flags + if len(avr.locations) == 1: + location = avr.locations[0] + values = location.values + if len(values) == 1: # format1 + valuesDict.update({"value": values[0], "name": avr.names}) + if len(values) == 2: # format3 + valuesDict.update( + { + "value": values[0], + "linkedValue": values[1], + "name": avr.names, + } + ) + if len(values) == 3: # format2 + nominal, minVal, maxVal = values + valuesDict.update( + { + "nominalValue": nominal, + "rangeMinValue": minVal, + "rangeMaxValue": maxVal, + "name": avr.names, + } + ) + axisValues[location.tag].append(valuesDict) + else: + 
valuesDict.update( + { + "location": {i.tag: i.values[0] for i in avr.locations}, + "name": avr.names, + } + ) + format4_locations.append(valuesDict) + + designAxes = [ + { + "ordering": a.axisOrder, + "tag": a.tag, + "name": a.names, + "values": axisValues[a.tag], + } + for a in axes + ] + + nameTable = self.font.get("name") + if not nameTable: # this only happens for unit tests + nameTable = self.font["name"] = newTable("name") + nameTable.names = [] + + if "ElidedFallbackNameID" in self.stat_: + nameID = self.stat_["ElidedFallbackNameID"] + name = nameTable.getDebugName(nameID) + if not name: + raise FeatureLibError( + f"ElidedFallbackNameID {nameID} points " + "to a nameID that does not exist in the " + '"name" table', + None, + ) + elif "ElidedFallbackName" in self.stat_: + nameID = self.stat_["ElidedFallbackName"] + + otl.buildStatTable( + self.font, + designAxes, + locations=format4_locations, + elidedFallbackName=nameID, + ) + + def build_codepages_(self, pages): + pages2bits = { + 1252: 0, + 1250: 1, + 1251: 2, + 1253: 3, + 1254: 4, + 1255: 5, + 1256: 6, + 1257: 7, + 1258: 8, + 874: 16, + 932: 17, + 936: 18, + 949: 19, + 950: 20, + 1361: 21, + 869: 48, + 866: 49, + 865: 50, + 864: 51, + 863: 52, + 862: 53, + 861: 54, + 860: 55, + 857: 56, + 855: 57, + 852: 58, + 775: 59, + 737: 60, + 708: 61, + 850: 62, + 437: 63, + } + bits = [pages2bits[p] for p in pages if p in pages2bits] + pages = [] + for i in range(2): + pages.append("") + for j in range(i * 32, (i + 1) * 32): + if j in bits: + pages[i] += "1" + else: + pages[i] += "0" + return [binary2num(p[::-1]) for p in pages] + + def buildBASE(self): + if not self.base_horiz_axis_ and not self.base_vert_axis_: + return None + base = otTables.BASE() + base.Version = 0x00010000 + base.HorizAxis = self.buildBASEAxis(self.base_horiz_axis_) + base.VertAxis = self.buildBASEAxis(self.base_vert_axis_) + + result = newTable("BASE") + result.table = base + return result + + def buildBASEAxis(self, axis): + if not axis: + 
return + bases, scripts = axis + axis = otTables.Axis() + axis.BaseTagList = otTables.BaseTagList() + axis.BaseTagList.BaselineTag = bases + axis.BaseTagList.BaseTagCount = len(bases) + axis.BaseScriptList = otTables.BaseScriptList() + axis.BaseScriptList.BaseScriptRecord = [] + axis.BaseScriptList.BaseScriptCount = len(scripts) + for script in sorted(scripts): + record = otTables.BaseScriptRecord() + record.BaseScriptTag = script[0] + record.BaseScript = otTables.BaseScript() + record.BaseScript.BaseLangSysCount = 0 + record.BaseScript.BaseValues = otTables.BaseValues() + record.BaseScript.BaseValues.DefaultIndex = bases.index(script[1]) + record.BaseScript.BaseValues.BaseCoord = [] + record.BaseScript.BaseValues.BaseCoordCount = len(script[2]) + for c in script[2]: + coord = otTables.BaseCoord() + coord.Format = 1 + coord.Coordinate = c + record.BaseScript.BaseValues.BaseCoord.append(coord) + axis.BaseScriptList.BaseScriptRecord.append(record) + return axis + + def buildGDEF(self): + gdef = otTables.GDEF() + gdef.GlyphClassDef = self.buildGDEFGlyphClassDef_() + gdef.AttachList = otl.buildAttachList(self.attachPoints_, self.glyphMap) + gdef.LigCaretList = otl.buildLigCaretList( + self.ligCaretCoords_, self.ligCaretPoints_, self.glyphMap + ) + gdef.MarkAttachClassDef = self.buildGDEFMarkAttachClassDef_() + gdef.MarkGlyphSetsDef = self.buildGDEFMarkGlyphSetsDef_() + gdef.Version = 0x00010002 if gdef.MarkGlyphSetsDef else 0x00010000 + if self.varstorebuilder: + store = self.varstorebuilder.finish() + if store.VarData: + gdef.Version = 0x00010003 + gdef.VarStore = store + varidx_map = store.optimize() + + gdef.remap_device_varidxes(varidx_map) + if 'GPOS' in self.font: + self.font['GPOS'].table.remap_device_varidxes(varidx_map) + if any( + ( + gdef.GlyphClassDef, + gdef.AttachList, + gdef.LigCaretList, + gdef.MarkAttachClassDef, + gdef.MarkGlyphSetsDef, + ) + ) or hasattr(gdef, "VarStore"): + result = newTable("GDEF") + result.table = gdef + return result + else: + 
return None + + def buildGDEFGlyphClassDef_(self): + if self.glyphClassDefs_: + classes = {g: c for (g, (c, _)) in self.glyphClassDefs_.items()} + else: + classes = {} + for lookup in self.lookups_: + classes.update(lookup.inferGlyphClasses()) + for markClass in self.parseTree.markClasses.values(): + for markClassDef in markClass.definitions: + for glyph in markClassDef.glyphSet(): + classes[glyph] = 3 + if classes: + result = otTables.GlyphClassDef() + result.classDefs = classes + return result + else: + return None + + def buildGDEFMarkAttachClassDef_(self): + classDefs = {g: c for g, (c, _) in self.markAttach_.items()} + if not classDefs: + return None + result = otTables.MarkAttachClassDef() + result.classDefs = classDefs + return result + + def buildGDEFMarkGlyphSetsDef_(self): + sets = [] + for glyphs, id_ in sorted( + self.markFilterSets_.items(), key=lambda item: item[1] + ): + sets.append(glyphs) + return otl.buildMarkGlyphSetsDef(sets, self.glyphMap) + + def buildDebg(self): + if "Debg" not in self.font: + self.font["Debg"] = newTable("Debg") + self.font["Debg"].data = {} + self.font["Debg"].data[LOOKUP_DEBUG_INFO_KEY] = self.lookup_locations + + def buildLookups_(self, tag): + assert tag in ("GPOS", "GSUB"), tag + for lookup in self.lookups_: + lookup.lookup_index = None + lookups = [] + for lookup in self.lookups_: + if lookup.table != tag: + continue + lookup.lookup_index = len(lookups) + self.lookup_locations[tag][str(lookup.lookup_index)] = LookupDebugInfo( + location=str(lookup.location), + name=self.get_lookup_name_(lookup), + feature=None, + ) + lookups.append(lookup) + try: + otLookups = [l.build() for l in lookups] + except OpenTypeLibError as e: + raise FeatureLibError(str(e), e.location) from e + return otLookups + + def makeTable(self, tag): + table = getattr(otTables, tag, None)() + table.Version = 0x00010000 + table.ScriptList = otTables.ScriptList() + table.ScriptList.ScriptRecord = [] + table.FeatureList = otTables.FeatureList() + 
table.FeatureList.FeatureRecord = [] + table.LookupList = otTables.LookupList() + table.LookupList.Lookup = self.buildLookups_(tag) + + # Build a table for mapping (tag, lookup_indices) to feature_index. + # For example, ('liga', (2,3,7)) --> 23. + feature_indices = {} + required_feature_indices = {} # ('latn', 'DEU') --> 23 + scripts = {} # 'latn' --> {'DEU': [23, 24]} for feature #23,24 + # Sort the feature table by feature tag: + # https://github.com/fonttools/fonttools/issues/568 + sortFeatureTag = lambda f: (f[0][2], f[0][1], f[0][0], f[1]) + for key, lookups in sorted(self.features_.items(), key=sortFeatureTag): + script, lang, feature_tag = key + # l.lookup_index will be None when a lookup is not needed + # for the table under construction. For example, substitution + # rules will have no lookup_index while building GPOS tables. + lookup_indices = tuple( + [l.lookup_index for l in lookups if l.lookup_index is not None] + ) + + size_feature = tag == "GPOS" and feature_tag == "size" + force_feature = self.any_feature_variations(feature_tag, tag) + if len(lookup_indices) == 0 and not size_feature and not force_feature: + continue + + for ix in lookup_indices: + try: + self.lookup_locations[tag][str(ix)] = self.lookup_locations[tag][ + str(ix) + ]._replace(feature=key) + except KeyError: + warnings.warn( + "feaLib.Builder subclass needs upgrading to " + "stash debug information. See fonttools#2065." 
+ ) + + feature_key = (feature_tag, lookup_indices) + feature_index = feature_indices.get(feature_key) + if feature_index is None: + feature_index = len(table.FeatureList.FeatureRecord) + frec = otTables.FeatureRecord() + frec.FeatureTag = feature_tag + frec.Feature = otTables.Feature() + frec.Feature.FeatureParams = self.buildFeatureParams(feature_tag) + frec.Feature.LookupListIndex = list(lookup_indices) + frec.Feature.LookupCount = len(lookup_indices) + table.FeatureList.FeatureRecord.append(frec) + feature_indices[feature_key] = feature_index + scripts.setdefault(script, {}).setdefault(lang, []).append(feature_index) + if self.required_features_.get((script, lang)) == feature_tag: + required_feature_indices[(script, lang)] = feature_index + + # Build ScriptList. + for script, lang_features in sorted(scripts.items()): + srec = otTables.ScriptRecord() + srec.ScriptTag = script + srec.Script = otTables.Script() + srec.Script.DefaultLangSys = None + srec.Script.LangSysRecord = [] + for lang, feature_indices in sorted(lang_features.items()): + langrec = otTables.LangSysRecord() + langrec.LangSys = otTables.LangSys() + langrec.LangSys.LookupOrder = None + + req_feature_index = required_feature_indices.get((script, lang)) + if req_feature_index is None: + langrec.LangSys.ReqFeatureIndex = 0xFFFF + else: + langrec.LangSys.ReqFeatureIndex = req_feature_index + + langrec.LangSys.FeatureIndex = [ + i for i in feature_indices if i != req_feature_index + ] + langrec.LangSys.FeatureCount = len(langrec.LangSys.FeatureIndex) + + if lang == "dflt": + srec.Script.DefaultLangSys = langrec.LangSys + else: + langrec.LangSysTag = lang + srec.Script.LangSysRecord.append(langrec) + srec.Script.LangSysCount = len(srec.Script.LangSysRecord) + table.ScriptList.ScriptRecord.append(srec) + + table.ScriptList.ScriptCount = len(table.ScriptList.ScriptRecord) + table.FeatureList.FeatureCount = len(table.FeatureList.FeatureRecord) + table.LookupList.LookupCount = len(table.LookupList.Lookup) + 
return table + + def makeFeatureVariations(self, table, table_tag): + feature_vars = {} + has_any_variations = False + # Sort out which lookups to build, gather their indices + for ( + script_, + language, + feature_tag, + ), variations in self.feature_variations_.items(): + feature_vars[feature_tag] = [] + for conditionset, builders in variations.items(): + raw_conditionset = self.conditionsets_[conditionset] + indices = [] + for b in builders: + if b.table != table_tag: + continue + assert b.lookup_index is not None + indices.append(b.lookup_index) + has_any_variations = True + feature_vars[feature_tag].append((raw_conditionset, indices)) + + if has_any_variations: + for feature_tag, conditions_and_lookups in feature_vars.items(): + addFeatureVariationsRaw( + self.font, table, conditions_and_lookups, feature_tag + ) + + def any_feature_variations(self, feature_tag, table_tag): + for (_, _, feature), variations in self.feature_variations_.items(): + if feature != feature_tag: + continue + for conditionset, builders in variations.items(): + if any(b.table == table_tag for b in builders): + return True + return False + + def get_lookup_name_(self, lookup): + rev = {v: k for k, v in self.named_lookups_.items()} + if lookup in rev: + return rev[lookup] + return None + + def add_language_system(self, location, script, language): + # OpenType Feature File Specification, section 4.b.i + if script == "DFLT" and language == "dflt" and self.default_language_systems_: + raise FeatureLibError( + 'If "languagesystem DFLT dflt" is present, it must be ' + "the first of the languagesystem statements", + location, + ) + if script == "DFLT": + if self.seen_non_DFLT_script_: + raise FeatureLibError( + 'languagesystems using the "DFLT" script tag must ' + "precede all other languagesystems", + location, + ) + else: + self.seen_non_DFLT_script_ = True + if (script, language) in self.default_language_systems_: + raise FeatureLibError( + '"languagesystem %s %s" has already been 
specified' + % (script.strip(), language.strip()), + location, + ) + self.default_language_systems_.add((script, language)) + + def get_default_language_systems_(self): + # OpenType Feature File specification, 4.b.i. languagesystem: + # If no "languagesystem" statement is present, then the + # implementation must behave exactly as though the following + # statement were present at the beginning of the feature file: + # languagesystem DFLT dflt; + if self.default_language_systems_: + return frozenset(self.default_language_systems_) + else: + return frozenset({("DFLT", "dflt")}) + + def start_feature(self, location, name): + self.language_systems = self.get_default_language_systems_() + self.script_ = "DFLT" + self.cur_lookup_ = None + self.cur_feature_name_ = name + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + if name == "aalt": + self.aalt_location_ = location + + def end_feature(self): + assert self.cur_feature_name_ is not None + self.cur_feature_name_ = None + self.language_systems = None + self.cur_lookup_ = None + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + + def start_lookup_block(self, location, name): + if name in self.named_lookups_: + raise FeatureLibError( + 'Lookup "%s" has already been defined' % name, location + ) + if self.cur_feature_name_ == "aalt": + raise FeatureLibError( + "Lookup blocks cannot be placed inside 'aalt' features; " + "move it out, and then refer to it with a lookup statement", + location, + ) + self.cur_lookup_name_ = name + self.named_lookups_[name] = None + self.cur_lookup_ = None + if self.cur_feature_name_ is None: + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + + def end_lookup_block(self): + assert self.cur_lookup_name_ is not None + self.cur_lookup_name_ = None + self.cur_lookup_ = None + if self.cur_feature_name_ is None: + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + + def add_lookup_call(self, lookup_name): + assert lookup_name in 
self.named_lookups_, lookup_name + self.cur_lookup_ = None + lookup = self.named_lookups_[lookup_name] + if lookup is not None: # skip empty named lookup + self.add_lookup_to_feature_(lookup, self.cur_feature_name_) + + def set_font_revision(self, location, revision): + self.fontRevision_ = revision + + def set_language(self, location, language, include_default, required): + assert len(language) == 4 + if self.cur_feature_name_ in ("aalt", "size"): + raise FeatureLibError( + "Language statements are not allowed " + 'within "feature %s"' % self.cur_feature_name_, + location, + ) + if self.cur_feature_name_ is None: + raise FeatureLibError( + "Language statements are not allowed " + "within standalone lookup blocks", + location, + ) + self.cur_lookup_ = None + + key = (self.script_, language, self.cur_feature_name_) + lookups = self.features_.get((key[0], "dflt", key[2])) + if (language == "dflt" or include_default) and lookups: + self.features_[key] = lookups[:] + else: + self.features_[key] = [] + self.language_systems = frozenset([(self.script_, language)]) + + if required: + key = (self.script_, language) + if key in self.required_features_: + raise FeatureLibError( + "Language %s (script %s) has already " + "specified feature %s as its required feature" + % ( + language.strip(), + self.script_.strip(), + self.required_features_[key].strip(), + ), + location, + ) + self.required_features_[key] = self.cur_feature_name_ + + def getMarkAttachClass_(self, location, glyphs): + glyphs = frozenset(glyphs) + id_ = self.markAttachClassID_.get(glyphs) + if id_ is not None: + return id_ + id_ = len(self.markAttachClassID_) + 1 + self.markAttachClassID_[glyphs] = id_ + for glyph in glyphs: + if glyph in self.markAttach_: + _, loc = self.markAttach_[glyph] + raise FeatureLibError( + "Glyph %s already has been assigned " + "a MarkAttachmentType at %s" % (glyph, loc), + location, + ) + self.markAttach_[glyph] = (id_, location) + return id_ + + def getMarkFilterSet_(self, 
location, glyphs):
        """Return the mark filtering set id for *glyphs*, allocating one if
        this glyph set has not been seen before.

        Note: ids are 0-based here, unlike getMarkAttachClass_ (1-based).
        """
        glyphs = frozenset(glyphs)
        id_ = self.markFilterSets_.get(glyphs)
        if id_ is not None:
            return id_
        id_ = len(self.markFilterSets_)
        self.markFilterSets_[glyphs] = id_
        return id_

    def set_lookup_flag(self, location, value, markAttach, markFilter):
        """Handle a ``lookupflag`` statement.

        value: low byte of the LookupFlag field.
        markAttach: glyphs of a MarkAttachmentType class (id stored in the
            high byte, ``<< 8``), or falsy.
        markFilter: glyphs of a mark filtering set (enables flag bit 0x10),
            or falsy.
        """
        value = value & 0xFF
        if markAttach:
            markAttachClass = self.getMarkAttachClass_(location, markAttach)
            value = value | (markAttachClass << 8)
        if markFilter:
            markFilterSet = self.getMarkFilterSet_(location, markFilter)
            value = value | 0x10
            self.lookupflag_markFilterSet_ = markFilterSet
        else:
            self.lookupflag_markFilterSet_ = None
        self.lookupflag_ = value

    def set_script(self, location, script):
        """Handle a ``script`` statement inside a feature block."""
        if self.cur_feature_name_ in ("aalt", "size"):
            raise FeatureLibError(
                "Script statements are not allowed "
                'within "feature %s"' % self.cur_feature_name_,
                location,
            )
        if self.cur_feature_name_ is None:
            raise FeatureLibError(
                "Script statements are not allowed " "within standalone lookup blocks",
                location,
            )
        if self.language_systems == {(script, "dflt")}:
            # Nothing to do.
            return
        self.cur_lookup_ = None
        self.script_ = script
        self.lookupflag_ = 0
        self.lookupflag_markFilterSet_ = None
        # Per the feature file spec, a script statement implies "language dflt;".
        self.set_language(location, "dflt", include_default=True, required=False)

    def find_lookup_builders_(self, lookups):
        """Helper for building chain contextual substitutions

        Given a list of lookup names, finds the LookupBuilder for each name.
        If an input name is None, it gets mapped to a None LookupBuilder.
+ """ + lookup_builders = [] + for lookuplist in lookups: + if lookuplist is not None: + lookup_builders.append( + [self.named_lookups_.get(l.name) for l in lookuplist] + ) + else: + lookup_builders.append(None) + return lookup_builders + + def add_attach_points(self, location, glyphs, contourPoints): + for glyph in glyphs: + self.attachPoints_.setdefault(glyph, set()).update(contourPoints) + + def add_feature_reference(self, location, featureName): + if self.cur_feature_name_ != "aalt": + raise FeatureLibError( + 'Feature references are only allowed inside "feature aalt"', location + ) + self.aalt_features_.append((location, featureName)) + + def add_featureName(self, tag): + self.featureNames_.add(tag) + + def add_cv_parameter(self, tag): + self.cv_parameters_.add(tag) + + def add_to_cv_num_named_params(self, tag): + """Adds new items to ``self.cv_num_named_params_`` + or increments the count of existing items.""" + if tag in self.cv_num_named_params_: + self.cv_num_named_params_[tag] += 1 + else: + self.cv_num_named_params_[tag] = 1 + + def add_cv_character(self, character, tag): + self.cv_characters_[tag].append(character) + + def set_base_axis(self, bases, scripts, vertical): + if vertical: + self.base_vert_axis_ = (bases, scripts) + else: + self.base_horiz_axis_ = (bases, scripts) + + def set_size_parameters( + self, location, DesignSize, SubfamilyID, RangeStart, RangeEnd + ): + if self.cur_feature_name_ != "size": + raise FeatureLibError( + "Parameters statements are not allowed " + 'within "feature %s"' % self.cur_feature_name_, + location, + ) + self.size_parameters_ = [DesignSize, SubfamilyID, RangeStart, RangeEnd] + for script, lang in self.language_systems: + key = (script, lang, self.cur_feature_name_) + self.features_.setdefault(key, []) + + # GSUB rules + + # GSUB 1 + def add_single_subst(self, location, prefix, suffix, mapping, forceChain): + if self.cur_feature_name_ == "aalt": + for (from_glyph, to_glyph) in mapping.items(): + alts = 
self.aalt_alternates_.setdefault(from_glyph, set())
                alts.add(to_glyph)
            return
        if prefix or suffix or forceChain:
            # Contextual form: delegate to GSUB 6 machinery.
            self.add_single_subst_chained_(location, prefix, suffix, mapping)
            return
        lookup = self.get_lookup_(location, SingleSubstBuilder)
        for (from_glyph, to_glyph) in mapping.items():
            if from_glyph in lookup.mapping:
                if to_glyph == lookup.mapping[from_glyph]:
                    # Identical duplicate: drop with a note rather than error.
                    log.info(
                        "Removing duplicate single substitution from glyph"
                        ' "%s" to "%s" at %s',
                        from_glyph,
                        to_glyph,
                        location,
                    )
                else:
                    raise FeatureLibError(
                        'Already defined rule for replacing glyph "%s" by "%s"'
                        % (from_glyph, lookup.mapping[from_glyph]),
                        location,
                    )
            lookup.mapping[from_glyph] = to_glyph

    # GSUB 2
    def add_multiple_subst(
        self, location, prefix, glyph, suffix, replacements, forceChain=False
    ):
        """Add a one-to-many substitution rule (optionally contextual)."""
        if prefix or suffix or forceChain:
            chain = self.get_lookup_(location, ChainContextSubstBuilder)
            sub = self.get_chained_lookup_(location, MultipleSubstBuilder)
            sub.mapping[glyph] = replacements
            chain.rules.append(ChainContextualRule(prefix, [{glyph}], suffix, [sub]))
            return
        lookup = self.get_lookup_(location, MultipleSubstBuilder)
        if glyph in lookup.mapping:
            if replacements == lookup.mapping[glyph]:
                log.info(
                    "Removing duplicate multiple substitution from glyph"
                    ' "%s" to %s%s',
                    glyph,
                    replacements,
                    f" at {location}" if location else "",
                )
            else:
                raise FeatureLibError(
                    'Already defined substitution for glyph "%s"' % glyph, location
                )
        lookup.mapping[glyph] = replacements

    # GSUB 3
    def add_alternate_subst(self, location, prefix, glyph, suffix, replacement):
        """Add an alternate substitution rule (one glyph -> set of glyphs)."""
        if self.cur_feature_name_ == "aalt":
            # Inside 'aalt', just collect alternates for later assembly.
            alts = self.aalt_alternates_.setdefault(glyph, set())
            alts.update(replacement)
            return
        if prefix or suffix:
            chain = self.get_lookup_(location, ChainContextSubstBuilder)
            lookup = self.get_chained_lookup_(location, AlternateSubstBuilder)
            chain.rules.append(ChainContextualRule(prefix, [{glyph}], suffix,
[lookup])) + else: + lookup = self.get_lookup_(location, AlternateSubstBuilder) + if glyph in lookup.alternates: + raise FeatureLibError( + 'Already defined alternates for glyph "%s"' % glyph, location + ) + # We allow empty replacement glyphs here. + lookup.alternates[glyph] = replacement + + # GSUB 4 + def add_ligature_subst( + self, location, prefix, glyphs, suffix, replacement, forceChain + ): + if prefix or suffix or forceChain: + chain = self.get_lookup_(location, ChainContextSubstBuilder) + lookup = self.get_chained_lookup_(location, LigatureSubstBuilder) + chain.rules.append(ChainContextualRule(prefix, glyphs, suffix, [lookup])) + else: + lookup = self.get_lookup_(location, LigatureSubstBuilder) + + if not all(glyphs): + raise FeatureLibError("Empty glyph class in substitution", location) + + # OpenType feature file syntax, section 5.d, "Ligature substitution": + # "Since the OpenType specification does not allow ligature + # substitutions to be specified on target sequences that contain + # glyph classes, the implementation software will enumerate + # all specific glyph sequences if glyph classes are detected" + for g in sorted(itertools.product(*glyphs)): + lookup.ligatures[g] = replacement + + # GSUB 5/6 + def add_chain_context_subst(self, location, prefix, glyphs, suffix, lookups): + if not all(glyphs) or not all(prefix) or not all(suffix): + raise FeatureLibError("Empty glyph class in contextual substitution", location) + lookup = self.get_lookup_(location, ChainContextSubstBuilder) + lookup.rules.append( + ChainContextualRule( + prefix, glyphs, suffix, self.find_lookup_builders_(lookups) + ) + ) + + def add_single_subst_chained_(self, location, prefix, suffix, mapping): + if not mapping or not all(prefix) or not all(suffix): + raise FeatureLibError("Empty glyph class in contextual substitution", location) + # https://github.com/fonttools/fonttools/issues/512 + chain = self.get_lookup_(location, ChainContextSubstBuilder) + sub = 
chain.find_chainable_single_subst(set(mapping.keys())) + if sub is None: + sub = self.get_chained_lookup_(location, SingleSubstBuilder) + sub.mapping.update(mapping) + chain.rules.append( + ChainContextualRule(prefix, [list(mapping.keys())], suffix, [sub]) + ) + + # GSUB 8 + def add_reverse_chain_single_subst(self, location, old_prefix, old_suffix, mapping): + if not mapping: + raise FeatureLibError("Empty glyph class in substitution", location) + lookup = self.get_lookup_(location, ReverseChainSingleSubstBuilder) + lookup.rules.append((old_prefix, old_suffix, mapping)) + + # GPOS rules + + # GPOS 1 + def add_single_pos(self, location, prefix, suffix, pos, forceChain): + if prefix or suffix or forceChain: + self.add_single_pos_chained_(location, prefix, suffix, pos) + else: + lookup = self.get_lookup_(location, SinglePosBuilder) + for glyphs, value in pos: + if not glyphs: + raise FeatureLibError("Empty glyph class in positioning rule", location) + otValueRecord = self.makeOpenTypeValueRecord(location, value, pairPosContext=False) + for glyph in glyphs: + try: + lookup.add_pos(location, glyph, otValueRecord) + except OpenTypeLibError as e: + raise FeatureLibError(str(e), e.location) from e + + # GPOS 2 + def add_class_pair_pos(self, location, glyphclass1, value1, glyphclass2, value2): + if not glyphclass1 or not glyphclass2: + raise FeatureLibError( + "Empty glyph class in positioning rule", location + ) + lookup = self.get_lookup_(location, PairPosBuilder) + v1 = self.makeOpenTypeValueRecord(location, value1, pairPosContext=True) + v2 = self.makeOpenTypeValueRecord(location, value2, pairPosContext=True) + lookup.addClassPair(location, glyphclass1, v1, glyphclass2, v2) + + def add_specific_pair_pos(self, location, glyph1, value1, glyph2, value2): + if not glyph1 or not glyph2: + raise FeatureLibError("Empty glyph class in positioning rule", location) + lookup = self.get_lookup_(location, PairPosBuilder) + v1 = self.makeOpenTypeValueRecord(location, value1, 
pairPosContext=True) + v2 = self.makeOpenTypeValueRecord(location, value2, pairPosContext=True) + lookup.addGlyphPair(location, glyph1, v1, glyph2, v2) + + # GPOS 3 + def add_cursive_pos(self, location, glyphclass, entryAnchor, exitAnchor): + if not glyphclass: + raise FeatureLibError("Empty glyph class in positioning rule", location) + lookup = self.get_lookup_(location, CursivePosBuilder) + lookup.add_attachment( + location, + glyphclass, + self.makeOpenTypeAnchor(location, entryAnchor), + self.makeOpenTypeAnchor(location, exitAnchor), + ) + + # GPOS 4 + def add_mark_base_pos(self, location, bases, marks): + builder = self.get_lookup_(location, MarkBasePosBuilder) + self.add_marks_(location, builder, marks) + if not bases: + raise FeatureLibError("Empty glyph class in positioning rule", location) + for baseAnchor, markClass in marks: + otBaseAnchor = self.makeOpenTypeAnchor(location, baseAnchor) + for base in bases: + builder.bases.setdefault(base, {})[markClass.name] = otBaseAnchor + + # GPOS 5 + def add_mark_lig_pos(self, location, ligatures, components): + builder = self.get_lookup_(location, MarkLigPosBuilder) + componentAnchors = [] + if not ligatures: + raise FeatureLibError("Empty glyph class in positioning rule", location) + for marks in components: + anchors = {} + self.add_marks_(location, builder, marks) + for ligAnchor, markClass in marks: + anchors[markClass.name] = self.makeOpenTypeAnchor(location, ligAnchor) + componentAnchors.append(anchors) + for glyph in ligatures: + builder.ligatures[glyph] = componentAnchors + + # GPOS 6 + def add_mark_mark_pos(self, location, baseMarks, marks): + builder = self.get_lookup_(location, MarkMarkPosBuilder) + self.add_marks_(location, builder, marks) + if not baseMarks: + raise FeatureLibError("Empty glyph class in positioning rule", location) + for baseAnchor, markClass in marks: + otBaseAnchor = self.makeOpenTypeAnchor(location, baseAnchor) + for baseMark in baseMarks: + builder.baseMarks.setdefault(baseMark, 
{})[ + markClass.name + ] = otBaseAnchor + + # GPOS 7/8 + def add_chain_context_pos(self, location, prefix, glyphs, suffix, lookups): + if not all(glyphs) or not all(prefix) or not all(suffix): + raise FeatureLibError("Empty glyph class in contextual positioning rule", location) + lookup = self.get_lookup_(location, ChainContextPosBuilder) + lookup.rules.append( + ChainContextualRule( + prefix, glyphs, suffix, self.find_lookup_builders_(lookups) + ) + ) + + def add_single_pos_chained_(self, location, prefix, suffix, pos): + if not pos or not all(prefix) or not all(suffix): + raise FeatureLibError("Empty glyph class in contextual positioning rule", location) + # https://github.com/fonttools/fonttools/issues/514 + chain = self.get_lookup_(location, ChainContextPosBuilder) + targets = [] + for _, _, _, lookups in chain.rules: + targets.extend(lookups) + subs = [] + for glyphs, value in pos: + if value is None: + subs.append(None) + continue + otValue = self.makeOpenTypeValueRecord(location, value, pairPosContext=False) + sub = chain.find_chainable_single_pos(targets, glyphs, otValue) + if sub is None: + sub = self.get_chained_lookup_(location, SinglePosBuilder) + targets.append(sub) + for glyph in glyphs: + sub.add_pos(location, glyph, otValue) + subs.append(sub) + assert len(pos) == len(subs), (pos, subs) + chain.rules.append( + ChainContextualRule(prefix, [g for g, v in pos], suffix, subs) + ) + + def add_marks_(self, location, lookupBuilder, marks): + """Helper for add_mark_{base,liga,mark}_pos.""" + for _, markClass in marks: + for markClassDef in markClass.definitions: + for mark in markClassDef.glyphs.glyphSet(): + if mark not in lookupBuilder.marks: + otMarkAnchor = self.makeOpenTypeAnchor(location, markClassDef.anchor) + lookupBuilder.marks[mark] = (markClass.name, otMarkAnchor) + else: + existingMarkClass = lookupBuilder.marks[mark][0] + if markClass.name != existingMarkClass: + raise FeatureLibError( + "Glyph %s cannot be in both @%s and @%s" + % (mark, 
existingMarkClass, markClass.name), + location, + ) + + def add_subtable_break(self, location): + self.cur_lookup_.add_subtable_break(location) + + def setGlyphClass_(self, location, glyph, glyphClass): + oldClass, oldLocation = self.glyphClassDefs_.get(glyph, (None, None)) + if oldClass and oldClass != glyphClass: + raise FeatureLibError( + "Glyph %s was assigned to a different class at %s" + % (glyph, oldLocation), + location, + ) + self.glyphClassDefs_[glyph] = (glyphClass, location) + + def add_glyphClassDef( + self, location, baseGlyphs, ligatureGlyphs, markGlyphs, componentGlyphs + ): + for glyph in baseGlyphs: + self.setGlyphClass_(location, glyph, 1) + for glyph in ligatureGlyphs: + self.setGlyphClass_(location, glyph, 2) + for glyph in markGlyphs: + self.setGlyphClass_(location, glyph, 3) + for glyph in componentGlyphs: + self.setGlyphClass_(location, glyph, 4) + + def add_ligatureCaretByIndex_(self, location, glyphs, carets): + for glyph in glyphs: + if glyph not in self.ligCaretPoints_: + self.ligCaretPoints_[glyph] = carets + + def add_ligatureCaretByPos_(self, location, glyphs, carets): + for glyph in glyphs: + if glyph not in self.ligCaretCoords_: + self.ligCaretCoords_[glyph] = carets + + def add_name_record(self, location, nameID, platformID, platEncID, langID, string): + self.names_.append([nameID, platformID, platEncID, langID, string]) + + def add_os2_field(self, key, value): + self.os2_[key] = value + + def add_hhea_field(self, key, value): + self.hhea_[key] = value + + def add_vhea_field(self, key, value): + self.vhea_[key] = value + + def add_conditionset(self, key, value): + if not "fvar" in self.font: + raise FeatureLibError( + "Cannot add feature variations to a font without an 'fvar' table" + ) + + # Normalize + axisMap = { + axis.axisTag: (axis.minValue, axis.defaultValue, axis.maxValue) + for axis in self.axes + } + + value = { + tag: ( + normalizeValue(bottom, axisMap[tag]), + normalizeValue(top, axisMap[tag]), + ) + for tag, (bottom, 
top) in value.items() + } + + self.conditionsets_[key] = value + + def makeOpenTypeAnchor(self, location, anchor): + """ast.Anchor --> otTables.Anchor""" + if anchor is None: + return None + variable = False + deviceX, deviceY = None, None + if anchor.xDeviceTable is not None: + deviceX = otl.buildDevice(dict(anchor.xDeviceTable)) + if anchor.yDeviceTable is not None: + deviceY = otl.buildDevice(dict(anchor.yDeviceTable)) + for dim in ("x", "y"): + if not isinstance(getattr(anchor, dim), VariableScalar): + continue + if getattr(anchor, dim+"DeviceTable") is not None: + raise FeatureLibError("Can't define a device coordinate and variable scalar", location) + if not self.varstorebuilder: + raise FeatureLibError("Can't define a variable scalar in a non-variable font", location) + varscalar = getattr(anchor,dim) + varscalar.axes = self.axes + default, index = varscalar.add_to_variation_store(self.varstorebuilder) + setattr(anchor, dim, default) + if index is not None and index != 0xFFFFFFFF: + if dim == "x": + deviceX = buildVarDevTable(index) + else: + deviceY = buildVarDevTable(index) + variable = True + + otlanchor = otl.buildAnchor(anchor.x, anchor.y, anchor.contourpoint, deviceX, deviceY) + if variable: + otlanchor.Format = 3 + return otlanchor + + _VALUEREC_ATTRS = { + name[0].lower() + name[1:]: (name, isDevice) + for _, name, isDevice, _ in otBase.valueRecordFormat + if not name.startswith("Reserved") + } + + + def makeOpenTypeValueRecord(self, location, v, pairPosContext): + """ast.ValueRecord --> otBase.ValueRecord""" + if not v: + return None + + vr = {} + variable = False + for astName, (otName, isDevice) in self._VALUEREC_ATTRS.items(): + val = getattr(v, astName, None) + if not val: + continue + if isDevice: + vr[otName] = otl.buildDevice(dict(val)) + elif isinstance(val, VariableScalar): + otDeviceName = otName[0:4] + "Device" + feaDeviceName = otDeviceName[0].lower() + otDeviceName[1:] + if getattr(v, feaDeviceName): + raise FeatureLibError("Can't 
define a device coordinate and variable scalar", location)
                if not self.varstorebuilder:
                    raise FeatureLibError("Can't define a variable scalar in a non-variable font", location)
                val.axes = self.axes
                default, index = val.add_to_variation_store(self.varstorebuilder)
                vr[otName] = default
                # 0xFFFFFFFF is the "no variation index" sentinel.
                if index is not None and index != 0xFFFFFFFF:
                    vr[otDeviceName] = buildVarDevTable(index)
                    variable = True
            else:
                vr[otName] = val

        if pairPosContext and not vr:
            # An empty value record in a pair context still needs an advance.
            vr = {"YAdvance": 0} if v.vertical else {"XAdvance": 0}
        valRec = otl.buildValue(vr)
        return valRec
diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/error.py b/.venv/lib/python3.9/site-packages/fontTools/feaLib/error.py
new file mode 100644
index 00000000..a2c5f9db
--- /dev/null
+++ b/.venv/lib/python3.9/site-packages/fontTools/feaLib/error.py
@@ -0,0 +1,22 @@
class FeatureLibError(Exception):
    """Error raised while lexing/parsing/building a feature file.

    Carries an optional *location* (stringifiable, e.g. FeatureLibLocation)
    that is prepended to the message when present.
    """

    def __init__(self, message, location):
        Exception.__init__(self, message)
        self.location = location

    def __str__(self):
        message = Exception.__str__(self)
        if self.location:
            return f"{self.location}: {message}"
        else:
            return message


class IncludedFeaNotFound(FeatureLibError):
    """Raised when an include() statement names a file that cannot be found."""

    def __str__(self):
        # Include statements always have a source location.
        assert self.location is not None

        message = (
            "The following feature file should be included but cannot be found: "
            f"{Exception.__str__(self)}"
        )
        return f"{self.location}: {message}"
diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/lexer.py b/.venv/lib/python3.9/site-packages/fontTools/feaLib/lexer.py
new file mode 100644
index 00000000..140fbd82
--- /dev/null
+++ b/.venv/lib/python3.9/site-packages/fontTools/feaLib/lexer.py
@@ -0,0 +1,285 @@
from fontTools.feaLib.error import FeatureLibError, IncludedFeaNotFound
from fontTools.feaLib.location import FeatureLibLocation
import re
import os


class Lexer(object):
    # Token type tags; each token is a (type, value, location) triple.
    NUMBER = "NUMBER"
    HEXADECIMAL = "HEXADECIMAL"
    OCTAL = "OCTAL"
    NUMBERS = (NUMBER, HEXADECIMAL, OCTAL)
    FLOAT = "FLOAT"
    STRING =
"STRING" + NAME = "NAME" + FILENAME = "FILENAME" + GLYPHCLASS = "GLYPHCLASS" + CID = "CID" + SYMBOL = "SYMBOL" + COMMENT = "COMMENT" + NEWLINE = "NEWLINE" + ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK" + + CHAR_WHITESPACE_ = " \t" + CHAR_NEWLINE_ = "\r\n" + CHAR_SYMBOL_ = ",;:-+'{}[]<>()=" + CHAR_DIGIT_ = "0123456789" + CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" + CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\" + CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-" + + RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.\-]+$") + + MODE_NORMAL_ = "NORMAL" + MODE_FILENAME_ = "FILENAME" + + def __init__(self, text, filename): + self.filename_ = filename + self.line_ = 1 + self.pos_ = 0 + self.line_start_ = 0 + self.text_ = text + self.text_length_ = len(text) + self.mode_ = Lexer.MODE_NORMAL_ + + def __iter__(self): + return self + + def next(self): # Python 2 + return self.__next__() + + def __next__(self): # Python 3 + while True: + token_type, token, location = self.next_() + if token_type != Lexer.NEWLINE: + return (token_type, token, location) + + def location_(self): + column = self.pos_ - self.line_start_ + 1 + return FeatureLibLocation(self.filename_ or "", self.line_, column) + + def next_(self): + self.scan_over_(Lexer.CHAR_WHITESPACE_) + location = self.location_() + start = self.pos_ + text = self.text_ + limit = len(text) + if start >= limit: + raise StopIteration() + cur_char = text[start] + next_char = text[start + 1] if start + 1 < limit else None + + if cur_char == "\n": + self.pos_ += 1 + self.line_ += 1 + self.line_start_ = self.pos_ + return (Lexer.NEWLINE, None, location) + if cur_char == "\r": + self.pos_ += 2 if next_char == "\n" else 1 + self.line_ += 1 + self.line_start_ = self.pos_ + return (Lexer.NEWLINE, None, location) + if cur_char == "#": + self.scan_until_(Lexer.CHAR_NEWLINE_) + return (Lexer.COMMENT, text[start : self.pos_], location) + + if self.mode_ is 
Lexer.MODE_FILENAME_: + if cur_char != "(": + raise FeatureLibError("Expected '(' before file name", location) + self.scan_until_(")") + cur_char = text[self.pos_] if self.pos_ < limit else None + if cur_char != ")": + raise FeatureLibError("Expected ')' after file name", location) + self.pos_ += 1 + self.mode_ = Lexer.MODE_NORMAL_ + return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location) + + if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location) + if cur_char == "@": + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + glyphclass = text[start + 1 : self.pos_] + if len(glyphclass) < 1: + raise FeatureLibError("Expected glyph class name", location) + if len(glyphclass) > 63: + raise FeatureLibError( + "Glyph class names must not be longer than 63 characters", location + ) + if not Lexer.RE_GLYPHCLASS.match(glyphclass): + raise FeatureLibError( + "Glyph class names must consist of letters, digits, " + "underscore, period or hyphen", + location, + ) + return (Lexer.GLYPHCLASS, glyphclass, location) + if cur_char in Lexer.CHAR_NAME_START_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + token = text[start : self.pos_] + if token == "include": + self.mode_ = Lexer.MODE_FILENAME_ + return (Lexer.NAME, token, location) + if cur_char == "0" and next_char in "xX": + self.pos_ += 2 + self.scan_over_(Lexer.CHAR_HEXDIGIT_) + return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location) + if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_: + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.OCTAL, int(text[start : self.pos_], 8), location) + if cur_char in Lexer.CHAR_DIGIT_: + self.scan_over_(Lexer.CHAR_DIGIT_) + if self.pos_ >= limit or text[self.pos_] != ".": + return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) + self.scan_over_(".") + self.scan_over_(Lexer.CHAR_DIGIT_) + return 
(Lexer.FLOAT, float(text[start : self.pos_]), location) + if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_DIGIT_) + if self.pos_ >= limit or text[self.pos_] != ".": + return (Lexer.NUMBER, int(text[start : self.pos_], 10), location) + self.scan_over_(".") + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.FLOAT, float(text[start : self.pos_]), location) + if cur_char in Lexer.CHAR_SYMBOL_: + self.pos_ += 1 + return (Lexer.SYMBOL, cur_char, location) + if cur_char == '"': + self.pos_ += 1 + self.scan_until_('"') + if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': + self.pos_ += 1 + # strip newlines embedded within a string + string = re.sub("[\r\n]", "", text[start + 1 : self.pos_ - 1]) + return (Lexer.STRING, string, location) + else: + raise FeatureLibError("Expected '\"' to terminate string", location) + raise FeatureLibError("Unexpected character: %r" % cur_char, location) + + def scan_over_(self, valid): + p = self.pos_ + while p < self.text_length_ and self.text_[p] in valid: + p += 1 + self.pos_ = p + + def scan_until_(self, stop_at): + p = self.pos_ + while p < self.text_length_ and self.text_[p] not in stop_at: + p += 1 + self.pos_ = p + + def scan_anonymous_block(self, tag): + location = self.location_() + tag = tag.strip() + self.scan_until_(Lexer.CHAR_NEWLINE_) + self.scan_over_(Lexer.CHAR_NEWLINE_) + regexp = r"}\s*" + tag + r"\s*;" + split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1) + if len(split) != 2: + raise FeatureLibError( + "Expected '} %s;' to terminate anonymous block" % tag, location + ) + self.pos_ += len(split[0]) + return (Lexer.ANONYMOUS_BLOCK, split[0], location) + + +class IncludingLexer(object): + """A Lexer that follows include statements. + + The OpenType feature file specification states that due to + historical reasons, relative imports should be resolved in this + order: + + 1. 
If the source font is UFO format, then relative to the UFO's + font directory + 2. relative to the top-level include file + 3. relative to the parent include file + + We only support 1 (via includeDir) and 2. + """ + + def __init__(self, featurefile, *, includeDir=None): + """Initializes an IncludingLexer. + + Behavior: + If includeDir is passed, it will be used to determine the top-level + include directory to use for all encountered include statements. If it is + not passed, ``os.path.dirname(featurefile)`` will be considered the + include directory. + """ + + self.lexers_ = [self.make_lexer_(featurefile)] + self.featurefilepath = self.lexers_[0].filename_ + self.includeDir = includeDir + + def __iter__(self): + return self + + def next(self): # Python 2 + return self.__next__() + + def __next__(self): # Python 3 + while self.lexers_: + lexer = self.lexers_[-1] + try: + token_type, token, location = next(lexer) + except StopIteration: + self.lexers_.pop() + continue + if token_type is Lexer.NAME and token == "include": + fname_type, fname_token, fname_location = lexer.next() + if fname_type is not Lexer.FILENAME: + raise FeatureLibError("Expected file name", fname_location) + # semi_type, semi_token, semi_location = lexer.next() + # if semi_type is not Lexer.SYMBOL or semi_token != ";": + # raise FeatureLibError("Expected ';'", semi_location) + if os.path.isabs(fname_token): + path = fname_token + else: + if self.includeDir is not None: + curpath = self.includeDir + elif self.featurefilepath is not None: + curpath = os.path.dirname(self.featurefilepath) + else: + # if the IncludingLexer was initialized from an in-memory + # file-like stream, it doesn't have a 'name' pointing to + # its filesystem path, therefore we fall back to using the + # current working directory to resolve relative includes + curpath = os.getcwd() + path = os.path.join(curpath, fname_token) + if len(self.lexers_) >= 5: + raise FeatureLibError("Too many recursive includes", fname_location) + 
try:
                    self.lexers_.append(self.make_lexer_(path))
                except FileNotFoundError as err:
                    raise IncludedFeaNotFound(fname_token, fname_location) from err
            else:
                # Not an include statement: pass the token through.
                return (token_type, token, location)
        raise StopIteration()

    @staticmethod
    def make_lexer_(file_or_path):
        """Build a Lexer from either an open file-like object or a path."""
        if hasattr(file_or_path, "read"):
            # Caller owns the stream; do not close it.
            fileobj, closing = file_or_path, False
        else:
            filename, closing = file_or_path, True
            fileobj = open(filename, "r", encoding="utf-8")
        data = fileobj.read()
        filename = getattr(fileobj, "name", None)
        if closing:
            fileobj.close()
        return Lexer(data, filename)

    def scan_anonymous_block(self, tag):
        # Delegate to the innermost (currently active) lexer.
        return self.lexers_[-1].scan_anonymous_block(tag)


class NonIncludingLexer(IncludingLexer):
    """Lexer that does not follow `include` statements, emits them as-is."""

    def __next__(self):  # Python 3
        return next(self.lexers_[0])
diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/location.py b/.venv/lib/python3.9/site-packages/fontTools/feaLib/location.py
new file mode 100644
index 00000000..50f761d2
--- /dev/null
+++ b/.venv/lib/python3.9/site-packages/fontTools/feaLib/location.py
@@ -0,0 +1,12 @@
from typing import NamedTuple


class FeatureLibLocation(NamedTuple):
    """A location in a feature file"""

    file: str
    line: int
    column: int

    def __str__(self):
        return f"{self.file}:{self.line}:{self.column}"
diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/lookupDebugInfo.py b/.venv/lib/python3.9/site-packages/fontTools/feaLib/lookupDebugInfo.py
new file mode 100644
index 00000000..876cadff
--- /dev/null
+++ b/.venv/lib/python3.9/site-packages/fontTools/feaLib/lookupDebugInfo.py
@@ -0,0 +1,11 @@
from typing import NamedTuple

# Font lib key under which lookup provenance is stored.
LOOKUP_DEBUG_INFO_KEY = "com.github.fonttools.feaLib"
# Environment variable that switches lookup debugging on.
LOOKUP_DEBUG_ENV_VAR = "FONTTOOLS_LOOKUP_DEBUGGING"

class LookupDebugInfo(NamedTuple):
    """Information about where a lookup came from, to be embedded in a font"""

    location: str
    name: str
    feature: list
diff --git
a/.venv/lib/python3.9/site-packages/fontTools/feaLib/parser.py b/.venv/lib/python3.9/site-packages/fontTools/feaLib/parser.py new file mode 100644 index 00000000..fd53573d --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/feaLib/parser.py @@ -0,0 +1,2356 @@ +from fontTools.feaLib.error import FeatureLibError +from fontTools.feaLib.lexer import Lexer, IncludingLexer, NonIncludingLexer +from fontTools.feaLib.variableScalar import VariableScalar +from fontTools.misc.encodingTools import getEncoding +from fontTools.misc.textTools import bytechr, tobytes, tostr +import fontTools.feaLib.ast as ast +import logging +import os +import re + + +log = logging.getLogger(__name__) + + +class Parser(object): + """Initializes a Parser object. + + Example: + + .. code:: python + + from fontTools.feaLib.parser import Parser + parser = Parser(file, font.getReverseGlyphMap()) + parsetree = parser.parse() + + Note: the ``glyphNames`` iterable serves a double role to help distinguish + glyph names from ranges in the presence of hyphens and to ensure that glyph + names referenced in a feature file are actually part of a font's glyph set. + If the iterable is left empty, no glyph name in glyph set checking takes + place, and all glyph tokens containing hyphens are treated as literal glyph + names, not as ranges. (Adding a space around the hyphen can, in any case, + help to disambiguate ranges from glyph names containing hyphens.) + + By default, the parser will follow ``include()`` statements in the feature + file. To turn this off, pass ``followIncludes=False``. Pass a directory string as + ``includeDir`` to explicitly declare a directory to search included feature files + in. 
+ """ + + extensions = {} + ast = ast + SS_FEATURE_TAGS = {"ss%02d" % i for i in range(1, 20 + 1)} + CV_FEATURE_TAGS = {"cv%02d" % i for i in range(1, 99 + 1)} + + def __init__( + self, featurefile, glyphNames=(), followIncludes=True, includeDir=None, **kwargs + ): + + if "glyphMap" in kwargs: + from fontTools.misc.loggingTools import deprecateArgument + + deprecateArgument("glyphMap", "use 'glyphNames' (iterable) instead") + if glyphNames: + raise TypeError( + "'glyphNames' and (deprecated) 'glyphMap' are " "mutually exclusive" + ) + glyphNames = kwargs.pop("glyphMap") + if kwargs: + raise TypeError( + "unsupported keyword argument%s: %s" + % ("" if len(kwargs) == 1 else "s", ", ".join(repr(k) for k in kwargs)) + ) + + self.glyphNames_ = set(glyphNames) + self.doc_ = self.ast.FeatureFile() + self.anchors_ = SymbolTable() + self.glyphclasses_ = SymbolTable() + self.lookups_ = SymbolTable() + self.valuerecords_ = SymbolTable() + self.symbol_tables_ = {self.anchors_, self.valuerecords_} + self.next_token_type_, self.next_token_ = (None, None) + self.cur_comments_ = [] + self.next_token_location_ = None + lexerClass = IncludingLexer if followIncludes else NonIncludingLexer + self.lexer_ = lexerClass(featurefile, includeDir=includeDir) + self.advance_lexer_(comments=True) + + def parse(self): + """Parse the file, and return a :class:`fontTools.feaLib.ast.FeatureFile` + object representing the root of the abstract syntax tree containing the + parsed contents of the file.""" + statements = self.doc_.statements + while self.next_token_type_ is not None or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append( + self.ast.Comment(self.cur_token_, location=self.cur_token_location_) + ) + elif self.is_cur_keyword_("include"): + statements.append(self.parse_include_()) + elif self.cur_token_type_ is Lexer.GLYPHCLASS: + statements.append(self.parse_glyphclass_definition_()) + elif self.is_cur_keyword_(("anon", 
"anonymous")): + statements.append(self.parse_anonymous_()) + elif self.is_cur_keyword_("anchorDef"): + statements.append(self.parse_anchordef_()) + elif self.is_cur_keyword_("languagesystem"): + statements.append(self.parse_languagesystem_()) + elif self.is_cur_keyword_("lookup"): + statements.append(self.parse_lookup_(vertical=False)) + elif self.is_cur_keyword_("markClass"): + statements.append(self.parse_markClass_()) + elif self.is_cur_keyword_("feature"): + statements.append(self.parse_feature_block_()) + elif self.is_cur_keyword_("conditionset"): + statements.append(self.parse_conditionset_()) + elif self.is_cur_keyword_("variation"): + statements.append(self.parse_feature_block_(variation=True)) + elif self.is_cur_keyword_("table"): + statements.append(self.parse_table_()) + elif self.is_cur_keyword_("valueRecordDef"): + statements.append(self.parse_valuerecord_definition_(vertical=False)) + elif ( + self.cur_token_type_ is Lexer.NAME + and self.cur_token_ in self.extensions + ): + statements.append(self.extensions[self.cur_token_](self)) + elif self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ";": + continue + else: + raise FeatureLibError( + "Expected feature, languagesystem, lookup, markClass, " + 'table, or glyph class definition, got {} "{}"'.format( + self.cur_token_type_, self.cur_token_ + ), + self.cur_token_location_, + ) + return self.doc_ + + def parse_anchor_(self): + # Parses an anchor in any of the four formats given in the feature + # file specification (2.e.vii). 
+ self.expect_symbol_("<") + self.expect_keyword_("anchor") + location = self.cur_token_location_ + + if self.next_token_ == "NULL": # Format D + self.expect_keyword_("NULL") + self.expect_symbol_(">") + return None + + if self.next_token_type_ == Lexer.NAME: # Format E + name = self.expect_name_() + anchordef = self.anchors_.resolve(name) + if anchordef is None: + raise FeatureLibError( + 'Unknown anchor "%s"' % name, self.cur_token_location_ + ) + self.expect_symbol_(">") + return self.ast.Anchor( + anchordef.x, + anchordef.y, + name=name, + contourpoint=anchordef.contourpoint, + xDeviceTable=None, + yDeviceTable=None, + location=location, + ) + + x, y = self.expect_number_(variable=True), self.expect_number_(variable=True) + + contourpoint = None + if self.next_token_ == "contourpoint": # Format B + self.expect_keyword_("contourpoint") + contourpoint = self.expect_number_() + + if self.next_token_ == "<": # Format C + xDeviceTable = self.parse_device_() + yDeviceTable = self.parse_device_() + else: + xDeviceTable, yDeviceTable = None, None + + self.expect_symbol_(">") + return self.ast.Anchor( + x, + y, + name=None, + contourpoint=contourpoint, + xDeviceTable=xDeviceTable, + yDeviceTable=yDeviceTable, + location=location, + ) + + def parse_anchor_marks_(self): + # Parses a sequence of ``[ mark @MARKCLASS]*.`` + anchorMarks = [] # [(self.ast.Anchor, markClassName)*] + while self.next_token_ == "<": + anchor = self.parse_anchor_() + if anchor is None and self.next_token_ != "mark": + continue # without mark, eg. in GPOS type 5 + self.expect_keyword_("mark") + markClass = self.expect_markClass_reference_() + anchorMarks.append((anchor, markClass)) + return anchorMarks + + def parse_anchordef_(self): + # Parses a named anchor definition (`section 2.e.viii `_). 
+ assert self.is_cur_keyword_("anchorDef") + location = self.cur_token_location_ + x, y = self.expect_number_(), self.expect_number_() + contourpoint = None + if self.next_token_ == "contourpoint": + self.expect_keyword_("contourpoint") + contourpoint = self.expect_number_() + name = self.expect_name_() + self.expect_symbol_(";") + anchordef = self.ast.AnchorDefinition( + name, x, y, contourpoint=contourpoint, location=location + ) + self.anchors_.define(name, anchordef) + return anchordef + + def parse_anonymous_(self): + # Parses an anonymous data block (`section 10 `_). + assert self.is_cur_keyword_(("anon", "anonymous")) + tag = self.expect_tag_() + _, content, location = self.lexer_.scan_anonymous_block(tag) + self.advance_lexer_() + self.expect_symbol_("}") + end_tag = self.expect_tag_() + assert tag == end_tag, "bad splitting in Lexer.scan_anonymous_block()" + self.expect_symbol_(";") + return self.ast.AnonymousBlock(tag, content, location=location) + + def parse_attach_(self): + # Parses a GDEF Attach statement (`section 9.b `_) + assert self.is_cur_keyword_("Attach") + location = self.cur_token_location_ + glyphs = self.parse_glyphclass_(accept_glyphname=True) + contourPoints = {self.expect_number_()} + while self.next_token_ != ";": + contourPoints.add(self.expect_number_()) + self.expect_symbol_(";") + return self.ast.AttachStatement(glyphs, contourPoints, location=location) + + def parse_enumerate_(self, vertical): + # Parse an enumerated pair positioning rule (`section 6.b.ii `_). 
+ assert self.cur_token_ in {"enumerate", "enum"} + self.advance_lexer_() + return self.parse_position_(enumerated=True, vertical=vertical) + + def parse_GlyphClassDef_(self): + # Parses 'GlyphClassDef @BASE, @LIGATURES, @MARKS, @COMPONENTS;' + assert self.is_cur_keyword_("GlyphClassDef") + location = self.cur_token_location_ + if self.next_token_ != ",": + baseGlyphs = self.parse_glyphclass_(accept_glyphname=False) + else: + baseGlyphs = None + self.expect_symbol_(",") + if self.next_token_ != ",": + ligatureGlyphs = self.parse_glyphclass_(accept_glyphname=False) + else: + ligatureGlyphs = None + self.expect_symbol_(",") + if self.next_token_ != ",": + markGlyphs = self.parse_glyphclass_(accept_glyphname=False) + else: + markGlyphs = None + self.expect_symbol_(",") + if self.next_token_ != ";": + componentGlyphs = self.parse_glyphclass_(accept_glyphname=False) + else: + componentGlyphs = None + self.expect_symbol_(";") + return self.ast.GlyphClassDefStatement( + baseGlyphs, markGlyphs, ligatureGlyphs, componentGlyphs, location=location + ) + + def parse_glyphclass_definition_(self): + # Parses glyph class definitions such as '@UPPERCASE = [A-Z];' + location, name = self.cur_token_location_, self.cur_token_ + self.expect_symbol_("=") + glyphs = self.parse_glyphclass_(accept_glyphname=False) + self.expect_symbol_(";") + glyphclass = self.ast.GlyphClassDefinition(name, glyphs, location=location) + self.glyphclasses_.define(name, glyphclass) + return glyphclass + + def split_glyph_range_(self, name, location): + # Since v1.20, the OpenType Feature File specification allows + # for dashes in glyph names. 
A sequence like "a-b-c-d" could + # therefore mean a single glyph whose name happens to be + # "a-b-c-d", or it could mean a range from glyph "a" to glyph + # "b-c-d", or a range from glyph "a-b" to glyph "c-d", or a + # range from glyph "a-b-c" to glyph "d".Technically, this + # example could be resolved because the (pretty complex) + # definition of glyph ranges renders most of these splits + # invalid. But the specification does not say that a compiler + # should try to apply such fancy heuristics. To encourage + # unambiguous feature files, we therefore try all possible + # splits and reject the feature file if there are multiple + # splits possible. It is intentional that we don't just emit a + # warning; warnings tend to get ignored. To fix the problem, + # font designers can trivially add spaces around the intended + # split point, and we emit a compiler error that suggests + # how exactly the source should be rewritten to make things + # unambiguous. + parts = name.split("-") + solutions = [] + for i in range(len(parts)): + start, limit = "-".join(parts[0:i]), "-".join(parts[i:]) + if start in self.glyphNames_ and limit in self.glyphNames_: + solutions.append((start, limit)) + if len(solutions) == 1: + start, limit = solutions[0] + return start, limit + elif len(solutions) == 0: + raise FeatureLibError( + '"%s" is not a glyph in the font, and it can not be split ' + "into a range of known glyphs" % name, + location, + ) + else: + ranges = " or ".join(['"%s - %s"' % (s, l) for s, l in solutions]) + raise FeatureLibError( + 'Ambiguous glyph range "%s"; ' + "please use %s to clarify what you mean" % (name, ranges), + location, + ) + + def parse_glyphclass_(self, accept_glyphname, accept_null=False): + # Parses a glyph class, either named or anonymous, or (if + # ``bool(accept_glyphname)``) a glyph name. If ``bool(accept_null)`` then + # also accept the special NULL glyph. 
+ if accept_glyphname and self.next_token_type_ in (Lexer.NAME, Lexer.CID): + if accept_null and self.next_token_ == "NULL": + # If you want a glyph called NULL, you should escape it. + self.advance_lexer_() + return self.ast.NullGlyph(location=self.cur_token_location_) + glyph = self.expect_glyph_() + self.check_glyph_name_in_glyph_set(glyph) + return self.ast.GlyphName(glyph, location=self.cur_token_location_) + if self.next_token_type_ is Lexer.GLYPHCLASS: + self.advance_lexer_() + gc = self.glyphclasses_.resolve(self.cur_token_) + if gc is None: + raise FeatureLibError( + "Unknown glyph class @%s" % self.cur_token_, + self.cur_token_location_, + ) + if isinstance(gc, self.ast.MarkClass): + return self.ast.MarkClassName(gc, location=self.cur_token_location_) + else: + return self.ast.GlyphClassName(gc, location=self.cur_token_location_) + + self.expect_symbol_("[") + location = self.cur_token_location_ + glyphs = self.ast.GlyphClass(location=location) + while self.next_token_ != "]": + if self.next_token_type_ is Lexer.NAME: + glyph = self.expect_glyph_() + location = self.cur_token_location_ + if "-" in glyph and self.glyphNames_ and glyph not in self.glyphNames_: + start, limit = self.split_glyph_range_(glyph, location) + self.check_glyph_name_in_glyph_set(start, limit) + glyphs.add_range( + start, limit, self.make_glyph_range_(location, start, limit) + ) + elif self.next_token_ == "-": + start = glyph + self.expect_symbol_("-") + limit = self.expect_glyph_() + self.check_glyph_name_in_glyph_set(start, limit) + glyphs.add_range( + start, limit, self.make_glyph_range_(location, start, limit) + ) + else: + if "-" in glyph and not self.glyphNames_: + log.warning( + str( + FeatureLibError( + f"Ambiguous glyph name that looks like a range: {glyph!r}", + location, + ) + ) + ) + self.check_glyph_name_in_glyph_set(glyph) + glyphs.append(glyph) + elif self.next_token_type_ is Lexer.CID: + glyph = self.expect_glyph_() + if self.next_token_ == "-": + range_location = 
self.cur_token_location_ + range_start = self.cur_token_ + self.expect_symbol_("-") + range_end = self.expect_cid_() + self.check_glyph_name_in_glyph_set( + f"cid{range_start:05d}", f"cid{range_end:05d}", + ) + glyphs.add_cid_range( + range_start, + range_end, + self.make_cid_range_(range_location, range_start, range_end), + ) + else: + glyph_name = f"cid{self.cur_token_:05d}" + self.check_glyph_name_in_glyph_set(glyph_name) + glyphs.append(glyph_name) + elif self.next_token_type_ is Lexer.GLYPHCLASS: + self.advance_lexer_() + gc = self.glyphclasses_.resolve(self.cur_token_) + if gc is None: + raise FeatureLibError( + "Unknown glyph class @%s" % self.cur_token_, + self.cur_token_location_, + ) + if isinstance(gc, self.ast.MarkClass): + gc = self.ast.MarkClassName(gc, location=self.cur_token_location_) + else: + gc = self.ast.GlyphClassName(gc, location=self.cur_token_location_) + glyphs.add_class(gc) + else: + raise FeatureLibError( + "Expected glyph name, glyph range, " + f"or glyph class reference, found {self.next_token_!r}", + self.next_token_location_, + ) + self.expect_symbol_("]") + return glyphs + + def parse_glyph_pattern_(self, vertical): + # Parses a glyph pattern, including lookups and context, e.g.:: + # + # a b + # a b c' d e + # a b c' lookup ChangeC d e + prefix, glyphs, lookups, values, suffix = ([], [], [], [], []) + hasMarks = False + while self.next_token_ not in {"by", "from", ";", ","}: + gc = self.parse_glyphclass_(accept_glyphname=True) + marked = False + if self.next_token_ == "'": + self.expect_symbol_("'") + hasMarks = marked = True + if marked: + if suffix: + # makeotf also reports this as an error, while FontForge + # silently inserts ' in all the intervening glyphs. 
+ # https://github.com/fonttools/fonttools/pull/1096 + raise FeatureLibError( + "Unsupported contextual target sequence: at most " + "one run of marked (') glyph/class names allowed", + self.cur_token_location_, + ) + glyphs.append(gc) + elif glyphs: + suffix.append(gc) + else: + prefix.append(gc) + + if self.is_next_value_(): + values.append(self.parse_valuerecord_(vertical)) + else: + values.append(None) + + lookuplist = None + while self.next_token_ == "lookup": + if lookuplist is None: + lookuplist = [] + self.expect_keyword_("lookup") + if not marked: + raise FeatureLibError( + "Lookups can only follow marked glyphs", + self.cur_token_location_, + ) + lookup_name = self.expect_name_() + lookup = self.lookups_.resolve(lookup_name) + if lookup is None: + raise FeatureLibError( + 'Unknown lookup "%s"' % lookup_name, self.cur_token_location_ + ) + lookuplist.append(lookup) + if marked: + lookups.append(lookuplist) + + if not glyphs and not suffix: # eg., "sub f f i by" + assert lookups == [] + return ([], prefix, [None] * len(prefix), values, [], hasMarks) + else: + if any(values[: len(prefix)]): + raise FeatureLibError( + "Positioning cannot be applied in the bactrack glyph sequence, " + "before the marked glyph sequence.", + self.cur_token_location_, + ) + marked_values = values[len(prefix) : len(prefix) + len(glyphs)] + if any(marked_values): + if any(values[len(prefix) + len(glyphs) :]): + raise FeatureLibError( + "Positioning values are allowed only in the marked glyph " + "sequence, or after the final glyph node when only one glyph " + "node is marked.", + self.cur_token_location_, + ) + values = marked_values + elif values and values[-1]: + if len(glyphs) > 1 or any(values[:-1]): + raise FeatureLibError( + "Positioning values are allowed only in the marked glyph " + "sequence, or after the final glyph node when only one glyph " + "node is marked.", + self.cur_token_location_, + ) + values = values[-1:] + elif any(values): + raise FeatureLibError( + 
"Positioning values are allowed only in the marked glyph " + "sequence, or after the final glyph node when only one glyph " + "node is marked.", + self.cur_token_location_, + ) + return (prefix, glyphs, lookups, values, suffix, hasMarks) + + def parse_chain_context_(self): + location = self.cur_token_location_ + prefix, glyphs, lookups, values, suffix, hasMarks = self.parse_glyph_pattern_( + vertical=False + ) + chainContext = [(prefix, glyphs, suffix)] + hasLookups = any(lookups) + while self.next_token_ == ",": + self.expect_symbol_(",") + ( + prefix, + glyphs, + lookups, + values, + suffix, + hasMarks, + ) = self.parse_glyph_pattern_(vertical=False) + chainContext.append((prefix, glyphs, suffix)) + hasLookups = hasLookups or any(lookups) + self.expect_symbol_(";") + return chainContext, hasLookups + + def parse_ignore_(self): + # Parses an ignore sub/pos rule. + assert self.is_cur_keyword_("ignore") + location = self.cur_token_location_ + self.advance_lexer_() + if self.cur_token_ in ["substitute", "sub"]: + chainContext, hasLookups = self.parse_chain_context_() + if hasLookups: + raise FeatureLibError( + 'No lookups can be specified for "ignore sub"', location + ) + return self.ast.IgnoreSubstStatement(chainContext, location=location) + if self.cur_token_ in ["position", "pos"]: + chainContext, hasLookups = self.parse_chain_context_() + if hasLookups: + raise FeatureLibError( + 'No lookups can be specified for "ignore pos"', location + ) + return self.ast.IgnorePosStatement(chainContext, location=location) + raise FeatureLibError( + 'Expected "substitute" or "position"', self.cur_token_location_ + ) + + def parse_include_(self): + assert self.cur_token_ == "include" + location = self.cur_token_location_ + filename = self.expect_filename_() + # self.expect_symbol_(";") + return ast.IncludeStatement(filename, location=location) + + def parse_language_(self): + assert self.is_cur_keyword_("language") + location = self.cur_token_location_ + language = 
self.expect_language_tag_() + include_default, required = (True, False) + if self.next_token_ in {"exclude_dflt", "include_dflt"}: + include_default = self.expect_name_() == "include_dflt" + if self.next_token_ == "required": + self.expect_keyword_("required") + required = True + self.expect_symbol_(";") + return self.ast.LanguageStatement( + language, include_default, required, location=location + ) + + def parse_ligatureCaretByIndex_(self): + assert self.is_cur_keyword_("LigatureCaretByIndex") + location = self.cur_token_location_ + glyphs = self.parse_glyphclass_(accept_glyphname=True) + carets = [self.expect_number_()] + while self.next_token_ != ";": + carets.append(self.expect_number_()) + self.expect_symbol_(";") + return self.ast.LigatureCaretByIndexStatement(glyphs, carets, location=location) + + def parse_ligatureCaretByPos_(self): + assert self.is_cur_keyword_("LigatureCaretByPos") + location = self.cur_token_location_ + glyphs = self.parse_glyphclass_(accept_glyphname=True) + carets = [self.expect_number_()] + while self.next_token_ != ";": + carets.append(self.expect_number_()) + self.expect_symbol_(";") + return self.ast.LigatureCaretByPosStatement(glyphs, carets, location=location) + + def parse_lookup_(self, vertical): + # Parses a ``lookup`` - either a lookup block, or a lookup reference + # inside a feature. 
+ assert self.is_cur_keyword_("lookup") + location, name = self.cur_token_location_, self.expect_name_() + + if self.next_token_ == ";": + lookup = self.lookups_.resolve(name) + if lookup is None: + raise FeatureLibError( + 'Unknown lookup "%s"' % name, self.cur_token_location_ + ) + self.expect_symbol_(";") + return self.ast.LookupReferenceStatement(lookup, location=location) + + use_extension = False + if self.next_token_ == "useExtension": + self.expect_keyword_("useExtension") + use_extension = True + + block = self.ast.LookupBlock(name, use_extension, location=location) + self.parse_block_(block, vertical) + self.lookups_.define(name, block) + return block + + def parse_lookupflag_(self): + # Parses a ``lookupflag`` statement, either specified by number or + # in words. + assert self.is_cur_keyword_("lookupflag") + location = self.cur_token_location_ + + # format B: "lookupflag 6;" + if self.next_token_type_ == Lexer.NUMBER: + value = self.expect_number_() + self.expect_symbol_(";") + return self.ast.LookupFlagStatement(value, location=location) + + # format A: "lookupflag RightToLeft MarkAttachmentType @M;" + value_seen = False + value, markAttachment, markFilteringSet = 0, None, None + flags = { + "RightToLeft": 1, + "IgnoreBaseGlyphs": 2, + "IgnoreLigatures": 4, + "IgnoreMarks": 8, + } + seen = set() + while self.next_token_ != ";": + if self.next_token_ in seen: + raise FeatureLibError( + "%s can be specified only once" % self.next_token_, + self.next_token_location_, + ) + seen.add(self.next_token_) + if self.next_token_ == "MarkAttachmentType": + self.expect_keyword_("MarkAttachmentType") + markAttachment = self.parse_glyphclass_(accept_glyphname=False) + elif self.next_token_ == "UseMarkFilteringSet": + self.expect_keyword_("UseMarkFilteringSet") + markFilteringSet = self.parse_glyphclass_(accept_glyphname=False) + elif self.next_token_ in flags: + value_seen = True + value = value | flags[self.expect_name_()] + else: + raise FeatureLibError( + '"%s" is 
not a recognized lookupflag' % self.next_token_, + self.next_token_location_, + ) + self.expect_symbol_(";") + + if not any([value_seen, markAttachment, markFilteringSet]): + raise FeatureLibError( + "lookupflag must have a value", self.next_token_location_ + ) + + return self.ast.LookupFlagStatement( + value, + markAttachment=markAttachment, + markFilteringSet=markFilteringSet, + location=location, + ) + + def parse_markClass_(self): + assert self.is_cur_keyword_("markClass") + location = self.cur_token_location_ + glyphs = self.parse_glyphclass_(accept_glyphname=True) + if not glyphs.glyphSet(): + raise FeatureLibError("Empty glyph class in mark class definition", location) + anchor = self.parse_anchor_() + name = self.expect_class_name_() + self.expect_symbol_(";") + markClass = self.doc_.markClasses.get(name) + if markClass is None: + markClass = self.ast.MarkClass(name) + self.doc_.markClasses[name] = markClass + self.glyphclasses_.define(name, markClass) + mcdef = self.ast.MarkClassDefinition( + markClass, anchor, glyphs, location=location + ) + markClass.addDefinition(mcdef) + return mcdef + + def parse_position_(self, enumerated, vertical): + assert self.cur_token_ in {"position", "pos"} + if self.next_token_ == "cursive": # GPOS type 3 + return self.parse_position_cursive_(enumerated, vertical) + elif self.next_token_ == "base": # GPOS type 4 + return self.parse_position_base_(enumerated, vertical) + elif self.next_token_ == "ligature": # GPOS type 5 + return self.parse_position_ligature_(enumerated, vertical) + elif self.next_token_ == "mark": # GPOS type 6 + return self.parse_position_mark_(enumerated, vertical) + + location = self.cur_token_location_ + prefix, glyphs, lookups, values, suffix, hasMarks = self.parse_glyph_pattern_( + vertical + ) + self.expect_symbol_(";") + + if any(lookups): + # GPOS type 8: Chaining contextual positioning; explicit lookups + if any(values): + raise FeatureLibError( + 'If "lookup" is present, no values must be 
specified', location + ) + return self.ast.ChainContextPosStatement( + prefix, glyphs, suffix, lookups, location=location + ) + + # Pair positioning, format A: "pos V 10 A -10;" + # Pair positioning, format B: "pos V A -20;" + if not prefix and not suffix and len(glyphs) == 2 and not hasMarks: + if values[0] is None: # Format B: "pos V A -20;" + values.reverse() + return self.ast.PairPosStatement( + glyphs[0], + values[0], + glyphs[1], + values[1], + enumerated=enumerated, + location=location, + ) + + if enumerated: + raise FeatureLibError( + '"enumerate" is only allowed with pair positionings', location + ) + return self.ast.SinglePosStatement( + list(zip(glyphs, values)), + prefix, + suffix, + forceChain=hasMarks, + location=location, + ) + + def parse_position_cursive_(self, enumerated, vertical): + location = self.cur_token_location_ + self.expect_keyword_("cursive") + if enumerated: + raise FeatureLibError( + '"enumerate" is not allowed with ' "cursive attachment positioning", + location, + ) + glyphclass = self.parse_glyphclass_(accept_glyphname=True) + entryAnchor = self.parse_anchor_() + exitAnchor = self.parse_anchor_() + self.expect_symbol_(";") + return self.ast.CursivePosStatement( + glyphclass, entryAnchor, exitAnchor, location=location + ) + + def parse_position_base_(self, enumerated, vertical): + location = self.cur_token_location_ + self.expect_keyword_("base") + if enumerated: + raise FeatureLibError( + '"enumerate" is not allowed with ' + "mark-to-base attachment positioning", + location, + ) + base = self.parse_glyphclass_(accept_glyphname=True) + marks = self.parse_anchor_marks_() + self.expect_symbol_(";") + return self.ast.MarkBasePosStatement(base, marks, location=location) + + def parse_position_ligature_(self, enumerated, vertical): + location = self.cur_token_location_ + self.expect_keyword_("ligature") + if enumerated: + raise FeatureLibError( + '"enumerate" is not allowed with ' + "mark-to-ligature attachment positioning", + location, + 
) + ligatures = self.parse_glyphclass_(accept_glyphname=True) + marks = [self.parse_anchor_marks_()] + while self.next_token_ == "ligComponent": + self.expect_keyword_("ligComponent") + marks.append(self.parse_anchor_marks_()) + self.expect_symbol_(";") + return self.ast.MarkLigPosStatement(ligatures, marks, location=location) + + def parse_position_mark_(self, enumerated, vertical): + location = self.cur_token_location_ + self.expect_keyword_("mark") + if enumerated: + raise FeatureLibError( + '"enumerate" is not allowed with ' + "mark-to-mark attachment positioning", + location, + ) + baseMarks = self.parse_glyphclass_(accept_glyphname=True) + marks = self.parse_anchor_marks_() + self.expect_symbol_(";") + return self.ast.MarkMarkPosStatement(baseMarks, marks, location=location) + + def parse_script_(self): + assert self.is_cur_keyword_("script") + location, script = self.cur_token_location_, self.expect_script_tag_() + self.expect_symbol_(";") + return self.ast.ScriptStatement(script, location=location) + + def parse_substitute_(self): + assert self.cur_token_ in {"substitute", "sub", "reversesub", "rsub"} + location = self.cur_token_location_ + reverse = self.cur_token_ in {"reversesub", "rsub"} + ( + old_prefix, + old, + lookups, + values, + old_suffix, + hasMarks, + ) = self.parse_glyph_pattern_(vertical=False) + if any(values): + raise FeatureLibError( + "Substitution statements cannot contain values", location + ) + new = [] + if self.next_token_ == "by": + keyword = self.expect_keyword_("by") + while self.next_token_ != ";": + gc = self.parse_glyphclass_(accept_glyphname=True, accept_null=True) + new.append(gc) + elif self.next_token_ == "from": + keyword = self.expect_keyword_("from") + new = [self.parse_glyphclass_(accept_glyphname=False)] + else: + keyword = None + self.expect_symbol_(";") + if len(new) == 0 and not any(lookups): + raise FeatureLibError( + 'Expected "by", "from" or explicit lookup references', + self.cur_token_location_, + ) + + # GSUB 
lookup type 3: Alternate substitution. + # Format: "substitute a from [a.1 a.2 a.3];" + if keyword == "from": + if reverse: + raise FeatureLibError( + 'Reverse chaining substitutions do not support "from"', location + ) + if len(old) != 1 or len(old[0].glyphSet()) != 1: + raise FeatureLibError('Expected a single glyph before "from"', location) + if len(new) != 1: + raise FeatureLibError( + 'Expected a single glyphclass after "from"', location + ) + return self.ast.AlternateSubstStatement( + old_prefix, old[0], old_suffix, new[0], location=location + ) + + num_lookups = len([l for l in lookups if l is not None]) + + is_deletion = False + if len(new) == 1 and isinstance(new[0], ast.NullGlyph): + new = [] # Deletion + is_deletion = True + + # GSUB lookup type 1: Single substitution. + # Format A: "substitute a by a.sc;" + # Format B: "substitute [one.fitted one.oldstyle] by one;" + # Format C: "substitute [a-d] by [A.sc-D.sc];" + if not reverse and len(old) == 1 and len(new) == 1 and num_lookups == 0: + glyphs = list(old[0].glyphSet()) + replacements = list(new[0].glyphSet()) + if len(replacements) == 1: + replacements = replacements * len(glyphs) + if len(glyphs) != len(replacements): + raise FeatureLibError( + 'Expected a glyph class with %d elements after "by", ' + "but found a glyph class with %d elements" + % (len(glyphs), len(replacements)), + location, + ) + return self.ast.SingleSubstStatement( + old, new, old_prefix, old_suffix, forceChain=hasMarks, location=location + ) + + # Glyph deletion, built as GSUB lookup type 2: Multiple substitution + # with empty replacement. + if is_deletion and len(old) == 1 and num_lookups == 0: + return self.ast.MultipleSubstStatement( + old_prefix, + old[0], + old_suffix, + (), + forceChain=hasMarks, + location=location, + ) + + # GSUB lookup type 2: Multiple substitution. 
+ # Format: "substitute f_f_i by f f i;" + if ( + not reverse + and len(old) == 1 + and len(old[0].glyphSet()) == 1 + and len(new) > 1 + and max([len(n.glyphSet()) for n in new]) == 1 + and num_lookups == 0 + ): + for n in new: + if not list(n.glyphSet()): + raise FeatureLibError("Empty class in replacement", location) + return self.ast.MultipleSubstStatement( + old_prefix, + tuple(old[0].glyphSet())[0], + old_suffix, + tuple([list(n.glyphSet())[0] for n in new]), + forceChain=hasMarks, + location=location, + ) + + # GSUB lookup type 4: Ligature substitution. + # Format: "substitute f f i by f_f_i;" + if ( + not reverse + and len(old) > 1 + and len(new) == 1 + and len(new[0].glyphSet()) == 1 + and num_lookups == 0 + ): + return self.ast.LigatureSubstStatement( + old_prefix, + old, + old_suffix, + list(new[0].glyphSet())[0], + forceChain=hasMarks, + location=location, + ) + + # GSUB lookup type 8: Reverse chaining substitution. + if reverse: + if len(old) != 1: + raise FeatureLibError( + "In reverse chaining single substitutions, " + "only a single glyph or glyph class can be replaced", + location, + ) + if len(new) != 1: + raise FeatureLibError( + "In reverse chaining single substitutions, " + 'the replacement (after "by") must be a single glyph ' + "or glyph class", + location, + ) + if num_lookups != 0: + raise FeatureLibError( + "Reverse chaining substitutions cannot call named lookups", location + ) + glyphs = sorted(list(old[0].glyphSet())) + replacements = sorted(list(new[0].glyphSet())) + if len(replacements) == 1: + replacements = replacements * len(glyphs) + if len(glyphs) != len(replacements): + raise FeatureLibError( + 'Expected a glyph class with %d elements after "by", ' + "but found a glyph class with %d elements" + % (len(glyphs), len(replacements)), + location, + ) + return self.ast.ReverseChainSingleSubstStatement( + old_prefix, old_suffix, old, new, location=location + ) + + if len(old) > 1 and len(new) > 1: + raise FeatureLibError( + "Direct 
substitution of multiple glyphs by multiple glyphs " + "is not supported", + location, + ) + + # If there are remaining glyphs to parse, this is an invalid GSUB statement + if len(new) != 0 or is_deletion: + raise FeatureLibError("Invalid substitution statement", location) + + # GSUB lookup type 6: Chaining contextual substitution. + rule = self.ast.ChainContextSubstStatement( + old_prefix, old, old_suffix, lookups, location=location + ) + return rule + + def parse_subtable_(self): + assert self.is_cur_keyword_("subtable") + location = self.cur_token_location_ + self.expect_symbol_(";") + return self.ast.SubtableStatement(location=location) + + def parse_size_parameters_(self): + # Parses a ``parameters`` statement used in ``size`` features. See + # `section 8.b `_. + assert self.is_cur_keyword_("parameters") + location = self.cur_token_location_ + DesignSize = self.expect_decipoint_() + SubfamilyID = self.expect_number_() + RangeStart = 0.0 + RangeEnd = 0.0 + if self.next_token_type_ in (Lexer.NUMBER, Lexer.FLOAT) or SubfamilyID != 0: + RangeStart = self.expect_decipoint_() + RangeEnd = self.expect_decipoint_() + + self.expect_symbol_(";") + return self.ast.SizeParameters( + DesignSize, SubfamilyID, RangeStart, RangeEnd, location=location + ) + + def parse_size_menuname_(self): + assert self.is_cur_keyword_("sizemenuname") + location = self.cur_token_location_ + platformID, platEncID, langID, string = self.parse_name_() + return self.ast.FeatureNameStatement( + "size", platformID, platEncID, langID, string, location=location + ) + + def parse_table_(self): + assert self.is_cur_keyword_("table") + location, name = self.cur_token_location_, self.expect_tag_() + table = self.ast.TableBlock(name, location=location) + self.expect_symbol_("{") + handler = { + "GDEF": self.parse_table_GDEF_, + "head": self.parse_table_head_, + "hhea": self.parse_table_hhea_, + "vhea": self.parse_table_vhea_, + "name": self.parse_table_name_, + "BASE": self.parse_table_BASE_, + "OS/2": 
self.parse_table_OS_2_, + "STAT": self.parse_table_STAT_, + }.get(name) + if handler: + handler(table) + else: + raise FeatureLibError( + '"table %s" is not supported' % name.strip(), location + ) + self.expect_symbol_("}") + end_tag = self.expect_tag_() + if end_tag != name: + raise FeatureLibError( + 'Expected "%s"' % name.strip(), self.cur_token_location_ + ) + self.expect_symbol_(";") + return table + + def parse_table_GDEF_(self, table): + statements = table.statements + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append( + self.ast.Comment(self.cur_token_, location=self.cur_token_location_) + ) + elif self.is_cur_keyword_("Attach"): + statements.append(self.parse_attach_()) + elif self.is_cur_keyword_("GlyphClassDef"): + statements.append(self.parse_GlyphClassDef_()) + elif self.is_cur_keyword_("LigatureCaretByIndex"): + statements.append(self.parse_ligatureCaretByIndex_()) + elif self.is_cur_keyword_("LigatureCaretByPos"): + statements.append(self.parse_ligatureCaretByPos_()) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError( + "Expected Attach, LigatureCaretByIndex, " "or LigatureCaretByPos", + self.cur_token_location_, + ) + + def parse_table_head_(self, table): + statements = table.statements + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append( + self.ast.Comment(self.cur_token_, location=self.cur_token_location_) + ) + elif self.is_cur_keyword_("FontRevision"): + statements.append(self.parse_FontRevision_()) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError("Expected FontRevision", self.cur_token_location_) + + def parse_table_hhea_(self, table): + statements = table.statements + fields = ("CaretOffset", "Ascender", "Descender", "LineGap") + while self.next_token_ != "}" or self.cur_comments_: + 
self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append( + self.ast.Comment(self.cur_token_, location=self.cur_token_location_) + ) + elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields: + key = self.cur_token_.lower() + value = self.expect_number_() + statements.append( + self.ast.HheaField(key, value, location=self.cur_token_location_) + ) + if self.next_token_ != ";": + raise FeatureLibError( + "Incomplete statement", self.next_token_location_ + ) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError( + "Expected CaretOffset, Ascender, " "Descender or LineGap", + self.cur_token_location_, + ) + + def parse_table_vhea_(self, table): + statements = table.statements + fields = ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap") + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append( + self.ast.Comment(self.cur_token_, location=self.cur_token_location_) + ) + elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields: + key = self.cur_token_.lower() + value = self.expect_number_() + statements.append( + self.ast.VheaField(key, value, location=self.cur_token_location_) + ) + if self.next_token_ != ";": + raise FeatureLibError( + "Incomplete statement", self.next_token_location_ + ) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError( + "Expected VertTypoAscender, " + "VertTypoDescender or VertTypoLineGap", + self.cur_token_location_, + ) + + def parse_table_name_(self, table): + statements = table.statements + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append( + self.ast.Comment(self.cur_token_, location=self.cur_token_location_) + ) + elif self.is_cur_keyword_("nameid"): + statement = self.parse_nameid_() + if statement: + 
statements.append(statement) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError("Expected nameid", self.cur_token_location_) + + def parse_name_(self): + """Parses a name record. See `section 9.e `_.""" + platEncID = None + langID = None + if self.next_token_type_ in Lexer.NUMBERS: + platformID = self.expect_any_number_() + location = self.cur_token_location_ + if platformID not in (1, 3): + raise FeatureLibError("Expected platform id 1 or 3", location) + if self.next_token_type_ in Lexer.NUMBERS: + platEncID = self.expect_any_number_() + langID = self.expect_any_number_() + else: + platformID = 3 + location = self.cur_token_location_ + + if platformID == 1: # Macintosh + platEncID = platEncID or 0 # Roman + langID = langID or 0 # English + else: # 3, Windows + platEncID = platEncID or 1 # Unicode + langID = langID or 0x0409 # English + + string = self.expect_string_() + self.expect_symbol_(";") + + encoding = getEncoding(platformID, platEncID, langID) + if encoding is None: + raise FeatureLibError("Unsupported encoding", location) + unescaped = self.unescape_string_(string, encoding) + return platformID, platEncID, langID, unescaped + + def parse_stat_name_(self): + platEncID = None + langID = None + if self.next_token_type_ in Lexer.NUMBERS: + platformID = self.expect_any_number_() + location = self.cur_token_location_ + if platformID not in (1, 3): + raise FeatureLibError("Expected platform id 1 or 3", location) + if self.next_token_type_ in Lexer.NUMBERS: + platEncID = self.expect_any_number_() + langID = self.expect_any_number_() + else: + platformID = 3 + location = self.cur_token_location_ + + if platformID == 1: # Macintosh + platEncID = platEncID or 0 # Roman + langID = langID or 0 # English + else: # 3, Windows + platEncID = platEncID or 1 # Unicode + langID = langID or 0x0409 # English + + string = self.expect_string_() + encoding = getEncoding(platformID, platEncID, langID) + if encoding is None: + raise 
FeatureLibError("Unsupported encoding", location) + unescaped = self.unescape_string_(string, encoding) + return platformID, platEncID, langID, unescaped + + def parse_nameid_(self): + assert self.cur_token_ == "nameid", self.cur_token_ + location, nameID = self.cur_token_location_, self.expect_any_number_() + if nameID > 32767: + raise FeatureLibError( + "Name id value cannot be greater than 32767", self.cur_token_location_ + ) + if 1 <= nameID <= 6: + log.warning( + "Name id %d cannot be set from the feature file. " + "Ignoring record" % nameID + ) + self.parse_name_() # skip to the next record + return None + + platformID, platEncID, langID, string = self.parse_name_() + return self.ast.NameRecord( + nameID, platformID, platEncID, langID, string, location=location + ) + + def unescape_string_(self, string, encoding): + if encoding == "utf_16_be": + s = re.sub(r"\\[0-9a-fA-F]{4}", self.unescape_unichr_, string) + else: + unescape = lambda m: self.unescape_byte_(m, encoding) + s = re.sub(r"\\[0-9a-fA-F]{2}", unescape, string) + # We now have a Unicode string, but it might contain surrogate pairs. + # We convert surrogates to actual Unicode by round-tripping through + # Python's UTF-16 codec in a special mode. 
+ utf16 = tobytes(s, "utf_16_be", "surrogatepass") + return tostr(utf16, "utf_16_be") + + @staticmethod + def unescape_unichr_(match): + n = match.group(0)[1:] + return chr(int(n, 16)) + + @staticmethod + def unescape_byte_(match, encoding): + n = match.group(0)[1:] + return bytechr(int(n, 16)).decode(encoding) + + def parse_table_BASE_(self, table): + statements = table.statements + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append( + self.ast.Comment(self.cur_token_, location=self.cur_token_location_) + ) + elif self.is_cur_keyword_("HorizAxis.BaseTagList"): + horiz_bases = self.parse_base_tag_list_() + elif self.is_cur_keyword_("HorizAxis.BaseScriptList"): + horiz_scripts = self.parse_base_script_list_(len(horiz_bases)) + statements.append( + self.ast.BaseAxis( + horiz_bases, + horiz_scripts, + False, + location=self.cur_token_location_, + ) + ) + elif self.is_cur_keyword_("VertAxis.BaseTagList"): + vert_bases = self.parse_base_tag_list_() + elif self.is_cur_keyword_("VertAxis.BaseScriptList"): + vert_scripts = self.parse_base_script_list_(len(vert_bases)) + statements.append( + self.ast.BaseAxis( + vert_bases, + vert_scripts, + True, + location=self.cur_token_location_, + ) + ) + elif self.cur_token_ == ";": + continue + + def parse_table_OS_2_(self, table): + statements = table.statements + numbers = ( + "FSType", + "TypoAscender", + "TypoDescender", + "TypoLineGap", + "winAscent", + "winDescent", + "XHeight", + "CapHeight", + "WeightClass", + "WidthClass", + "LowerOpSize", + "UpperOpSize", + ) + ranges = ("UnicodeRange", "CodePageRange") + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append( + self.ast.Comment(self.cur_token_, location=self.cur_token_location_) + ) + elif self.cur_token_type_ is Lexer.NAME: + key = self.cur_token_.lower() + value = None + 
if self.cur_token_ in numbers: + value = self.expect_number_() + elif self.is_cur_keyword_("Panose"): + value = [] + for i in range(10): + value.append(self.expect_number_()) + elif self.cur_token_ in ranges: + value = [] + while self.next_token_ != ";": + value.append(self.expect_number_()) + elif self.is_cur_keyword_("Vendor"): + value = self.expect_string_() + statements.append( + self.ast.OS2Field(key, value, location=self.cur_token_location_) + ) + elif self.cur_token_ == ";": + continue + + def parse_STAT_ElidedFallbackName(self): + assert self.is_cur_keyword_("ElidedFallbackName") + self.expect_symbol_("{") + names = [] + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_() + if self.is_cur_keyword_("name"): + platformID, platEncID, langID, string = self.parse_stat_name_() + nameRecord = self.ast.STATNameStatement( + "stat", + platformID, + platEncID, + langID, + string, + location=self.cur_token_location_, + ) + names.append(nameRecord) + else: + if self.cur_token_ != ";": + raise FeatureLibError( + f"Unexpected token {self.cur_token_} " f"in ElidedFallbackName", + self.cur_token_location_, + ) + self.expect_symbol_("}") + if not names: + raise FeatureLibError('Expected "name"', self.cur_token_location_) + return names + + def parse_STAT_design_axis(self): + assert self.is_cur_keyword_("DesignAxis") + names = [] + axisTag = self.expect_tag_() + if ( + axisTag not in ("ital", "opsz", "slnt", "wdth", "wght") + and not axisTag.isupper() + ): + log.warning(f"Unregistered axis tag {axisTag} should be uppercase.") + axisOrder = self.expect_number_() + self.expect_symbol_("{") + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_() + if self.cur_token_type_ is Lexer.COMMENT: + continue + elif self.is_cur_keyword_("name"): + location = self.cur_token_location_ + platformID, platEncID, langID, string = self.parse_stat_name_() + name = self.ast.STATNameStatement( + "stat", platformID, platEncID, langID, string, 
location=location + ) + names.append(name) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError( + f'Expected "name", got {self.cur_token_}', self.cur_token_location_ + ) + + self.expect_symbol_("}") + return self.ast.STATDesignAxisStatement( + axisTag, axisOrder, names, self.cur_token_location_ + ) + + def parse_STAT_axis_value_(self): + assert self.is_cur_keyword_("AxisValue") + self.expect_symbol_("{") + locations = [] + names = [] + flags = 0 + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + continue + elif self.is_cur_keyword_("name"): + location = self.cur_token_location_ + platformID, platEncID, langID, string = self.parse_stat_name_() + name = self.ast.STATNameStatement( + "stat", platformID, platEncID, langID, string, location=location + ) + names.append(name) + elif self.is_cur_keyword_("location"): + location = self.parse_STAT_location() + locations.append(location) + elif self.is_cur_keyword_("flag"): + flags = self.expect_stat_flags() + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError( + f"Unexpected token {self.cur_token_} " f"in AxisValue", + self.cur_token_location_, + ) + self.expect_symbol_("}") + if not names: + raise FeatureLibError('Expected "Axis Name"', self.cur_token_location_) + if not locations: + raise FeatureLibError('Expected "Axis location"', self.cur_token_location_) + if len(locations) > 1: + for location in locations: + if len(location.values) > 1: + raise FeatureLibError( + "Only one value is allowed in a " + "Format 4 Axis Value Record, but " + f"{len(location.values)} were found.", + self.cur_token_location_, + ) + format4_tags = [] + for location in locations: + tag = location.tag + if tag in format4_tags: + raise FeatureLibError( + f"Axis tag {tag} already " "defined.", self.cur_token_location_ + ) + format4_tags.append(tag) + + return self.ast.STATAxisValueStatement( + names, locations, flags, 
self.cur_token_location_ + ) + + def parse_STAT_location(self): + values = [] + tag = self.expect_tag_() + if len(tag.strip()) != 4: + raise FeatureLibError( + f"Axis tag {self.cur_token_} must be 4 " "characters", + self.cur_token_location_, + ) + + while self.next_token_ != ";": + if self.next_token_type_ is Lexer.FLOAT: + value = self.expect_float_() + values.append(value) + elif self.next_token_type_ is Lexer.NUMBER: + value = self.expect_number_() + values.append(value) + else: + raise FeatureLibError( + f'Unexpected value "{self.next_token_}". ' + "Expected integer or float.", + self.next_token_location_, + ) + if len(values) == 3: + nominal, min_val, max_val = values + if nominal < min_val or nominal > max_val: + raise FeatureLibError( + f"Default value {nominal} is outside " + f"of specified range " + f"{min_val}-{max_val}.", + self.next_token_location_, + ) + return self.ast.AxisValueLocationStatement(tag, values) + + def parse_table_STAT_(self, table): + statements = table.statements + design_axes = [] + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append( + self.ast.Comment(self.cur_token_, location=self.cur_token_location_) + ) + elif self.cur_token_type_ is Lexer.NAME: + if self.is_cur_keyword_("ElidedFallbackName"): + names = self.parse_STAT_ElidedFallbackName() + statements.append(self.ast.ElidedFallbackName(names)) + elif self.is_cur_keyword_("ElidedFallbackNameID"): + value = self.expect_number_() + statements.append(self.ast.ElidedFallbackNameID(value)) + self.expect_symbol_(";") + elif self.is_cur_keyword_("DesignAxis"): + designAxis = self.parse_STAT_design_axis() + design_axes.append(designAxis.tag) + statements.append(designAxis) + self.expect_symbol_(";") + elif self.is_cur_keyword_("AxisValue"): + axisValueRecord = self.parse_STAT_axis_value_() + for location in axisValueRecord.locations: + if location.tag not in design_axes: + # Tag must be 
defined in a DesignAxis before it + # can be referenced + raise FeatureLibError( + "DesignAxis not defined for " f"{location.tag}.", + self.cur_token_location_, + ) + statements.append(axisValueRecord) + self.expect_symbol_(";") + else: + raise FeatureLibError( + f"Unexpected token {self.cur_token_}", self.cur_token_location_ + ) + elif self.cur_token_ == ";": + continue + + def parse_base_tag_list_(self): + # Parses BASE table entries. (See `section 9.a `_) + assert self.cur_token_ in ( + "HorizAxis.BaseTagList", + "VertAxis.BaseTagList", + ), self.cur_token_ + bases = [] + while self.next_token_ != ";": + bases.append(self.expect_script_tag_()) + self.expect_symbol_(";") + return bases + + def parse_base_script_list_(self, count): + assert self.cur_token_ in ( + "HorizAxis.BaseScriptList", + "VertAxis.BaseScriptList", + ), self.cur_token_ + scripts = [(self.parse_base_script_record_(count))] + while self.next_token_ == ",": + self.expect_symbol_(",") + scripts.append(self.parse_base_script_record_(count)) + self.expect_symbol_(";") + return scripts + + def parse_base_script_record_(self, count): + script_tag = self.expect_script_tag_() + base_tag = self.expect_script_tag_() + coords = [self.expect_number_() for i in range(count)] + return script_tag, base_tag, coords + + def parse_device_(self): + result = None + self.expect_symbol_("<") + self.expect_keyword_("device") + if self.next_token_ == "NULL": + self.expect_keyword_("NULL") + else: + result = [(self.expect_number_(), self.expect_number_())] + while self.next_token_ == ",": + self.expect_symbol_(",") + result.append((self.expect_number_(), self.expect_number_())) + result = tuple(result) # make it hashable + self.expect_symbol_(">") + return result + + def is_next_value_(self): + return ( + self.next_token_type_ is Lexer.NUMBER + or self.next_token_ == "<" + or self.next_token_ == "(" + ) + + def parse_valuerecord_(self, vertical): + if ( + self.next_token_type_ is Lexer.SYMBOL and self.next_token_ == "(" 
+ ) or self.next_token_type_ is Lexer.NUMBER: + number, location = ( + self.expect_number_(variable=True), + self.cur_token_location_, + ) + if vertical: + val = self.ast.ValueRecord( + yAdvance=number, vertical=vertical, location=location + ) + else: + val = self.ast.ValueRecord( + xAdvance=number, vertical=vertical, location=location + ) + return val + self.expect_symbol_("<") + location = self.cur_token_location_ + if self.next_token_type_ is Lexer.NAME: + name = self.expect_name_() + if name == "NULL": + self.expect_symbol_(">") + return self.ast.ValueRecord() + vrd = self.valuerecords_.resolve(name) + if vrd is None: + raise FeatureLibError( + 'Unknown valueRecordDef "%s"' % name, self.cur_token_location_ + ) + value = vrd.value + xPlacement, yPlacement = (value.xPlacement, value.yPlacement) + xAdvance, yAdvance = (value.xAdvance, value.yAdvance) + else: + xPlacement, yPlacement, xAdvance, yAdvance = ( + self.expect_number_(variable=True), + self.expect_number_(variable=True), + self.expect_number_(variable=True), + self.expect_number_(variable=True), + ) + + if self.next_token_ == "<": + xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = ( + self.parse_device_(), + self.parse_device_(), + self.parse_device_(), + self.parse_device_(), + ) + allDeltas = sorted( + [ + delta + for size, delta in (xPlaDevice if xPlaDevice else ()) + + (yPlaDevice if yPlaDevice else ()) + + (xAdvDevice if xAdvDevice else ()) + + (yAdvDevice if yAdvDevice else ()) + ] + ) + if allDeltas[0] < -128 or allDeltas[-1] > 127: + raise FeatureLibError( + "Device value out of valid range (-128..127)", + self.cur_token_location_, + ) + else: + xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = (None, None, None, None) + + self.expect_symbol_(">") + return self.ast.ValueRecord( + xPlacement, + yPlacement, + xAdvance, + yAdvance, + xPlaDevice, + yPlaDevice, + xAdvDevice, + yAdvDevice, + vertical=vertical, + location=location, + ) + + def parse_valuerecord_definition_(self, vertical): + # Parses a 
named value record definition. (See section `2.e.v `_) + assert self.is_cur_keyword_("valueRecordDef") + location = self.cur_token_location_ + value = self.parse_valuerecord_(vertical) + name = self.expect_name_() + self.expect_symbol_(";") + vrd = self.ast.ValueRecordDefinition(name, value, location=location) + self.valuerecords_.define(name, vrd) + return vrd + + def parse_languagesystem_(self): + assert self.cur_token_ == "languagesystem" + location = self.cur_token_location_ + script = self.expect_script_tag_() + language = self.expect_language_tag_() + self.expect_symbol_(";") + return self.ast.LanguageSystemStatement(script, language, location=location) + + def parse_feature_block_(self, variation=False): + if variation: + assert self.cur_token_ == "variation" + else: + assert self.cur_token_ == "feature" + location = self.cur_token_location_ + tag = self.expect_tag_() + vertical = tag in {"vkrn", "vpal", "vhal", "valt"} + + stylisticset = None + cv_feature = None + size_feature = False + if tag in self.SS_FEATURE_TAGS: + stylisticset = tag + elif tag in self.CV_FEATURE_TAGS: + cv_feature = tag + elif tag == "size": + size_feature = True + + if variation: + conditionset = self.expect_name_() + + use_extension = False + if self.next_token_ == "useExtension": + self.expect_keyword_("useExtension") + use_extension = True + + if variation: + block = self.ast.VariationBlock( + tag, conditionset, use_extension=use_extension, location=location + ) + else: + block = self.ast.FeatureBlock( + tag, use_extension=use_extension, location=location + ) + self.parse_block_(block, vertical, stylisticset, size_feature, cv_feature) + return block + + def parse_feature_reference_(self): + assert self.cur_token_ == "feature", self.cur_token_ + location = self.cur_token_location_ + featureName = self.expect_tag_() + self.expect_symbol_(";") + return self.ast.FeatureReferenceStatement(featureName, location=location) + + def parse_featureNames_(self, tag): + """Parses a 
``featureNames`` statement found in stylistic set features. + See section `8.c `_.""" + assert self.cur_token_ == "featureNames", self.cur_token_ + block = self.ast.NestedBlock( + tag, self.cur_token_, location=self.cur_token_location_ + ) + self.expect_symbol_("{") + for symtab in self.symbol_tables_: + symtab.enter_scope() + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + block.statements.append( + self.ast.Comment(self.cur_token_, location=self.cur_token_location_) + ) + elif self.is_cur_keyword_("name"): + location = self.cur_token_location_ + platformID, platEncID, langID, string = self.parse_name_() + block.statements.append( + self.ast.FeatureNameStatement( + tag, platformID, platEncID, langID, string, location=location + ) + ) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError('Expected "name"', self.cur_token_location_) + self.expect_symbol_("}") + for symtab in self.symbol_tables_: + symtab.exit_scope() + self.expect_symbol_(";") + return block + + def parse_cvParameters_(self, tag): + # Parses a ``cvParameters`` block found in Character Variant features. + # See section `8.d `_. 
+ assert self.cur_token_ == "cvParameters", self.cur_token_ + block = self.ast.NestedBlock( + tag, self.cur_token_, location=self.cur_token_location_ + ) + self.expect_symbol_("{") + for symtab in self.symbol_tables_: + symtab.enter_scope() + + statements = block.statements + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append( + self.ast.Comment(self.cur_token_, location=self.cur_token_location_) + ) + elif self.is_cur_keyword_( + { + "FeatUILabelNameID", + "FeatUITooltipTextNameID", + "SampleTextNameID", + "ParamUILabelNameID", + } + ): + statements.append(self.parse_cvNameIDs_(tag, self.cur_token_)) + elif self.is_cur_keyword_("Character"): + statements.append(self.parse_cvCharacter_(tag)) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError( + "Expected statement: got {} {}".format( + self.cur_token_type_, self.cur_token_ + ), + self.cur_token_location_, + ) + + self.expect_symbol_("}") + for symtab in self.symbol_tables_: + symtab.exit_scope() + self.expect_symbol_(";") + return block + + def parse_cvNameIDs_(self, tag, block_name): + assert self.cur_token_ == block_name, self.cur_token_ + block = self.ast.NestedBlock(tag, block_name, location=self.cur_token_location_) + self.expect_symbol_("{") + for symtab in self.symbol_tables_: + symtab.enter_scope() + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + block.statements.append( + self.ast.Comment(self.cur_token_, location=self.cur_token_location_) + ) + elif self.is_cur_keyword_("name"): + location = self.cur_token_location_ + platformID, platEncID, langID, string = self.parse_name_() + block.statements.append( + self.ast.CVParametersNameStatement( + tag, + platformID, + platEncID, + langID, + string, + block_name, + location=location, + ) + ) + elif self.cur_token_ == ";": + continue + else: + raise 
FeatureLibError('Expected "name"', self.cur_token_location_) + self.expect_symbol_("}") + for symtab in self.symbol_tables_: + symtab.exit_scope() + self.expect_symbol_(";") + return block + + def parse_cvCharacter_(self, tag): + assert self.cur_token_ == "Character", self.cur_token_ + location, character = self.cur_token_location_, self.expect_any_number_() + self.expect_symbol_(";") + if not (0xFFFFFF >= character >= 0): + raise FeatureLibError( + "Character value must be between " + "{:#x} and {:#x}".format(0, 0xFFFFFF), + location, + ) + return self.ast.CharacterStatement(character, tag, location=location) + + def parse_FontRevision_(self): + # Parses a ``FontRevision`` statement found in the head table. See + # `section 9.c `_. + assert self.cur_token_ == "FontRevision", self.cur_token_ + location, version = self.cur_token_location_, self.expect_float_() + self.expect_symbol_(";") + if version <= 0: + raise FeatureLibError("Font revision numbers must be positive", location) + return self.ast.FontRevisionStatement(version, location=location) + + def parse_conditionset_(self): + name = self.expect_name_() + + conditions = {} + self.expect_symbol_("{") + + while self.next_token_ != "}": + self.advance_lexer_() + if self.cur_token_type_ is not Lexer.NAME: + raise FeatureLibError("Expected an axis name", self.cur_token_location_) + + axis = self.cur_token_ + if axis in conditions: + raise FeatureLibError( + f"Repeated condition for axis {axis}", self.cur_token_location_ + ) + + if self.next_token_type_ is Lexer.FLOAT: + min_value = self.expect_float_() + elif self.next_token_type_ is Lexer.NUMBER: + min_value = self.expect_number_(variable=False) + + if self.next_token_type_ is Lexer.FLOAT: + max_value = self.expect_float_() + elif self.next_token_type_ is Lexer.NUMBER: + max_value = self.expect_number_(variable=False) + self.expect_symbol_(";") + + conditions[axis] = (min_value, max_value) + + self.expect_symbol_("}") + + finalname = self.expect_name_() + if 
finalname != name: + raise FeatureLibError('Expected "%s"' % name, self.cur_token_location_) + return self.ast.ConditionsetStatement(name, conditions) + + def parse_block_( + self, block, vertical, stylisticset=None, size_feature=False, cv_feature=None + ): + self.expect_symbol_("{") + for symtab in self.symbol_tables_: + symtab.enter_scope() + + statements = block.statements + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append( + self.ast.Comment(self.cur_token_, location=self.cur_token_location_) + ) + elif self.cur_token_type_ is Lexer.GLYPHCLASS: + statements.append(self.parse_glyphclass_definition_()) + elif self.is_cur_keyword_("anchorDef"): + statements.append(self.parse_anchordef_()) + elif self.is_cur_keyword_({"enum", "enumerate"}): + statements.append(self.parse_enumerate_(vertical=vertical)) + elif self.is_cur_keyword_("feature"): + statements.append(self.parse_feature_reference_()) + elif self.is_cur_keyword_("ignore"): + statements.append(self.parse_ignore_()) + elif self.is_cur_keyword_("language"): + statements.append(self.parse_language_()) + elif self.is_cur_keyword_("lookup"): + statements.append(self.parse_lookup_(vertical)) + elif self.is_cur_keyword_("lookupflag"): + statements.append(self.parse_lookupflag_()) + elif self.is_cur_keyword_("markClass"): + statements.append(self.parse_markClass_()) + elif self.is_cur_keyword_({"pos", "position"}): + statements.append( + self.parse_position_(enumerated=False, vertical=vertical) + ) + elif self.is_cur_keyword_("script"): + statements.append(self.parse_script_()) + elif self.is_cur_keyword_({"sub", "substitute", "rsub", "reversesub"}): + statements.append(self.parse_substitute_()) + elif self.is_cur_keyword_("subtable"): + statements.append(self.parse_subtable_()) + elif self.is_cur_keyword_("valueRecordDef"): + statements.append(self.parse_valuerecord_definition_(vertical)) + elif stylisticset and 
self.is_cur_keyword_("featureNames"): + statements.append(self.parse_featureNames_(stylisticset)) + elif cv_feature and self.is_cur_keyword_("cvParameters"): + statements.append(self.parse_cvParameters_(cv_feature)) + elif size_feature and self.is_cur_keyword_("parameters"): + statements.append(self.parse_size_parameters_()) + elif size_feature and self.is_cur_keyword_("sizemenuname"): + statements.append(self.parse_size_menuname_()) + elif ( + self.cur_token_type_ is Lexer.NAME + and self.cur_token_ in self.extensions + ): + statements.append(self.extensions[self.cur_token_](self)) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError( + "Expected glyph class definition or statement: got {} {}".format( + self.cur_token_type_, self.cur_token_ + ), + self.cur_token_location_, + ) + + self.expect_symbol_("}") + for symtab in self.symbol_tables_: + symtab.exit_scope() + + name = self.expect_name_() + if name != block.name.strip(): + raise FeatureLibError( + 'Expected "%s"' % block.name.strip(), self.cur_token_location_ + ) + self.expect_symbol_(";") + + # A multiple substitution may have a single destination, in which case + # it will look just like a single substitution. So if there are both + # multiple and single substitutions, upgrade all the single ones to + # multiple substitutions. + + # Check if we have a mix of non-contextual singles and multiples. + has_single = False + has_multiple = False + for s in statements: + if isinstance(s, self.ast.SingleSubstStatement): + has_single = not any([s.prefix, s.suffix, s.forceChain]) + elif isinstance(s, self.ast.MultipleSubstStatement): + has_multiple = not any([s.prefix, s.suffix, s.forceChain]) + + # Upgrade all single substitutions to multiple substitutions. 
+ if has_single and has_multiple: + statements = [] + for s in block.statements: + if isinstance(s, self.ast.SingleSubstStatement): + glyphs = s.glyphs[0].glyphSet() + replacements = s.replacements[0].glyphSet() + if len(replacements) == 1: + replacements *= len(glyphs) + for i, glyph in enumerate(glyphs): + statements.append( + self.ast.MultipleSubstStatement( + s.prefix, + glyph, + s.suffix, + [replacements[i]], + s.forceChain, + location=s.location, + ) + ) + else: + statements.append(s) + block.statements = statements + + def is_cur_keyword_(self, k): + if self.cur_token_type_ is Lexer.NAME: + if isinstance(k, type("")): # basestring is gone in Python3 + return self.cur_token_ == k + else: + return self.cur_token_ in k + return False + + def expect_class_name_(self): + self.advance_lexer_() + if self.cur_token_type_ is not Lexer.GLYPHCLASS: + raise FeatureLibError("Expected @NAME", self.cur_token_location_) + return self.cur_token_ + + def expect_cid_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.CID: + return self.cur_token_ + raise FeatureLibError("Expected a CID", self.cur_token_location_) + + def expect_filename_(self): + self.advance_lexer_() + if self.cur_token_type_ is not Lexer.FILENAME: + raise FeatureLibError("Expected file name", self.cur_token_location_) + return self.cur_token_ + + def expect_glyph_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NAME: + self.cur_token_ = self.cur_token_.lstrip("\\") + if len(self.cur_token_) > 63: + raise FeatureLibError( + "Glyph names must not be longer than 63 characters", + self.cur_token_location_, + ) + return self.cur_token_ + elif self.cur_token_type_ is Lexer.CID: + return "cid%05d" % self.cur_token_ + raise FeatureLibError("Expected a glyph name or CID", self.cur_token_location_) + + def check_glyph_name_in_glyph_set(self, *names): + """Raises if glyph name (just `start`) or glyph names of a + range (`start` and `end`) are not in the glyph set. 
+ + If no glyph set is present, does nothing. + """ + if self.glyphNames_: + missing = [name for name in names if name not in self.glyphNames_] + if missing: + raise FeatureLibError( + "The following glyph names are referenced but are missing from the " + f"glyph set: {', '.join(missing)}", + self.cur_token_location_, + ) + + def expect_markClass_reference_(self): + name = self.expect_class_name_() + mc = self.glyphclasses_.resolve(name) + if mc is None: + raise FeatureLibError( + "Unknown markClass @%s" % name, self.cur_token_location_ + ) + if not isinstance(mc, self.ast.MarkClass): + raise FeatureLibError( + "@%s is not a markClass" % name, self.cur_token_location_ + ) + return mc + + def expect_tag_(self): + self.advance_lexer_() + if self.cur_token_type_ is not Lexer.NAME: + raise FeatureLibError("Expected a tag", self.cur_token_location_) + if len(self.cur_token_) > 4: + raise FeatureLibError( + "Tags cannot be longer than 4 characters", self.cur_token_location_ + ) + return (self.cur_token_ + " ")[:4] + + def expect_script_tag_(self): + tag = self.expect_tag_() + if tag == "dflt": + raise FeatureLibError( + '"dflt" is not a valid script tag; use "DFLT" instead', + self.cur_token_location_, + ) + return tag + + def expect_language_tag_(self): + tag = self.expect_tag_() + if tag == "DFLT": + raise FeatureLibError( + '"DFLT" is not a valid language tag; use "dflt" instead', + self.cur_token_location_, + ) + return tag + + def expect_symbol_(self, symbol): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == symbol: + return symbol + raise FeatureLibError("Expected '%s'" % symbol, self.cur_token_location_) + + def expect_keyword_(self, keyword): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword: + return self.cur_token_ + raise FeatureLibError('Expected "%s"' % keyword, self.cur_token_location_) + + def expect_name_(self): + self.advance_lexer_() + if self.cur_token_type_ is 
Lexer.NAME: + return self.cur_token_ + raise FeatureLibError("Expected a name", self.cur_token_location_) + + def expect_number_(self, variable=False): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NUMBER: + return self.cur_token_ + if variable and self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == "(": + return self.expect_variable_scalar_() + raise FeatureLibError("Expected a number", self.cur_token_location_) + + def expect_variable_scalar_(self): + self.advance_lexer_() # "(" + scalar = VariableScalar() + while True: + if self.cur_token_type_ == Lexer.SYMBOL and self.cur_token_ == ")": + break + location, value = self.expect_master_() + scalar.add_value(location, value) + return scalar + + def expect_master_(self): + location = {} + while True: + if self.cur_token_type_ is not Lexer.NAME: + raise FeatureLibError("Expected an axis name", self.cur_token_location_) + axis = self.cur_token_ + self.advance_lexer_() + if not (self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == "="): + raise FeatureLibError( + "Expected an equals sign", self.cur_token_location_ + ) + value = self.expect_number_() + location[axis] = value + if self.next_token_type_ is Lexer.NAME and self.next_token_[0] == ":": + # Lexer has just read the value as a glyph name. 
We'll correct it later + break + self.advance_lexer_() + if not (self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ","): + raise FeatureLibError( + "Expected an comma or an equals sign", self.cur_token_location_ + ) + self.advance_lexer_() + self.advance_lexer_() + value = int(self.cur_token_[1:]) + self.advance_lexer_() + return location, value + + def expect_any_number_(self): + self.advance_lexer_() + if self.cur_token_type_ in Lexer.NUMBERS: + return self.cur_token_ + raise FeatureLibError( + "Expected a decimal, hexadecimal or octal number", self.cur_token_location_ + ) + + def expect_float_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.FLOAT: + return self.cur_token_ + raise FeatureLibError( + "Expected a floating-point number", self.cur_token_location_ + ) + + def expect_decipoint_(self): + if self.next_token_type_ == Lexer.FLOAT: + return self.expect_float_() + elif self.next_token_type_ is Lexer.NUMBER: + return self.expect_number_() / 10 + else: + raise FeatureLibError( + "Expected an integer or floating-point number", self.cur_token_location_ + ) + + def expect_stat_flags(self): + value = 0 + flags = { + "OlderSiblingFontAttribute": 1, + "ElidableAxisValueName": 2, + } + while self.next_token_ != ";": + if self.next_token_ in flags: + name = self.expect_name_() + value = value | flags[name] + else: + raise FeatureLibError( + f"Unexpected STAT flag {self.cur_token_}", self.cur_token_location_ + ) + return value + + def expect_stat_values_(self): + if self.next_token_type_ == Lexer.FLOAT: + return self.expect_float_() + elif self.next_token_type_ is Lexer.NUMBER: + return self.expect_number_() + else: + raise FeatureLibError( + "Expected an integer or floating-point number", self.cur_token_location_ + ) + + def expect_string_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.STRING: + return self.cur_token_ + raise FeatureLibError("Expected a string", self.cur_token_location_) + + def advance_lexer_(self, 
comments=False): + if comments and self.cur_comments_: + self.cur_token_type_ = Lexer.COMMENT + self.cur_token_, self.cur_token_location_ = self.cur_comments_.pop(0) + return + else: + self.cur_token_type_, self.cur_token_, self.cur_token_location_ = ( + self.next_token_type_, + self.next_token_, + self.next_token_location_, + ) + while True: + try: + ( + self.next_token_type_, + self.next_token_, + self.next_token_location_, + ) = next(self.lexer_) + except StopIteration: + self.next_token_type_, self.next_token_ = (None, None) + if self.next_token_type_ != Lexer.COMMENT: + break + self.cur_comments_.append((self.next_token_, self.next_token_location_)) + + @staticmethod + def reverse_string_(s): + """'abc' --> 'cba'""" + return "".join(reversed(list(s))) + + def make_cid_range_(self, location, start, limit): + """(location, 999, 1001) --> ["cid00999", "cid01000", "cid01001"]""" + result = list() + if start > limit: + raise FeatureLibError( + "Bad range: start should be less than limit", location + ) + for cid in range(start, limit + 1): + result.append("cid%05d" % cid) + return result + + def make_glyph_range_(self, location, start, limit): + """(location, "a.sc", "d.sc") --> ["a.sc", "b.sc", "c.sc", "d.sc"]""" + result = list() + if len(start) != len(limit): + raise FeatureLibError( + 'Bad range: "%s" and "%s" should have the same length' % (start, limit), + location, + ) + + rev = self.reverse_string_ + prefix = os.path.commonprefix([start, limit]) + suffix = rev(os.path.commonprefix([rev(start), rev(limit)])) + if len(suffix) > 0: + start_range = start[len(prefix) : -len(suffix)] + limit_range = limit[len(prefix) : -len(suffix)] + else: + start_range = start[len(prefix) :] + limit_range = limit[len(prefix) :] + + if start_range >= limit_range: + raise FeatureLibError( + "Start of range must be smaller than its end", location + ) + + uppercase = re.compile(r"^[A-Z]$") + if uppercase.match(start_range) and uppercase.match(limit_range): + for c in 
range(ord(start_range), ord(limit_range) + 1): + result.append("%s%c%s" % (prefix, c, suffix)) + return result + + lowercase = re.compile(r"^[a-z]$") + if lowercase.match(start_range) and lowercase.match(limit_range): + for c in range(ord(start_range), ord(limit_range) + 1): + result.append("%s%c%s" % (prefix, c, suffix)) + return result + + digits = re.compile(r"^[0-9]{1,3}$") + if digits.match(start_range) and digits.match(limit_range): + for i in range(int(start_range, 10), int(limit_range, 10) + 1): + number = ("000" + str(i))[-len(start_range) :] + result.append("%s%s%s" % (prefix, number, suffix)) + return result + + raise FeatureLibError('Bad range: "%s-%s"' % (start, limit), location) + + +class SymbolTable(object): + def __init__(self): + self.scopes_ = [{}] + + def enter_scope(self): + self.scopes_.append({}) + + def exit_scope(self): + self.scopes_.pop() + + def define(self, name, item): + self.scopes_[-1][name] = item + + def resolve(self, name): + for scope in reversed(self.scopes_): + item = scope.get(name) + if item: + return item + return None diff --git a/.venv/lib/python3.9/site-packages/fontTools/feaLib/variableScalar.py b/.venv/lib/python3.9/site-packages/fontTools/feaLib/variableScalar.py new file mode 100644 index 00000000..a286568e --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/feaLib/variableScalar.py @@ -0,0 +1,97 @@ +from fontTools.varLib.models import VariationModel, normalizeValue + + +def Location(loc): + return tuple(sorted(loc.items())) + + +class VariableScalar: + """A scalar with different values at different points in the designspace.""" + + def __init__(self, location_value={}): + self.values = {} + self.axes = {} + for location, value in location_value.items(): + self.add_value(location, value) + + def __repr__(self): + items = [] + for location, value in self.values.items(): + loc = ",".join(["%s=%i" % (ax, loc) for ax, loc in location]) + items.append("%s:%i" % (loc, value)) + return "(" + (" ".join(items)) + 
")" + + @property + def does_vary(self): + values = list(self.values.values()) + return any(v != values[0] for v in values[1:]) + + @property + def axes_dict(self): + if not self.axes: + raise ValueError( + ".axes must be defined on variable scalar before interpolating" + ) + return {ax.axisTag: ax for ax in self.axes} + + def _normalized_location(self, location): + location = self.fix_location(location) + normalized_location = {} + for axtag in location.keys(): + if axtag not in self.axes_dict: + raise ValueError("Unknown axis %s in %s" % (axtag, location)) + axis = self.axes_dict[axtag] + normalized_location[axtag] = normalizeValue( + location[axtag], (axis.minValue, axis.defaultValue, axis.maxValue) + ) + + return Location(normalized_location) + + def fix_location(self, location): + location = dict(location) + for tag, axis in self.axes_dict.items(): + if tag not in location: + location[tag] = axis.defaultValue + return location + + def add_value(self, location, value): + if self.axes: + location = self.fix_location(location) + + self.values[Location(location)] = value + + def fix_all_locations(self): + self.values = { + Location(self.fix_location(l)): v for l, v in self.values.items() + } + + @property + def default(self): + self.fix_all_locations() + key = Location({ax.axisTag: ax.defaultValue for ax in self.axes}) + if key not in self.values: + raise ValueError("Default value could not be found") + # I *guess* we could interpolate one, but I don't know how. 
+ return self.values[key] + + def value_at_location(self, location): + loc = location + if loc in self.values.keys(): + return self.values[loc] + values = list(self.values.values()) + return self.model.interpolateFromMasters(loc, values) + + @property + def model(self): + locations = [dict(self._normalized_location(k)) for k in self.values.keys()] + return VariationModel(locations) + + def get_deltas_and_supports(self): + values = list(self.values.values()) + return self.model.getDeltasAndSupports(values) + + def add_to_variation_store(self, store_builder): + deltas, supports = self.get_deltas_and_supports() + store_builder.setSupports(supports) + index = store_builder.storeDeltas(deltas) + return int(self.default), index diff --git a/.venv/lib/python3.9/site-packages/fontTools/fontBuilder.py b/.venv/lib/python3.9/site-packages/fontTools/fontBuilder.py new file mode 100644 index 00000000..bf3b31b7 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/fontBuilder.py @@ -0,0 +1,959 @@ +__all__ = ["FontBuilder"] + +""" +This module is *experimental*, meaning it still may evolve and change. + +The `FontBuilder` class is a convenient helper to construct working TTF or +OTF fonts from scratch. + +Note that the various setup methods cannot be called in arbitrary order, +due to various interdependencies between OpenType tables. Here is an order +that works: + + fb = FontBuilder(...) + fb.setupGlyphOrder(...) + fb.setupCharacterMap(...) + fb.setupGlyf(...) --or-- fb.setupCFF(...) + fb.setupHorizontalMetrics(...) + fb.setupHorizontalHeader() + fb.setupNameTable(...) + fb.setupOS2() + fb.addOpenTypeFeatures(...) + fb.setupPost() + fb.save(...) 
+ +Here is how to build a minimal TTF: + +```python +from fontTools.fontBuilder import FontBuilder +from fontTools.pens.ttGlyphPen import TTGlyphPen + + +def drawTestGlyph(pen): + pen.moveTo((100, 100)) + pen.lineTo((100, 1000)) + pen.qCurveTo((200, 900), (400, 900), (500, 1000)) + pen.lineTo((500, 100)) + pen.closePath() + + +fb = FontBuilder(1024, isTTF=True) +fb.setupGlyphOrder([".notdef", ".null", "space", "A", "a"]) +fb.setupCharacterMap({32: "space", 65: "A", 97: "a"}) +advanceWidths = {".notdef": 600, "space": 500, "A": 600, "a": 600, ".null": 0} + +familyName = "HelloTestFont" +styleName = "TotallyNormal" +version = "0.1" + +nameStrings = dict( + familyName=dict(en=familyName, nl="HalloTestFont"), + styleName=dict(en=styleName, nl="TotaalNormaal"), + uniqueFontIdentifier="fontBuilder: " + familyName + "." + styleName, + fullName=familyName + "-" + styleName, + psName=familyName + "-" + styleName, + version="Version " + version, +) + +pen = TTGlyphPen(None) +drawTestGlyph(pen) +glyph = pen.glyph() +glyphs = {".notdef": glyph, "space": glyph, "A": glyph, "a": glyph, ".null": glyph} +fb.setupGlyf(glyphs) +metrics = {} +glyphTable = fb.font["glyf"] +for gn, advanceWidth in advanceWidths.items(): + metrics[gn] = (advanceWidth, glyphTable[gn].xMin) +fb.setupHorizontalMetrics(metrics) +fb.setupHorizontalHeader(ascent=824, descent=-200) +fb.setupNameTable(nameStrings) +fb.setupOS2(sTypoAscender=824, usWinAscent=824, usWinDescent=200) +fb.setupPost() +fb.save("test.ttf") +``` + +And here's how to build a minimal OTF: + +```python +from fontTools.fontBuilder import FontBuilder +from fontTools.pens.t2CharStringPen import T2CharStringPen + + +def drawTestGlyph(pen): + pen.moveTo((100, 100)) + pen.lineTo((100, 1000)) + pen.curveTo((200, 900), (400, 900), (500, 1000)) + pen.lineTo((500, 100)) + pen.closePath() + + +fb = FontBuilder(1024, isTTF=False) +fb.setupGlyphOrder([".notdef", ".null", "space", "A", "a"]) +fb.setupCharacterMap({32: "space", 65: "A", 97: "a"}) 
+advanceWidths = {".notdef": 600, "space": 500, "A": 600, "a": 600, ".null": 0} + +familyName = "HelloTestFont" +styleName = "TotallyNormal" +version = "0.1" + +nameStrings = dict( + familyName=dict(en=familyName, nl="HalloTestFont"), + styleName=dict(en=styleName, nl="TotaalNormaal"), + uniqueFontIdentifier="fontBuilder: " + familyName + "." + styleName, + fullName=familyName + "-" + styleName, + psName=familyName + "-" + styleName, + version="Version " + version, +) + +pen = T2CharStringPen(600, None) +drawTestGlyph(pen) +charString = pen.getCharString() +charStrings = { + ".notdef": charString, + "space": charString, + "A": charString, + "a": charString, + ".null": charString, +} +fb.setupCFF(nameStrings["psName"], {"FullName": nameStrings["psName"]}, charStrings, {}) +lsb = {gn: cs.calcBounds(None)[0] for gn, cs in charStrings.items()} +metrics = {} +for gn, advanceWidth in advanceWidths.items(): + metrics[gn] = (advanceWidth, lsb[gn]) +fb.setupHorizontalMetrics(metrics) +fb.setupHorizontalHeader(ascent=824, descent=200) +fb.setupNameTable(nameStrings) +fb.setupOS2(sTypoAscender=824, usWinAscent=824, usWinDescent=200) +fb.setupPost() +fb.save("test.otf") +``` +""" + +from .ttLib import TTFont, newTable +from .ttLib.tables._c_m_a_p import cmap_classes +from .misc.timeTools import timestampNow +import struct +from collections import OrderedDict + + +_headDefaults = dict( + tableVersion=1.0, + fontRevision=1.0, + checkSumAdjustment=0, + magicNumber=0x5F0F3CF5, + flags=0x0003, + unitsPerEm=1000, + created=0, + modified=0, + xMin=0, + yMin=0, + xMax=0, + yMax=0, + macStyle=0, + lowestRecPPEM=3, + fontDirectionHint=2, + indexToLocFormat=0, + glyphDataFormat=0, +) + +_maxpDefaultsTTF = dict( + tableVersion=0x00010000, + numGlyphs=0, + maxPoints=0, + maxContours=0, + maxCompositePoints=0, + maxCompositeContours=0, + maxZones=2, + maxTwilightPoints=0, + maxStorage=0, + maxFunctionDefs=0, + maxInstructionDefs=0, + maxStackElements=0, + maxSizeOfInstructions=0, + 
maxComponentElements=0, + maxComponentDepth=0, +) +_maxpDefaultsOTF = dict( + tableVersion=0x00005000, + numGlyphs=0, +) + +_postDefaults = dict( + formatType=3.0, + italicAngle=0, + underlinePosition=0, + underlineThickness=0, + isFixedPitch=0, + minMemType42=0, + maxMemType42=0, + minMemType1=0, + maxMemType1=0, +) + +_hheaDefaults = dict( + tableVersion=0x00010000, + ascent=0, + descent=0, + lineGap=0, + advanceWidthMax=0, + minLeftSideBearing=0, + minRightSideBearing=0, + xMaxExtent=0, + caretSlopeRise=1, + caretSlopeRun=0, + caretOffset=0, + reserved0=0, + reserved1=0, + reserved2=0, + reserved3=0, + metricDataFormat=0, + numberOfHMetrics=0, +) + +_vheaDefaults = dict( + tableVersion=0x00010000, + ascent=0, + descent=0, + lineGap=0, + advanceHeightMax=0, + minTopSideBearing=0, + minBottomSideBearing=0, + yMaxExtent=0, + caretSlopeRise=0, + caretSlopeRun=0, + reserved0=0, + reserved1=0, + reserved2=0, + reserved3=0, + reserved4=0, + metricDataFormat=0, + numberOfVMetrics=0, +) + +_nameIDs = dict( + copyright=0, + familyName=1, + styleName=2, + uniqueFontIdentifier=3, + fullName=4, + version=5, + psName=6, + trademark=7, + manufacturer=8, + designer=9, + description=10, + vendorURL=11, + designerURL=12, + licenseDescription=13, + licenseInfoURL=14, + # reserved = 15, + typographicFamily=16, + typographicSubfamily=17, + compatibleFullName=18, + sampleText=19, + postScriptCIDFindfontName=20, + wwsFamilyName=21, + wwsSubfamilyName=22, + lightBackgroundPalette=23, + darkBackgroundPalette=24, + variationsPostScriptNamePrefix=25, +) + +# to insert in setupNameTable doc string: +# print("\n".join(("%s (nameID %s)" % (k, v)) for k, v in sorted(_nameIDs.items(), key=lambda x: x[1]))) + +_panoseDefaults = dict( + bFamilyType=0, + bSerifStyle=0, + bWeight=0, + bProportion=0, + bContrast=0, + bStrokeVariation=0, + bArmStyle=0, + bLetterForm=0, + bMidline=0, + bXHeight=0, +) + +_OS2Defaults = dict( + version=3, + xAvgCharWidth=0, + usWeightClass=400, + usWidthClass=5, + 
fsType=0x0004, # default: Preview & Print embedding + ySubscriptXSize=0, + ySubscriptYSize=0, + ySubscriptXOffset=0, + ySubscriptYOffset=0, + ySuperscriptXSize=0, + ySuperscriptYSize=0, + ySuperscriptXOffset=0, + ySuperscriptYOffset=0, + yStrikeoutSize=0, + yStrikeoutPosition=0, + sFamilyClass=0, + panose=_panoseDefaults, + ulUnicodeRange1=0, + ulUnicodeRange2=0, + ulUnicodeRange3=0, + ulUnicodeRange4=0, + achVendID="????", + fsSelection=0, + usFirstCharIndex=0, + usLastCharIndex=0, + sTypoAscender=0, + sTypoDescender=0, + sTypoLineGap=0, + usWinAscent=0, + usWinDescent=0, + ulCodePageRange1=0, + ulCodePageRange2=0, + sxHeight=0, + sCapHeight=0, + usDefaultChar=0, # .notdef + usBreakChar=32, # space + usMaxContext=0, + usLowerOpticalPointSize=0, + usUpperOpticalPointSize=0, +) + + +class FontBuilder(object): + def __init__(self, unitsPerEm=None, font=None, isTTF=True): + """Initialize a FontBuilder instance. + + If the `font` argument is not given, a new `TTFont` will be + constructed, and `unitsPerEm` must be given. If `isTTF` is True, + the font will be a glyf-based TTF; if `isTTF` is False it will be + a CFF-based OTF. + + If `font` is given, it must be a `TTFont` instance and `unitsPerEm` + must _not_ be given. The `isTTF` argument will be ignored. + """ + if font is None: + self.font = TTFont(recalcTimestamp=False) + self.isTTF = isTTF + now = timestampNow() + assert unitsPerEm is not None + self.setupHead(unitsPerEm=unitsPerEm, created=now, modified=now) + self.setupMaxp() + else: + assert unitsPerEm is None + self.font = font + self.isTTF = "glyf" in font + + def save(self, file): + """Save the font. The 'file' argument can be either a pathname or a + writable file object. 
+ """ + self.font.save(file) + + def _initTableWithValues(self, tableTag, defaults, values): + table = self.font[tableTag] = newTable(tableTag) + for k, v in defaults.items(): + setattr(table, k, v) + for k, v in values.items(): + setattr(table, k, v) + return table + + def _updateTableWithValues(self, tableTag, values): + table = self.font[tableTag] + for k, v in values.items(): + setattr(table, k, v) + + def setupHead(self, **values): + """Create a new `head` table and initialize it with default values, + which can be overridden by keyword arguments. + """ + self._initTableWithValues("head", _headDefaults, values) + + def updateHead(self, **values): + """Update the head table with the fields and values passed as + keyword arguments. + """ + self._updateTableWithValues("head", values) + + def setupGlyphOrder(self, glyphOrder): + """Set the glyph order for the font.""" + self.font.setGlyphOrder(glyphOrder) + + def setupCharacterMap(self, cmapping, uvs=None, allowFallback=False): + """Build the `cmap` table for the font. The `cmapping` argument should + be a dict mapping unicode code points as integers to glyph names. + + The `uvs` argument, when passed, must be a list of tuples, describing + Unicode Variation Sequences. These tuples have three elements: + (unicodeValue, variationSelector, glyphName) + `unicodeValue` and `variationSelector` are integer code points. + `glyphName` may be None, to indicate this is the default variation. + Text processors will then use the cmap to find the glyph name. + Each Unicode Variation Sequence should be an officially supported + sequence, but this is not policed. 
+ """ + subTables = [] + highestUnicode = max(cmapping) + if highestUnicode > 0xFFFF: + cmapping_3_1 = dict((k, v) for k, v in cmapping.items() if k < 0x10000) + subTable_3_10 = buildCmapSubTable(cmapping, 12, 3, 10) + subTables.append(subTable_3_10) + else: + cmapping_3_1 = cmapping + format = 4 + subTable_3_1 = buildCmapSubTable(cmapping_3_1, format, 3, 1) + try: + subTable_3_1.compile(self.font) + except struct.error: + # format 4 overflowed, fall back to format 12 + if not allowFallback: + raise ValueError( + "cmap format 4 subtable overflowed; sort glyph order by unicode to fix." + ) + format = 12 + subTable_3_1 = buildCmapSubTable(cmapping_3_1, format, 3, 1) + subTables.append(subTable_3_1) + subTable_0_3 = buildCmapSubTable(cmapping_3_1, format, 0, 3) + subTables.append(subTable_0_3) + + if uvs is not None: + uvsDict = {} + for unicodeValue, variationSelector, glyphName in uvs: + if cmapping.get(unicodeValue) == glyphName: + # this is a default variation + glyphName = None + if variationSelector not in uvsDict: + uvsDict[variationSelector] = [] + uvsDict[variationSelector].append((unicodeValue, glyphName)) + uvsSubTable = buildCmapSubTable({}, 14, 0, 5) + uvsSubTable.uvsDict = uvsDict + subTables.append(uvsSubTable) + + self.font["cmap"] = newTable("cmap") + self.font["cmap"].tableVersion = 0 + self.font["cmap"].tables = subTables + + def setupNameTable(self, nameStrings, windows=True, mac=True): + """Create the `name` table for the font. The `nameStrings` argument must + be a dict, mapping nameIDs or descriptive names for the nameIDs to name + record values. A value is either a string, or a dict, mapping language codes + to strings, to allow localized name table entries. + + By default, both Windows (platformID=3) and Macintosh (platformID=1) name + records are added, unless any of `windows` or `mac` arguments is False. 
+ + The following descriptive names are available for nameIDs: + + copyright (nameID 0) + familyName (nameID 1) + styleName (nameID 2) + uniqueFontIdentifier (nameID 3) + fullName (nameID 4) + version (nameID 5) + psName (nameID 6) + trademark (nameID 7) + manufacturer (nameID 8) + designer (nameID 9) + description (nameID 10) + vendorURL (nameID 11) + designerURL (nameID 12) + licenseDescription (nameID 13) + licenseInfoURL (nameID 14) + typographicFamily (nameID 16) + typographicSubfamily (nameID 17) + compatibleFullName (nameID 18) + sampleText (nameID 19) + postScriptCIDFindfontName (nameID 20) + wwsFamilyName (nameID 21) + wwsSubfamilyName (nameID 22) + lightBackgroundPalette (nameID 23) + darkBackgroundPalette (nameID 24) + variationsPostScriptNamePrefix (nameID 25) + """ + nameTable = self.font["name"] = newTable("name") + nameTable.names = [] + + for nameName, nameValue in nameStrings.items(): + if isinstance(nameName, int): + nameID = nameName + else: + nameID = _nameIDs[nameName] + if isinstance(nameValue, str): + nameValue = dict(en=nameValue) + nameTable.addMultilingualName( + nameValue, ttFont=self.font, nameID=nameID, windows=windows, mac=mac + ) + + def setupOS2(self, **values): + """Create a new `OS/2` table and initialize it with default values, + which can be overridden by keyword arguments. 
+ """ + if "xAvgCharWidth" not in values: + gs = self.font.getGlyphSet() + widths = [ + gs[glyphName].width + for glyphName in gs.keys() + if gs[glyphName].width > 0 + ] + values["xAvgCharWidth"] = int(round(sum(widths) / float(len(widths)))) + self._initTableWithValues("OS/2", _OS2Defaults, values) + if not ( + "ulUnicodeRange1" in values + or "ulUnicodeRange2" in values + or "ulUnicodeRange3" in values + or "ulUnicodeRange3" in values + ): + assert ( + "cmap" in self.font + ), "the 'cmap' table must be setup before the 'OS/2' table" + self.font["OS/2"].recalcUnicodeRanges(self.font) + + def setupCFF(self, psName, fontInfo, charStringsDict, privateDict): + from .cffLib import ( + CFFFontSet, + TopDictIndex, + TopDict, + CharStrings, + GlobalSubrsIndex, + PrivateDict, + ) + + assert not self.isTTF + self.font.sfntVersion = "OTTO" + fontSet = CFFFontSet() + fontSet.major = 1 + fontSet.minor = 0 + fontSet.otFont = self.font + fontSet.fontNames = [psName] + fontSet.topDictIndex = TopDictIndex() + + globalSubrs = GlobalSubrsIndex() + fontSet.GlobalSubrs = globalSubrs + private = PrivateDict() + for key, value in privateDict.items(): + setattr(private, key, value) + fdSelect = None + fdArray = None + + topDict = TopDict() + topDict.charset = self.font.getGlyphOrder() + topDict.Private = private + topDict.GlobalSubrs = fontSet.GlobalSubrs + for key, value in fontInfo.items(): + setattr(topDict, key, value) + if "FontMatrix" not in fontInfo: + scale = 1 / self.font["head"].unitsPerEm + topDict.FontMatrix = [scale, 0, 0, scale, 0, 0] + + charStrings = CharStrings( + None, topDict.charset, globalSubrs, private, fdSelect, fdArray + ) + for glyphName, charString in charStringsDict.items(): + charString.private = private + charString.globalSubrs = globalSubrs + charStrings[glyphName] = charString + topDict.CharStrings = charStrings + + fontSet.topDictIndex.append(topDict) + + self.font["CFF "] = newTable("CFF ") + self.font["CFF "].cff = fontSet + + def setupCFF2(self, 
charStringsDict, fdArrayList=None, regions=None): + from .cffLib import ( + CFFFontSet, + TopDictIndex, + TopDict, + CharStrings, + GlobalSubrsIndex, + PrivateDict, + FDArrayIndex, + FontDict, + ) + + assert not self.isTTF + self.font.sfntVersion = "OTTO" + fontSet = CFFFontSet() + fontSet.major = 2 + fontSet.minor = 0 + + cff2GetGlyphOrder = self.font.getGlyphOrder + fontSet.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder, None) + + globalSubrs = GlobalSubrsIndex() + fontSet.GlobalSubrs = globalSubrs + + if fdArrayList is None: + fdArrayList = [{}] + fdSelect = None + fdArray = FDArrayIndex() + fdArray.strings = None + fdArray.GlobalSubrs = globalSubrs + for privateDict in fdArrayList: + fontDict = FontDict() + fontDict.setCFF2(True) + private = PrivateDict() + for key, value in privateDict.items(): + setattr(private, key, value) + fontDict.Private = private + fdArray.append(fontDict) + + topDict = TopDict() + topDict.cff2GetGlyphOrder = cff2GetGlyphOrder + topDict.FDArray = fdArray + scale = 1 / self.font["head"].unitsPerEm + topDict.FontMatrix = [scale, 0, 0, scale, 0, 0] + + private = fdArray[0].Private + charStrings = CharStrings(None, None, globalSubrs, private, fdSelect, fdArray) + for glyphName, charString in charStringsDict.items(): + charString.private = private + charString.globalSubrs = globalSubrs + charStrings[glyphName] = charString + topDict.CharStrings = charStrings + + fontSet.topDictIndex.append(topDict) + + self.font["CFF2"] = newTable("CFF2") + self.font["CFF2"].cff = fontSet + + if regions: + self.setupCFF2Regions(regions) + + def setupCFF2Regions(self, regions): + from .varLib.builder import buildVarRegionList, buildVarData, buildVarStore + from .cffLib import VarStoreData + + assert "fvar" in self.font, "fvar must to be set up first" + assert "CFF2" in self.font, "CFF2 must to be set up first" + axisTags = [a.axisTag for a in self.font["fvar"].axes] + varRegionList = buildVarRegionList(regions, axisTags) + varData = 
buildVarData(list(range(len(regions))), None, optimize=False) + varStore = buildVarStore(varRegionList, [varData]) + vstore = VarStoreData(otVarStore=varStore) + topDict = self.font["CFF2"].cff.topDictIndex[0] + topDict.VarStore = vstore + for fontDict in topDict.FDArray: + fontDict.Private.vstore = vstore + + def setupGlyf(self, glyphs, calcGlyphBounds=True): + """Create the `glyf` table from a dict, that maps glyph names + to `fontTools.ttLib.tables._g_l_y_f.Glyph` objects, for example + as made by `fontTools.pens.ttGlyphPen.TTGlyphPen`. + + If `calcGlyphBounds` is True, the bounds of all glyphs will be + calculated. Only pass False if your glyph objects already have + their bounding box values set. + """ + assert self.isTTF + self.font["loca"] = newTable("loca") + self.font["glyf"] = newTable("glyf") + self.font["glyf"].glyphs = glyphs + if hasattr(self.font, "glyphOrder"): + self.font["glyf"].glyphOrder = self.font.glyphOrder + if calcGlyphBounds: + self.calcGlyphBounds() + + def setupFvar(self, axes, instances): + """Adds an font variations table to the font. + + Args: + axes (list): See below. + instances (list): See below. + + ``axes`` should be a list of axes, with each axis either supplied as + a py:class:`.designspaceLib.AxisDescriptor` object, or a tuple in the + format ```tupletag, minValue, defaultValue, maxValue, name``. + The ``name`` is either a string, or a dict, mapping language codes + to strings, to allow localized name table entries. + + ```instances`` should be a list of instances, with each instance either + supplied as a py:class:`.designspaceLib.InstanceDescriptor` object, or a + dict with keys ``location`` (mapping of axis tags to float values), + ``stylename`` and (optionally) ``postscriptfontname``. + The ``stylename`` is either a string, or a dict, mapping language codes + to strings, to allow localized name table entries. 
+ """ + + addFvar(self.font, axes, instances) + + def setupAvar(self, axes): + """Adds an axis variations table to the font. + + Args: + axes (list): A list of py:class:`.designspaceLib.AxisDescriptor` objects. + """ + from .varLib import _add_avar + + _add_avar(self.font, OrderedDict(enumerate(axes))) # Only values are used + + def setupGvar(self, variations): + gvar = self.font["gvar"] = newTable("gvar") + gvar.version = 1 + gvar.reserved = 0 + gvar.variations = variations + + def calcGlyphBounds(self): + """Calculate the bounding boxes of all glyphs in the `glyf` table. + This is usually not called explicitly by client code. + """ + glyphTable = self.font["glyf"] + for glyph in glyphTable.glyphs.values(): + glyph.recalcBounds(glyphTable) + + def setupHorizontalMetrics(self, metrics): + """Create a new `hmtx` table, for horizontal metrics. + + The `metrics` argument must be a dict, mapping glyph names to + `(width, leftSidebearing)` tuples. + """ + self.setupMetrics("hmtx", metrics) + + def setupVerticalMetrics(self, metrics): + """Create a new `vmtx` table, for horizontal metrics. + + The `metrics` argument must be a dict, mapping glyph names to + `(height, topSidebearing)` tuples. + """ + self.setupMetrics("vmtx", metrics) + + def setupMetrics(self, tableTag, metrics): + """See `setupHorizontalMetrics()` and `setupVerticalMetrics()`.""" + assert tableTag in ("hmtx", "vmtx") + mtxTable = self.font[tableTag] = newTable(tableTag) + roundedMetrics = {} + for gn in metrics: + w, lsb = metrics[gn] + roundedMetrics[gn] = int(round(w)), int(round(lsb)) + mtxTable.metrics = roundedMetrics + + def setupHorizontalHeader(self, **values): + """Create a new `hhea` table initialize it with default values, + which can be overridden by keyword arguments. + """ + self._initTableWithValues("hhea", _hheaDefaults, values) + + def setupVerticalHeader(self, **values): + """Create a new `vhea` table initialize it with default values, + which can be overridden by keyword arguments. 
+ """ + self._initTableWithValues("vhea", _vheaDefaults, values) + + def setupVerticalOrigins(self, verticalOrigins, defaultVerticalOrigin=None): + """Create a new `VORG` table. The `verticalOrigins` argument must be + a dict, mapping glyph names to vertical origin values. + + The `defaultVerticalOrigin` argument should be the most common vertical + origin value. If omitted, this value will be derived from the actual + values in the `verticalOrigins` argument. + """ + if defaultVerticalOrigin is None: + # find the most frequent vorg value + bag = {} + for gn in verticalOrigins: + vorg = verticalOrigins[gn] + if vorg not in bag: + bag[vorg] = 1 + else: + bag[vorg] += 1 + defaultVerticalOrigin = sorted( + bag, key=lambda vorg: bag[vorg], reverse=True + )[0] + self._initTableWithValues( + "VORG", + {}, + dict(VOriginRecords={}, defaultVertOriginY=defaultVerticalOrigin), + ) + vorgTable = self.font["VORG"] + vorgTable.majorVersion = 1 + vorgTable.minorVersion = 0 + for gn in verticalOrigins: + vorgTable[gn] = verticalOrigins[gn] + + def setupPost(self, keepGlyphNames=True, **values): + """Create a new `post` table and initialize it with default values, + which can be overridden by keyword arguments. + """ + isCFF2 = "CFF2" in self.font + postTable = self._initTableWithValues("post", _postDefaults, values) + if (self.isTTF or isCFF2) and keepGlyphNames: + postTable.formatType = 2.0 + postTable.extraNames = [] + postTable.mapping = {} + else: + postTable.formatType = 3.0 + + def setupMaxp(self): + """Create a new `maxp` table. This is called implicitly by FontBuilder + itself and is usually not called by client code. + """ + if self.isTTF: + defaults = _maxpDefaultsTTF + else: + defaults = _maxpDefaultsOTF + self._initTableWithValues("maxp", defaults, {}) + + def setupDummyDSIG(self): + """This adds an empty DSIG table to the font to make some MS applications + happy. This does not properly sign the font. 
+ """ + values = dict( + ulVersion=1, + usFlag=0, + usNumSigs=0, + signatureRecords=[], + ) + self._initTableWithValues("DSIG", {}, values) + + def addOpenTypeFeatures(self, features, filename=None, tables=None): + """Add OpenType features to the font from a string containing + Feature File syntax. + + The `filename` argument is used in error messages and to determine + where to look for "include" files. + + The optional `tables` argument can be a list of OTL tables tags to + build, allowing the caller to only build selected OTL tables. See + `fontTools.feaLib` for details. + """ + from .feaLib.builder import addOpenTypeFeaturesFromString + + addOpenTypeFeaturesFromString( + self.font, features, filename=filename, tables=tables + ) + + def addFeatureVariations(self, conditionalSubstitutions, featureTag="rvrn"): + """Add conditional substitutions to a Variable Font. + + See `fontTools.varLib.featureVars.addFeatureVariations`. + """ + from .varLib import featureVars + + if "fvar" not in self.font: + raise KeyError("'fvar' table is missing; can't add FeatureVariations.") + + featureVars.addFeatureVariations( + self.font, conditionalSubstitutions, featureTag=featureTag + ) + + def setupCOLR( + self, + colorLayers, + version=None, + varStore=None, + varIndexMap=None, + clipBoxes=None, + ): + """Build new COLR table using color layers dictionary. + + Cf. `fontTools.colorLib.builder.buildCOLR`. + """ + from fontTools.colorLib.builder import buildCOLR + + glyphMap = self.font.getReverseGlyphMap() + self.font["COLR"] = buildCOLR( + colorLayers, + version=version, + glyphMap=glyphMap, + varStore=varStore, + varIndexMap=varIndexMap, + clipBoxes=clipBoxes, + ) + + def setupCPAL( + self, + palettes, + paletteTypes=None, + paletteLabels=None, + paletteEntryLabels=None, + ): + """Build new CPAL table using list of palettes. + + Optionally build CPAL v1 table using paletteTypes, paletteLabels and + paletteEntryLabels. + + Cf. `fontTools.colorLib.builder.buildCPAL`. 
+ """ + from fontTools.colorLib.builder import buildCPAL + + self.font["CPAL"] = buildCPAL( + palettes, + paletteTypes=paletteTypes, + paletteLabels=paletteLabels, + paletteEntryLabels=paletteEntryLabels, + nameTable=self.font.get("name"), + ) + + def setupStat(self, axes, locations=None, elidedFallbackName=2): + """Build a new 'STAT' table. + + See `fontTools.otlLib.builder.buildStatTable` for details about + the arguments. + """ + from .otlLib.builder import buildStatTable + + buildStatTable(self.font, axes, locations, elidedFallbackName) + + +def buildCmapSubTable(cmapping, format, platformID, platEncID): + subTable = cmap_classes[format](format) + subTable.cmap = cmapping + subTable.platformID = platformID + subTable.platEncID = platEncID + subTable.language = 0 + return subTable + + +def addFvar(font, axes, instances): + from .ttLib.tables._f_v_a_r import Axis, NamedInstance + + assert axes + + fvar = newTable("fvar") + nameTable = font["name"] + + for axis_def in axes: + axis = Axis() + + if isinstance(axis_def, tuple): + ( + axis.axisTag, + axis.minValue, + axis.defaultValue, + axis.maxValue, + name, + ) = axis_def + else: + (axis.axisTag, axis.minValue, axis.defaultValue, axis.maxValue, name) = ( + axis_def.tag, + axis_def.minimum, + axis_def.default, + axis_def.maximum, + axis_def.name, + ) + + if isinstance(name, str): + name = dict(en=name) + + axis.axisNameID = nameTable.addMultilingualName(name, ttFont=font) + fvar.axes.append(axis) + + for instance in instances: + if isinstance(instance, dict): + coordinates = instance["location"] + name = instance["stylename"] + psname = instance.get("postscriptfontname") + else: + coordinates = instance.location + name = instance.localisedStyleName or instance.styleName + psname = instance.postScriptFontName + + if isinstance(name, str): + name = dict(en=name) + + inst = NamedInstance() + inst.subfamilyNameID = nameTable.addMultilingualName(name, ttFont=font) + if psname is not None: + inst.postscriptNameID = 
nameTable.addName(psname) + inst.coordinates = coordinates + fvar.instances.append(inst) + + font["fvar"] = fvar diff --git a/.venv/lib/python3.9/site-packages/fontTools/help.py b/.venv/lib/python3.9/site-packages/fontTools/help.py new file mode 100644 index 00000000..4334e500 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/help.py @@ -0,0 +1,35 @@ +import pkgutil +import sys +import fontTools +import importlib +import os +from pathlib import Path + + +def main(): + """Show this help""" + path = fontTools.__path__ + descriptions = {} + for pkg in sorted( + mod.name + for mod in pkgutil.walk_packages([fontTools.__path__[0]], prefix="fontTools.") + ): + try: + imports = __import__(pkg, globals(), locals(), ["main"]) + except ImportError as e: + continue + try: + description = imports.main.__doc__ + if description: + pkg = pkg.replace("fontTools.", "").replace(".__main__", "") + # show the docstring's first line only + descriptions[pkg] = description.splitlines()[0] + except AttributeError as e: + pass + for pkg, description in descriptions.items(): + print("fonttools %-12s %s" % (pkg, description), file=sys.stderr) + + +if __name__ == "__main__": + print("fonttools v%s\n" % fontTools.__version__, file=sys.stderr) + main() diff --git a/.venv/lib/python3.9/site-packages/fontTools/merge.py b/.venv/lib/python3.9/site-packages/fontTools/merge.py new file mode 100644 index 00000000..fc2242b1 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/merge.py @@ -0,0 +1,1282 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. 
+# +# Google Author(s): Behdad Esfahbod, Roozbeh Pournader + +from fontTools.misc.timeTools import timestampNow +from fontTools import ttLib, cffLib +from fontTools.ttLib.tables import otTables, _h_e_a_d +from fontTools.ttLib.tables.DefaultTable import DefaultTable +from fontTools.misc.loggingTools import Timer +from fontTools.pens.recordingPen import DecomposingRecordingPen +from functools import reduce +import sys +import time +import operator +import logging +import os + + +log = logging.getLogger("fontTools.merge") +timer = Timer(logger=logging.getLogger(__name__+".timer"), level=logging.INFO) + + +def _add_method(*clazzes, **kwargs): + """Returns a decorator function that adds a new method to one or + more classes.""" + allowDefault = kwargs.get('allowDefaultTable', False) + def wrapper(method): + done = [] + for clazz in clazzes: + if clazz in done: continue # Support multiple names of a clazz + done.append(clazz) + assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.' + assert method.__name__ not in clazz.__dict__, \ + "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__) + setattr(clazz, method.__name__, method) + return None + return wrapper + +# General utility functions for merging values from different fonts + +def equal(lst): + lst = list(lst) + t = iter(lst) + first = next(t) + assert all(item == first for item in t), "Expected all items to be equal: %s" % lst + return first + +def first(lst): + return next(iter(lst)) + +def recalculate(lst): + return NotImplemented + +def current_time(lst): + return timestampNow() + +def bitwise_and(lst): + return reduce(operator.and_, lst) + +def bitwise_or(lst): + return reduce(operator.or_, lst) + +def avg_int(lst): + lst = list(lst) + return sum(lst) // len(lst) + +def onlyExisting(func): + """Returns a filter func that when called with a list, + only calls func on the non-NotImplemented items of the list, + and only so if there's at least one item remaining. 
+ Otherwise returns NotImplemented.""" + + def wrapper(lst): + items = [item for item in lst if item is not NotImplemented] + return func(items) if items else NotImplemented + + return wrapper + +def sumLists(lst): + l = [] + for item in lst: + l.extend(item) + return l + +def sumDicts(lst): + d = {} + for item in lst: + d.update(item) + return d + +def mergeObjects(lst): + lst = [item for item in lst if item is not NotImplemented] + if not lst: + return NotImplemented + lst = [item for item in lst if item is not None] + if not lst: + return None + + clazz = lst[0].__class__ + assert all(type(item) == clazz for item in lst), lst + + logic = clazz.mergeMap + returnTable = clazz() + returnDict = {} + + allKeys = set.union(set(), *(vars(table).keys() for table in lst)) + for key in allKeys: + try: + mergeLogic = logic[key] + except KeyError: + try: + mergeLogic = logic['*'] + except KeyError: + raise Exception("Don't know how to merge key %s of class %s" % + (key, clazz.__name__)) + if mergeLogic is NotImplemented: + continue + value = mergeLogic(getattr(table, key, NotImplemented) for table in lst) + if value is not NotImplemented: + returnDict[key] = value + + returnTable.__dict__ = returnDict + + return returnTable + +def mergeBits(bitmap): + + def wrapper(lst): + lst = list(lst) + returnValue = 0 + for bitNumber in range(bitmap['size']): + try: + mergeLogic = bitmap[bitNumber] + except KeyError: + try: + mergeLogic = bitmap['*'] + except KeyError: + raise Exception("Don't know how to merge bit %s" % bitNumber) + shiftedBit = 1 << bitNumber + mergedValue = mergeLogic(bool(item & shiftedBit) for item in lst) + returnValue |= mergedValue << bitNumber + return returnValue + + return wrapper + + +@_add_method(DefaultTable, allowDefaultTable=True) +def merge(self, m, tables): + if not hasattr(self, 'mergeMap'): + log.info("Don't know how to merge '%s'.", self.tableTag) + return NotImplemented + + logic = self.mergeMap + + if isinstance(logic, dict): + return 
m.mergeObjects(self, self.mergeMap, tables) + else: + return logic(tables) + + +ttLib.getTableClass('maxp').mergeMap = { + '*': max, + 'tableTag': equal, + 'tableVersion': equal, + 'numGlyphs': sum, + 'maxStorage': first, + 'maxFunctionDefs': first, + 'maxInstructionDefs': first, + # TODO When we correctly merge hinting data, update these values: + # maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions +} + +headFlagsMergeBitMap = { + 'size': 16, + '*': bitwise_or, + 1: bitwise_and, # Baseline at y = 0 + 2: bitwise_and, # lsb at x = 0 + 3: bitwise_and, # Force ppem to integer values. FIXME? + 5: bitwise_and, # Font is vertical + 6: lambda bit: 0, # Always set to zero + 11: bitwise_and, # Font data is 'lossless' + 13: bitwise_and, # Optimized for ClearType + 14: bitwise_and, # Last resort font. FIXME? equal or first may be better + 15: lambda bit: 0, # Always set to zero +} + +ttLib.getTableClass('head').mergeMap = { + 'tableTag': equal, + 'tableVersion': max, + 'fontRevision': max, + 'checkSumAdjustment': lambda lst: 0, # We need *something* here + 'magicNumber': equal, + 'flags': mergeBits(headFlagsMergeBitMap), + 'unitsPerEm': equal, + 'created': current_time, + 'modified': current_time, + 'xMin': min, + 'yMin': min, + 'xMax': max, + 'yMax': max, + 'macStyle': first, + 'lowestRecPPEM': max, + 'fontDirectionHint': lambda lst: 2, + 'indexToLocFormat': first, + 'glyphDataFormat': equal, +} + +ttLib.getTableClass('hhea').mergeMap = { + '*': equal, + 'tableTag': equal, + 'tableVersion': max, + 'ascent': max, + 'descent': min, + 'lineGap': max, + 'advanceWidthMax': max, + 'minLeftSideBearing': min, + 'minRightSideBearing': min, + 'xMaxExtent': max, + 'caretSlopeRise': first, + 'caretSlopeRun': first, + 'caretOffset': first, + 'numberOfHMetrics': recalculate, +} + +ttLib.getTableClass('vhea').mergeMap = { + '*': equal, + 'tableTag': equal, + 'tableVersion': max, + 'ascent': max, + 'descent': min, + 'lineGap': max, + 'advanceHeightMax': max, + 'minTopSideBearing': 
min, + 'minBottomSideBearing': min, + 'yMaxExtent': max, + 'caretSlopeRise': first, + 'caretSlopeRun': first, + 'caretOffset': first, + 'numberOfVMetrics': recalculate, +} + +os2FsTypeMergeBitMap = { + 'size': 16, + '*': lambda bit: 0, + 1: bitwise_or, # no embedding permitted + 2: bitwise_and, # allow previewing and printing documents + 3: bitwise_and, # allow editing documents + 8: bitwise_or, # no subsetting permitted + 9: bitwise_or, # no embedding of outlines permitted +} + +def mergeOs2FsType(lst): + lst = list(lst) + if all(item == 0 for item in lst): + return 0 + + # Compute least restrictive logic for each fsType value + for i in range(len(lst)): + # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set + if lst[i] & 0x000C: + lst[i] &= ~0x0002 + # set bit 2 (allow previewing) if bit 3 is set (allow editing) + elif lst[i] & 0x0008: + lst[i] |= 0x0004 + # set bits 2 and 3 if everything is allowed + elif lst[i] == 0: + lst[i] = 0x000C + + fsType = mergeBits(os2FsTypeMergeBitMap)(lst) + # unset bits 2 and 3 if bit 1 is set (some font is "no embedding") + if fsType & 0x0002: + fsType &= ~0x000C + return fsType + + +ttLib.getTableClass('OS/2').mergeMap = { + '*': first, + 'tableTag': equal, + 'version': max, + 'xAvgCharWidth': avg_int, # Apparently fontTools doesn't recalc this + 'fsType': mergeOs2FsType, # Will be overwritten + 'panose': first, # FIXME: should really be the first Latin font + 'ulUnicodeRange1': bitwise_or, + 'ulUnicodeRange2': bitwise_or, + 'ulUnicodeRange3': bitwise_or, + 'ulUnicodeRange4': bitwise_or, + 'fsFirstCharIndex': min, + 'fsLastCharIndex': max, + 'sTypoAscender': max, + 'sTypoDescender': min, + 'sTypoLineGap': max, + 'usWinAscent': max, + 'usWinDescent': max, + # Version 1 + 'ulCodePageRange1': onlyExisting(bitwise_or), + 'ulCodePageRange2': onlyExisting(bitwise_or), + # Version 2, 3, 4 + 'sxHeight': onlyExisting(max), + 'sCapHeight': onlyExisting(max), + 'usDefaultChar': onlyExisting(first), + 'usBreakChar': 
onlyExisting(first), + 'usMaxContext': onlyExisting(max), + # version 5 + 'usLowerOpticalPointSize': onlyExisting(min), + 'usUpperOpticalPointSize': onlyExisting(max), +} + +@_add_method(ttLib.getTableClass('OS/2')) +def merge(self, m, tables): + DefaultTable.merge(self, m, tables) + if self.version < 2: + # bits 8 and 9 are reserved and should be set to zero + self.fsType &= ~0x0300 + if self.version >= 3: + # Only one of bits 1, 2, and 3 may be set. We already take + # care of bit 1 implications in mergeOs2FsType. So unset + # bit 2 if bit 3 is already set. + if self.fsType & 0x0008: + self.fsType &= ~0x0004 + return self + +ttLib.getTableClass('post').mergeMap = { + '*': first, + 'tableTag': equal, + 'formatType': max, + 'isFixedPitch': min, + 'minMemType42': max, + 'maxMemType42': lambda lst: 0, + 'minMemType1': max, + 'maxMemType1': lambda lst: 0, + 'mapping': onlyExisting(sumDicts), + 'extraNames': lambda lst: [], +} + +ttLib.getTableClass('vmtx').mergeMap = ttLib.getTableClass('hmtx').mergeMap = { + 'tableTag': equal, + 'metrics': sumDicts, +} + +ttLib.getTableClass('name').mergeMap = { + 'tableTag': equal, + 'names': first, # FIXME? Does mixing name records make sense? +} + +ttLib.getTableClass('loca').mergeMap = { + '*': recalculate, + 'tableTag': equal, +} + +ttLib.getTableClass('glyf').mergeMap = { + 'tableTag': equal, + 'glyphs': sumDicts, + 'glyphOrder': sumLists, +} + +@_add_method(ttLib.getTableClass('glyf')) +def merge(self, m, tables): + for i,table in enumerate(tables): + for g in table.glyphs.values(): + if i: + # Drop hints for all but first font, since + # we don't map functions / CVT values. + g.removeHinting() + # Expand composite glyphs to load their + # composite glyph names. 
+ if g.isComposite(): + g.expand(table) + return DefaultTable.merge(self, m, tables) + +ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst) +ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst) +ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst) +ttLib.getTableClass('gasp').mergeMap = lambda self, lst: first(lst) # FIXME? Appears irreconcilable + +@_add_method(ttLib.getTableClass('CFF ')) +def merge(self, m, tables): + if any(hasattr(table, "FDSelect") for table in tables): + raise NotImplementedError( + "Merging CID-keyed CFF tables is not supported yet" + ) + + newcff = tables[0] + newfont = newcff.cff[0] + private = newfont.Private + storedNamesStrings = [] + glyphOrderStrings = [] + glyphOrder = set(newfont.getGlyphOrder()) + for name in newfont.strings.strings: + if name not in glyphOrder: + storedNamesStrings.append(name) + else: + glyphOrderStrings.append(name) + chrset = list(newfont.charset) + newcs = newfont.CharStrings + log.debug("FONT 0 CharStrings: %d.", len(newcs)) + for i, table in enumerate(tables[1:], start=1): + font = table.cff[0] + font.Private = private + fontGlyphOrder = set(font.getGlyphOrder()) + for name in font.strings.strings: + if name in fontGlyphOrder: + glyphOrderStrings.append(name) + cs = font.CharStrings + gs = table.cff.GlobalSubrs + log.debug("Font %d CharStrings: %d.", i, len(cs)) + chrset.extend(font.charset) + if newcs.charStringsAreIndexed: + for i, name in enumerate(cs.charStrings, start=len(newcs)): + newcs.charStrings[name] = i + newcs.charStringsIndex.items.append(None) + for name in cs.charStrings: + newcs[name] = cs[name] + + newfont.charset = chrset + newfont.numGlyphs = len(chrset) + newfont.strings.strings = glyphOrderStrings + storedNamesStrings + + return newcff + +def _glyphsAreSame(glyphSet1, glyphSet2, glyph1, glyph2): + pen1 = DecomposingRecordingPen(glyphSet1) + pen2 = DecomposingRecordingPen(glyphSet2) + g1 = glyphSet1[glyph1] + g2 = glyphSet2[glyph2] + 
g1.draw(pen1) + g2.draw(pen2) + return (pen1.value == pen2.value and + g1.width == g2.width and + (not hasattr(g1, 'height') or g1.height == g2.height)) + +# Valid (format, platformID, platEncID) triplets for cmap subtables containing +# Unicode BMP-only and Unicode Full Repertoire semantics. +# Cf. OpenType spec for "Platform specific encodings": +# https://docs.microsoft.com/en-us/typography/opentype/spec/name +class CmapUnicodePlatEncodings: + BMP = {(4, 3, 1), (4, 0, 3), (4, 0, 4), (4, 0, 6)} + FullRepertoire = {(12, 3, 10), (12, 0, 4), (12, 0, 6)} + +@_add_method(ttLib.getTableClass('cmap')) +def merge(self, m, tables): + # TODO Handle format=14. + # Only merge format 4 and 12 Unicode subtables, ignores all other subtables + # If there is a format 12 table for the same font, ignore the format 4 table + cmapTables = [] + for fontIdx,table in enumerate(tables): + format4 = None + format12 = None + for subtable in table.tables: + properties = (subtable.format, subtable.platformID, subtable.platEncID) + if properties in CmapUnicodePlatEncodings.BMP: + format4 = subtable + elif properties in CmapUnicodePlatEncodings.FullRepertoire: + format12 = subtable + else: + log.warning( + "Dropped cmap subtable from font [%s]:\t" + "format %2s, platformID %2s, platEncID %2s", + fontIdx, subtable.format, subtable.platformID, subtable.platEncID + ) + if format12 is not None: + cmapTables.append((format12, fontIdx)) + elif format4 is not None: + cmapTables.append((format4, fontIdx)) + + # Build a unicode mapping, then decide which format is needed to store it. + cmap = {} + fontIndexForGlyph = {} + glyphSets = [None for f in m.fonts] if hasattr(m, 'fonts') else None + for table,fontIdx in cmapTables: + # handle duplicates + for uni,gid in table.cmap.items(): + oldgid = cmap.get(uni, None) + if oldgid is None: + cmap[uni] = gid + fontIndexForGlyph[gid] = fontIdx + elif oldgid != gid: + # Char previously mapped to oldgid, now to gid. + # Record, to fix up in GSUB 'locl' later. 
+ if m.duplicateGlyphsPerFont[fontIdx].get(oldgid) is None: + if glyphSets is not None: + oldFontIdx = fontIndexForGlyph[oldgid] + for idx in (fontIdx, oldFontIdx): + if glyphSets[idx] is None: + glyphSets[idx] = m.fonts[idx].getGlyphSet() + if _glyphsAreSame(glyphSets[oldFontIdx], glyphSets[fontIdx], oldgid, gid): + continue + m.duplicateGlyphsPerFont[fontIdx][oldgid] = gid + elif m.duplicateGlyphsPerFont[fontIdx][oldgid] != gid: + # Char previously mapped to oldgid but oldgid is already remapped to a different + # gid, because of another Unicode character. + # TODO: Try harder to do something about these. + log.warning("Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid) + + cmapBmpOnly = {uni: gid for uni,gid in cmap.items() if uni <= 0xFFFF} + self.tables = [] + module = ttLib.getTableModule('cmap') + if len(cmapBmpOnly) != len(cmap): + # format-12 required. + cmapTable = module.cmap_classes[12](12) + cmapTable.platformID = 3 + cmapTable.platEncID = 10 + cmapTable.language = 0 + cmapTable.cmap = cmap + self.tables.append(cmapTable) + # always create format-4 + cmapTable = module.cmap_classes[4](4) + cmapTable.platformID = 3 + cmapTable.platEncID = 1 + cmapTable.language = 0 + cmapTable.cmap = cmapBmpOnly + # ordered by platform then encoding + self.tables.insert(0, cmapTable) + self.tableVersion = 0 + self.numSubTables = len(self.tables) + return self + + +def mergeLookupLists(lst): + # TODO Do smarter merge. 
+ return sumLists(lst) + +def mergeFeatures(lst): + assert lst + self = otTables.Feature() + self.FeatureParams = None + self.LookupListIndex = mergeLookupLists([l.LookupListIndex for l in lst if l.LookupListIndex]) + self.LookupCount = len(self.LookupListIndex) + return self + +def mergeFeatureLists(lst): + d = {} + for l in lst: + for f in l: + tag = f.FeatureTag + if tag not in d: + d[tag] = [] + d[tag].append(f.Feature) + ret = [] + for tag in sorted(d.keys()): + rec = otTables.FeatureRecord() + rec.FeatureTag = tag + rec.Feature = mergeFeatures(d[tag]) + ret.append(rec) + return ret + +def mergeLangSyses(lst): + assert lst + + # TODO Support merging ReqFeatureIndex + assert all(l.ReqFeatureIndex == 0xFFFF for l in lst) + + self = otTables.LangSys() + self.LookupOrder = None + self.ReqFeatureIndex = 0xFFFF + self.FeatureIndex = mergeFeatureLists([l.FeatureIndex for l in lst if l.FeatureIndex]) + self.FeatureCount = len(self.FeatureIndex) + return self + +def mergeScripts(lst): + assert lst + + if len(lst) == 1: + return lst[0] + langSyses = {} + for sr in lst: + for lsr in sr.LangSysRecord: + if lsr.LangSysTag not in langSyses: + langSyses[lsr.LangSysTag] = [] + langSyses[lsr.LangSysTag].append(lsr.LangSys) + lsrecords = [] + for tag, langSys_list in sorted(langSyses.items()): + lsr = otTables.LangSysRecord() + lsr.LangSys = mergeLangSyses(langSys_list) + lsr.LangSysTag = tag + lsrecords.append(lsr) + + self = otTables.Script() + self.LangSysRecord = lsrecords + self.LangSysCount = len(lsrecords) + dfltLangSyses = [s.DefaultLangSys for s in lst if s.DefaultLangSys] + if dfltLangSyses: + self.DefaultLangSys = mergeLangSyses(dfltLangSyses) + else: + self.DefaultLangSys = None + return self + +def mergeScriptRecords(lst): + d = {} + for l in lst: + for s in l: + tag = s.ScriptTag + if tag not in d: + d[tag] = [] + d[tag].append(s.Script) + ret = [] + for tag in sorted(d.keys()): + rec = otTables.ScriptRecord() + rec.ScriptTag = tag + rec.Script = 
mergeScripts(d[tag]) + ret.append(rec) + return ret + +otTables.ScriptList.mergeMap = { + 'ScriptCount': lambda lst: None, # TODO + 'ScriptRecord': mergeScriptRecords, +} +otTables.BaseScriptList.mergeMap = { + 'BaseScriptCount': lambda lst: None, # TODO + # TODO: Merge duplicate entries + 'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag), +} + +otTables.FeatureList.mergeMap = { + 'FeatureCount': sum, + 'FeatureRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag), +} + +otTables.LookupList.mergeMap = { + 'LookupCount': sum, + 'Lookup': sumLists, +} + +otTables.Coverage.mergeMap = { + 'Format': min, + 'glyphs': sumLists, +} + +otTables.ClassDef.mergeMap = { + 'Format': min, + 'classDefs': sumDicts, +} + +otTables.LigCaretList.mergeMap = { + 'Coverage': mergeObjects, + 'LigGlyphCount': sum, + 'LigGlyph': sumLists, +} + +otTables.AttachList.mergeMap = { + 'Coverage': mergeObjects, + 'GlyphCount': sum, + 'AttachPoint': sumLists, +} + +# XXX Renumber MarkFilterSets of lookups +otTables.MarkGlyphSetsDef.mergeMap = { + 'MarkSetTableFormat': equal, + 'MarkSetCount': sum, + 'Coverage': sumLists, +} + +otTables.Axis.mergeMap = { + '*': mergeObjects, +} + +# XXX Fix BASE table merging +otTables.BaseTagList.mergeMap = { + 'BaseTagCount': sum, + 'BaselineTag': sumLists, +} + +otTables.GDEF.mergeMap = \ +otTables.GSUB.mergeMap = \ +otTables.GPOS.mergeMap = \ +otTables.BASE.mergeMap = \ +otTables.JSTF.mergeMap = \ +otTables.MATH.mergeMap = \ +{ + '*': mergeObjects, + 'Version': max, +} + +ttLib.getTableClass('GDEF').mergeMap = \ +ttLib.getTableClass('GSUB').mergeMap = \ +ttLib.getTableClass('GPOS').mergeMap = \ +ttLib.getTableClass('BASE').mergeMap = \ +ttLib.getTableClass('JSTF').mergeMap = \ +ttLib.getTableClass('MATH').mergeMap = \ +{ + 'tableTag': onlyExisting(equal), # XXX clean me up + 'table': mergeObjects, +} + +@_add_method(ttLib.getTableClass('GSUB')) +def merge(self, m, tables): + + assert len(tables) == 
len(m.duplicateGlyphsPerFont) + for i,(table,dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)): + if not dups: continue + assert (table is not None and table is not NotImplemented), "Have duplicates to resolve for font %d but no GSUB: %s" % (i + 1, dups) + synthFeature = None + synthLookup = None + for script in table.table.ScriptList.ScriptRecord: + if script.ScriptTag == 'DFLT': continue # XXX + for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]: + if langsys is None: continue # XXX Create! + feature = [v for v in langsys.FeatureIndex if v.FeatureTag == 'locl'] + assert len(feature) <= 1 + if feature: + feature = feature[0] + else: + if not synthFeature: + synthFeature = otTables.FeatureRecord() + synthFeature.FeatureTag = 'locl' + f = synthFeature.Feature = otTables.Feature() + f.FeatureParams = None + f.LookupCount = 0 + f.LookupListIndex = [] + langsys.FeatureIndex.append(synthFeature) + langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag) + table.table.FeatureList.FeatureRecord.append(synthFeature) + table.table.FeatureList.FeatureCount += 1 + feature = synthFeature + + if not synthLookup: + subtable = otTables.SingleSubst() + subtable.mapping = dups + synthLookup = otTables.Lookup() + synthLookup.LookupFlag = 0 + synthLookup.LookupType = 1 + synthLookup.SubTableCount = 1 + synthLookup.SubTable = [subtable] + if table.table.LookupList is None: + # mtiLib uses None as default value for LookupList, + # while feaLib points to an empty array with count 0 + # TODO: make them do the same + table.table.LookupList = otTables.LookupList() + table.table.LookupList.Lookup = [] + table.table.LookupList.LookupCount = 0 + table.table.LookupList.Lookup.append(synthLookup) + table.table.LookupList.LookupCount += 1 + + feature.Feature.LookupListIndex[:0] = [synthLookup] + feature.Feature.LookupCount += 1 + + DefaultTable.merge(self, m, tables) + return self + +@_add_method(otTables.SingleSubst, + otTables.MultipleSubst, + 
otTables.AlternateSubst, + otTables.LigatureSubst, + otTables.ReverseChainSingleSubst, + otTables.SinglePos, + otTables.PairPos, + otTables.CursivePos, + otTables.MarkBasePos, + otTables.MarkLigPos, + otTables.MarkMarkPos) +def mapLookups(self, lookupMap): + pass + +# Copied and trimmed down from subset.py +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def __merge_classify_context(self): + + class ContextHelper(object): + def __init__(self, klass, Format): + if klass.__name__.endswith('Subst'): + Typ = 'Sub' + Type = 'Subst' + else: + Typ = 'Pos' + Type = 'Pos' + if klass.__name__.startswith('Chain'): + Chain = 'Chain' + else: + Chain = '' + ChainTyp = Chain+Typ + + self.Typ = Typ + self.Type = Type + self.Chain = Chain + self.ChainTyp = ChainTyp + + self.LookupRecord = Type+'LookupRecord' + + if Format == 1: + self.Rule = ChainTyp+'Rule' + self.RuleSet = ChainTyp+'RuleSet' + elif Format == 2: + self.Rule = ChainTyp+'ClassRule' + self.RuleSet = ChainTyp+'ClassSet' + + if self.Format not in [1, 2, 3]: + return None # Don't shoot the messenger; let it go + if not hasattr(self.__class__, "_merge__ContextHelpers"): + self.__class__._merge__ContextHelpers = {} + if self.Format not in self.__class__._merge__ContextHelpers: + helper = ContextHelper(self.__class__, self.Format) + self.__class__._merge__ContextHelpers[self.Format] = helper + return self.__class__._merge__ContextHelpers[self.Format] + + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def mapLookups(self, lookupMap): + c = self.__merge_classify_context() + + if self.Format in [1, 2]: + for rs in getattr(self, c.RuleSet): + if not rs: continue + for r in getattr(rs, c.Rule): + if not r: continue + for ll in getattr(r, c.LookupRecord): + if not ll: continue + ll.LookupListIndex = lookupMap[ll.LookupListIndex] + elif self.Format == 3: + for ll in getattr(self, 
c.LookupRecord): + if not ll: continue + ll.LookupListIndex = lookupMap[ll.LookupListIndex] + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def mapLookups(self, lookupMap): + if self.Format == 1: + self.ExtSubTable.mapLookups(lookupMap) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.Lookup) +def mapLookups(self, lookupMap): + for st in self.SubTable: + if not st: continue + st.mapLookups(lookupMap) + +@_add_method(otTables.LookupList) +def mapLookups(self, lookupMap): + for l in self.Lookup: + if not l: continue + l.mapLookups(lookupMap) + +@_add_method(otTables.Feature) +def mapLookups(self, lookupMap): + self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex] + +@_add_method(otTables.FeatureList) +def mapLookups(self, lookupMap): + for f in self.FeatureRecord: + if not f or not f.Feature: continue + f.Feature.mapLookups(lookupMap) + +@_add_method(otTables.DefaultLangSys, + otTables.LangSys) +def mapFeatures(self, featureMap): + self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex] + if self.ReqFeatureIndex != 65535: + self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex] + +@_add_method(otTables.Script) +def mapFeatures(self, featureMap): + if self.DefaultLangSys: + self.DefaultLangSys.mapFeatures(featureMap) + for l in self.LangSysRecord: + if not l or not l.LangSys: continue + l.LangSys.mapFeatures(featureMap) + +@_add_method(otTables.ScriptList) +def mapFeatures(self, featureMap): + for s in self.ScriptRecord: + if not s or not s.Script: continue + s.Script.mapFeatures(featureMap) + + +class Options(object): + + class UnknownOptionError(Exception): + pass + + def __init__(self, **kwargs): + + self.verbose = False + self.timing = False + + self.set(**kwargs) + + def set(self, **kwargs): + for k,v in kwargs.items(): + if not hasattr(self, k): + raise self.UnknownOptionError("Unknown option '%s'" % k) + setattr(self, k, v) + + def 
parse_opts(self, argv, ignore_unknown=[]): + ret = [] + opts = {} + for a in argv: + orig_a = a + if not a.startswith('--'): + ret.append(a) + continue + a = a[2:] + i = a.find('=') + op = '=' + if i == -1: + if a.startswith("no-"): + k = a[3:] + v = False + else: + k = a + v = True + else: + k = a[:i] + if k[-1] in "-+": + op = k[-1]+'=' # Ops is '-=' or '+=' now. + k = k[:-1] + v = a[i+1:] + ok = k + k = k.replace('-', '_') + if not hasattr(self, k): + if ignore_unknown is True or ok in ignore_unknown: + ret.append(orig_a) + continue + else: + raise self.UnknownOptionError("Unknown option '%s'" % a) + + ov = getattr(self, k) + if isinstance(ov, bool): + v = bool(v) + elif isinstance(ov, int): + v = int(v) + elif isinstance(ov, list): + vv = v.split(',') + if vv == ['']: + vv = [] + vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv] + if op == '=': + v = vv + elif op == '+=': + v = ov + v.extend(vv) + elif op == '-=': + v = ov + for x in vv: + if x in v: + v.remove(x) + else: + assert 0 + + opts[k] = v + self.set(**opts) + + return ret + +class _AttendanceRecordingIdentityDict(object): + """A dictionary-like object that records indices of items actually accessed + from a list.""" + + def __init__(self, lst): + self.l = lst + self.d = {id(v):i for i,v in enumerate(lst)} + self.s = set() + + def __getitem__(self, v): + self.s.add(self.d[id(v)]) + return v + +class _GregariousIdentityDict(object): + """A dictionary-like object that welcomes guests without reservations and + adds them to the end of the guest list.""" + + def __init__(self, lst): + self.l = lst + self.s = set(id(v) for v in lst) + + def __getitem__(self, v): + if id(v) not in self.s: + self.s.add(id(v)) + self.l.append(v) + return v + +class _NonhashableDict(object): + """A dictionary-like object mapping objects to values.""" + + def __init__(self, keys, values=None): + if values is None: + self.d = {id(v):i for i,v in enumerate(keys)} + else: + self.d = {id(k):v for k,v in 
zip(keys, values)} + + def __getitem__(self, k): + return self.d[id(k)] + + def __setitem__(self, k, v): + self.d[id(k)] = v + + def __delitem__(self, k): + del self.d[id(k)] + +class Merger(object): + """Font merger. + + This class merges multiple files into a single OpenType font, taking into + account complexities such as OpenType layout (``GSUB``/``GPOS``) tables and + cross-font metrics (e.g. ``hhea.ascent`` is set to the maximum value across + all the fonts). + + If multiple glyphs map to the same Unicode value, and the glyphs are considered + sufficiently different (that is, they differ in any of paths, widths, or + height), then subsequent glyphs are renamed and a lookup in the ``locl`` + feature will be created to disambiguate them. For example, if the arguments + are an Arabic font and a Latin font and both contain a set of parentheses, + the Latin glyphs will be renamed to ``parenleft#1`` and ``parenright#1``, + and a lookup will be inserted into the to ``locl`` feature (creating it if + necessary) under the ``latn`` script to substitute ``parenleft`` with + ``parenleft#1`` etc. + + Restrictions: + + - All fonts must currently have TrueType outlines (``glyf`` table). + Merging fonts with CFF outlines is not supported. + - All fonts must have the same units per em. + - If duplicate glyph disambiguation takes place as described above then the + fonts must have a ``GSUB`` table. + + Attributes: + options: Currently unused. + """ + + def __init__(self, options=None): + + if not options: + options = Options() + + self.options = options + + def merge(self, fontfiles): + """Merges fonts together. + + Args: + fontfiles: A list of file names to be merged + + Returns: + A :class:`fontTools.ttLib.TTFont` object. Call the ``save`` method on + this to write it out to an OTF file. + """ + # + # Settle on a mega glyph order. 
+ # + fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles] + glyphOrders = [font.getGlyphOrder() for font in fonts] + megaGlyphOrder = self._mergeGlyphOrders(glyphOrders) + + # Take first input file sfntVersion + sfntVersion = fonts[0].sfntVersion + + cffTables = [None] * len(fonts) + if sfntVersion == "OTTO": + for i, font in enumerate(fonts): + font['CFF '].cff.desubroutinize() + cffTables[i] = font['CFF '] + + # Reload fonts and set new glyph names on them. + # TODO Is it necessary to reload font? I think it is. At least + # it's safer, in case tables were loaded to provide glyph names. + fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles] + for font, glyphOrder, cffTable in zip(fonts, glyphOrders, cffTables): + font.setGlyphOrder(glyphOrder) + if cffTable: + # Rename CFF CharStrings to match the new glyphOrder. + # Using cffTable from before reloading the fonts, because reasons. + self._renameCFFCharStrings(glyphOrder, cffTable) + font['CFF '] = cffTable + + mega = ttLib.TTFont(sfntVersion=sfntVersion) + mega.setGlyphOrder(megaGlyphOrder) + + for font in fonts: + self._preMerge(font) + + self.fonts = fonts + self.duplicateGlyphsPerFont = [{} for _ in fonts] + + allTags = reduce(set.union, (list(font.keys()) for font in fonts), set()) + allTags.remove('GlyphOrder') + + # Make sure we process cmap before GSUB as we have a dependency there. 
+ if 'GSUB' in allTags: + allTags.remove('GSUB') + allTags = ['GSUB'] + list(allTags) + if 'cmap' in allTags: + allTags.remove('cmap') + allTags = ['cmap'] + list(allTags) + + for tag in allTags: + with timer("merge '%s'" % tag): + tables = [font.get(tag, NotImplemented) for font in fonts] + + log.info("Merging '%s'.", tag) + clazz = ttLib.getTableClass(tag) + table = clazz(tag).merge(self, tables) + # XXX Clean this up and use: table = mergeObjects(tables) + + if table is not NotImplemented and table is not False: + mega[tag] = table + log.info("Merged '%s'.", tag) + else: + log.info("Dropped '%s'.", tag) + + del self.duplicateGlyphsPerFont + del self.fonts + + self._postMerge(mega) + + return mega + + def _mergeGlyphOrders(self, glyphOrders): + """Modifies passed-in glyphOrders to reflect new glyph names. + Returns glyphOrder for the merged font.""" + mega = {} + for glyphOrder in glyphOrders: + for i,glyphName in enumerate(glyphOrder): + if glyphName in mega: + n = mega[glyphName] + while (glyphName + "#" + repr(n)) in mega: + n += 1 + mega[glyphName] = n + glyphName += "#" + repr(n) + glyphOrder[i] = glyphName + mega[glyphName] = 1 + return list(mega.keys()) + + def _renameCFFCharStrings(self, glyphOrder, cffTable): + """Rename topDictIndex charStrings based on glyphOrder.""" + td = cffTable.cff.topDictIndex[0] + charStrings = {} + for i, v in enumerate(td.CharStrings.charStrings.values()): + glyphName = glyphOrder[i] + charStrings[glyphName] = v + cffTable.cff.topDictIndex[0].CharStrings.charStrings = charStrings + + def mergeObjects(self, returnTable, logic, tables): + # Right now we don't use self at all. Will use in the future + # for options and logging. 
+ + allKeys = set.union(set(), *(vars(table).keys() for table in tables if table is not NotImplemented)) + for key in allKeys: + try: + mergeLogic = logic[key] + except KeyError: + try: + mergeLogic = logic['*'] + except KeyError: + raise Exception("Don't know how to merge key %s of class %s" % + (key, returnTable.__class__.__name__)) + if mergeLogic is NotImplemented: + continue + value = mergeLogic(getattr(table, key, NotImplemented) for table in tables) + if value is not NotImplemented: + setattr(returnTable, key, value) + + return returnTable + + def _preMerge(self, font): + + # Map indices to references + + GDEF = font.get('GDEF') + GSUB = font.get('GSUB') + GPOS = font.get('GPOS') + + for t in [GSUB, GPOS]: + if not t: continue + + if t.table.LookupList: + lookupMap = {i:v for i,v in enumerate(t.table.LookupList.Lookup)} + t.table.LookupList.mapLookups(lookupMap) + t.table.FeatureList.mapLookups(lookupMap) + + if t.table.FeatureList and t.table.ScriptList: + featureMap = {i:v for i,v in enumerate(t.table.FeatureList.FeatureRecord)} + t.table.ScriptList.mapFeatures(featureMap) + + # TODO GDEF/Lookup MarkFilteringSets + # TODO FeatureParams nameIDs + + def _postMerge(self, font): + + # Map references back to indices + + GDEF = font.get('GDEF') + GSUB = font.get('GSUB') + GPOS = font.get('GPOS') + + for t in [GSUB, GPOS]: + if not t: continue + + if t.table.FeatureList and t.table.ScriptList: + + # Collect unregistered (new) features. + featureMap = _GregariousIdentityDict(t.table.FeatureList.FeatureRecord) + t.table.ScriptList.mapFeatures(featureMap) + + # Record used features. + featureMap = _AttendanceRecordingIdentityDict(t.table.FeatureList.FeatureRecord) + t.table.ScriptList.mapFeatures(featureMap) + usedIndices = featureMap.s + + # Remove unused features + t.table.FeatureList.FeatureRecord = [f for i,f in enumerate(t.table.FeatureList.FeatureRecord) if i in usedIndices] + + # Map back to indices. 
+ featureMap = _NonhashableDict(t.table.FeatureList.FeatureRecord) + t.table.ScriptList.mapFeatures(featureMap) + + t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord) + + if t.table.LookupList: + + # Collect unregistered (new) lookups. + lookupMap = _GregariousIdentityDict(t.table.LookupList.Lookup) + t.table.FeatureList.mapLookups(lookupMap) + t.table.LookupList.mapLookups(lookupMap) + + # Record used lookups. + lookupMap = _AttendanceRecordingIdentityDict(t.table.LookupList.Lookup) + t.table.FeatureList.mapLookups(lookupMap) + t.table.LookupList.mapLookups(lookupMap) + usedIndices = lookupMap.s + + # Remove unused lookups + t.table.LookupList.Lookup = [l for i,l in enumerate(t.table.LookupList.Lookup) if i in usedIndices] + + # Map back to indices. + lookupMap = _NonhashableDict(t.table.LookupList.Lookup) + t.table.FeatureList.mapLookups(lookupMap) + t.table.LookupList.mapLookups(lookupMap) + + t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup) + + # TODO GDEF/Lookup MarkFilteringSets + # TODO FeatureParams nameIDs + + +__all__ = [ + 'Options', + 'Merger', + 'main' +] + +@timer("make one with everything (TOTAL TIME)") +def main(args=None): + """Merge multiple fonts into one""" + from fontTools import configLogger + + if args is None: + args = sys.argv[1:] + + options = Options() + args = options.parse_opts(args, ignore_unknown=['output-file']) + outfile = 'merged.ttf' + fontfiles = [] + for g in args: + if g.startswith('--output-file='): + outfile = g[14:] + continue + fontfiles.append(g) + + if len(args) < 1: + print("usage: pyftmerge font...", file=sys.stderr) + return 1 + + configLogger(level=logging.INFO if options.verbose else logging.WARNING) + if options.timing: + timer.logger.setLevel(logging.DEBUG) + else: + timer.logger.disabled = True + + merger = Merger(options=options) + font = merger.merge(fontfiles) + with timer("compile and save font"): + font.save(outfile) + + +if __name__ == "__main__": + sys.exit(main()) diff 
--git a/.venv/lib/python3.9/site-packages/fontTools/misc/__init__.py b/.venv/lib/python3.9/site-packages/fontTools/misc/__init__.py new file mode 100644 index 00000000..156cb232 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/__init__.py @@ -0,0 +1 @@ +"""Empty __init__.py file to signal Python this directory is a package.""" diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/__init__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..b6929604 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/arrayTools.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/arrayTools.cpython-39.pyc new file mode 100644 index 00000000..0d573bcd Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/arrayTools.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/bezierTools.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/bezierTools.cpython-39.pyc new file mode 100644 index 00000000..1379cc67 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/bezierTools.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/classifyTools.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/classifyTools.cpython-39.pyc new file mode 100644 index 00000000..2d08acad Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/classifyTools.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/cliTools.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/cliTools.cpython-39.pyc new file mode 100644 index 
00000000..929c60ed Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/cliTools.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/cython.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/cython.cpython-39.pyc new file mode 100644 index 00000000..04649e31 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/cython.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/dictTools.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/dictTools.cpython-39.pyc new file mode 100644 index 00000000..33a6d8b6 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/dictTools.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/eexec.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/eexec.cpython-39.pyc new file mode 100644 index 00000000..e9f4fd2c Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/eexec.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/encodingTools.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/encodingTools.cpython-39.pyc new file mode 100644 index 00000000..ad3e63fd Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/encodingTools.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/etree.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/etree.cpython-39.pyc new file mode 100644 index 00000000..0fcb1270 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/etree.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/filenames.cpython-39.pyc 
b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/filenames.cpython-39.pyc new file mode 100644 index 00000000..902e6659 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/filenames.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/fixedTools.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/fixedTools.cpython-39.pyc new file mode 100644 index 00000000..466a3cce Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/fixedTools.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/intTools.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/intTools.cpython-39.pyc new file mode 100644 index 00000000..b75036e0 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/intTools.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/loggingTools.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/loggingTools.cpython-39.pyc new file mode 100644 index 00000000..cfc4a646 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/loggingTools.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/macCreatorType.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/macCreatorType.cpython-39.pyc new file mode 100644 index 00000000..bdd9810c Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/macCreatorType.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/macRes.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/macRes.cpython-39.pyc new file mode 100644 index 00000000..82840f00 Binary files /dev/null and 
b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/macRes.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/psCharStrings.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/psCharStrings.cpython-39.pyc new file mode 100644 index 00000000..65b4cbf2 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/psCharStrings.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/psLib.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/psLib.cpython-39.pyc new file mode 100644 index 00000000..688e8022 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/psLib.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/psOperators.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/psOperators.cpython-39.pyc new file mode 100644 index 00000000..499bd277 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/psOperators.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/py23.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/py23.cpython-39.pyc new file mode 100644 index 00000000..e013e17d Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/py23.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/roundTools.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/roundTools.cpython-39.pyc new file mode 100644 index 00000000..f91b5b9c Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/roundTools.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/sstruct.cpython-39.pyc 
b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/sstruct.cpython-39.pyc new file mode 100644 index 00000000..4db9abfb Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/sstruct.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/symfont.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/symfont.cpython-39.pyc new file mode 100644 index 00000000..81bab9a6 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/symfont.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/testTools.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/testTools.cpython-39.pyc new file mode 100644 index 00000000..a589d8d1 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/testTools.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/textTools.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/textTools.cpython-39.pyc new file mode 100644 index 00000000..691b97b3 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/textTools.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/timeTools.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/timeTools.cpython-39.pyc new file mode 100644 index 00000000..7267744c Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/timeTools.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/transform.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/transform.cpython-39.pyc new file mode 100644 index 00000000..2676927e Binary files /dev/null and 
b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/transform.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/vector.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/vector.cpython-39.pyc new file mode 100644 index 00000000..0bd2c06f Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/vector.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/xmlReader.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/xmlReader.cpython-39.pyc new file mode 100644 index 00000000..fdcfb115 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/xmlReader.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/xmlWriter.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/xmlWriter.cpython-39.pyc new file mode 100644 index 00000000..b9c4b3be Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/__pycache__/xmlWriter.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/arrayTools.py b/.venv/lib/python3.9/site-packages/fontTools/misc/arrayTools.py new file mode 100644 index 00000000..01ccbe82 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/arrayTools.py @@ -0,0 +1,379 @@ +"""Routines for calculating bounding boxes, point in rectangle calculations and +so on. +""" + +from fontTools.misc.roundTools import otRound +from fontTools.misc.vector import Vector as _Vector +import math +import warnings + + +def calcBounds(array): + """Calculate the bounding rectangle of a 2D points array. + + Args: + array: A sequence of 2D tuples. + + Returns: + A four-item tuple representing the bounding rectangle ``(xMin, yMin, xMax, yMax)``. 
+ """ + if not array: + return 0, 0, 0, 0 + xs = [x for x, y in array] + ys = [y for x, y in array] + return min(xs), min(ys), max(xs), max(ys) + +def calcIntBounds(array, round=otRound): + """Calculate the integer bounding rectangle of a 2D points array. + + Values are rounded to closest integer towards ``+Infinity`` using the + :func:`fontTools.misc.fixedTools.otRound` function by default, unless + an optional ``round`` function is passed. + + Args: + array: A sequence of 2D tuples. + round: A rounding function of type ``f(x: float) -> int``. + + Returns: + A four-item tuple of integers representing the bounding rectangle: + ``(xMin, yMin, xMax, yMax)``. + """ + return tuple(round(v) for v in calcBounds(array)) + + +def updateBounds(bounds, p, min=min, max=max): + """Add a point to a bounding rectangle. + + Args: + bounds: A bounding rectangle expressed as a tuple + ``(xMin, yMin, xMax, yMax)``. + p: A 2D tuple representing a point. + min,max: functions to compute the minimum and maximum. + + Returns: + The updated bounding rectangle ``(xMin, yMin, xMax, yMax)``. + """ + (x, y) = p + xMin, yMin, xMax, yMax = bounds + return min(xMin, x), min(yMin, y), max(xMax, x), max(yMax, y) + +def pointInRect(p, rect): + """Test if a point is inside a bounding rectangle. + + Args: + p: A 2D tuple representing a point. + rect: A bounding rectangle expressed as a tuple + ``(xMin, yMin, xMax, yMax)``. + + Returns: + ``True`` if the point is inside the rectangle, ``False`` otherwise. + """ + (x, y) = p + xMin, yMin, xMax, yMax = rect + return (xMin <= x <= xMax) and (yMin <= y <= yMax) + +def pointsInRect(array, rect): + """Determine which points are inside a bounding rectangle. + + Args: + array: A sequence of 2D tuples. + rect: A bounding rectangle expressed as a tuple + ``(xMin, yMin, xMax, yMax)``. + + Returns: + A list containing the points inside the rectangle. 
+ """ + if len(array) < 1: + return [] + xMin, yMin, xMax, yMax = rect + return [(xMin <= x <= xMax) and (yMin <= y <= yMax) for x, y in array] + +def vectorLength(vector): + """Calculate the length of the given vector. + + Args: + vector: A 2D tuple. + + Returns: + The Euclidean length of the vector. + """ + x, y = vector + return math.sqrt(x**2 + y**2) + +def asInt16(array): + """Round a list of floats to 16-bit signed integers. + + Args: + array: List of float values. + + Returns: + A list of rounded integers. + """ + return [int(math.floor(i+0.5)) for i in array] + + +def normRect(rect): + """Normalize a bounding box rectangle. + + This function "turns the rectangle the right way up", so that the following + holds:: + + xMin <= xMax and yMin <= yMax + + Args: + rect: A bounding rectangle expressed as a tuple + ``(xMin, yMin, xMax, yMax)``. + + Returns: + A normalized bounding rectangle. + """ + (xMin, yMin, xMax, yMax) = rect + return min(xMin, xMax), min(yMin, yMax), max(xMin, xMax), max(yMin, yMax) + +def scaleRect(rect, x, y): + """Scale a bounding box rectangle. + + Args: + rect: A bounding rectangle expressed as a tuple + ``(xMin, yMin, xMax, yMax)``. + x: Factor to scale the rectangle along the X axis. + Y: Factor to scale the rectangle along the Y axis. + + Returns: + A scaled bounding rectangle. + """ + (xMin, yMin, xMax, yMax) = rect + return xMin * x, yMin * y, xMax * x, yMax * y + +def offsetRect(rect, dx, dy): + """Offset a bounding box rectangle. + + Args: + rect: A bounding rectangle expressed as a tuple + ``(xMin, yMin, xMax, yMax)``. + dx: Amount to offset the rectangle along the X axis. + dY: Amount to offset the rectangle along the Y axis. + + Returns: + An offset bounding rectangle. + """ + (xMin, yMin, xMax, yMax) = rect + return xMin+dx, yMin+dy, xMax+dx, yMax+dy + +def insetRect(rect, dx, dy): + """Inset a bounding box rectangle on all sides. + + Args: + rect: A bounding rectangle expressed as a tuple + ``(xMin, yMin, xMax, yMax)``. 
+ dx: Amount to inset the rectangle along the X axis. + dY: Amount to inset the rectangle along the Y axis. + + Returns: + An inset bounding rectangle. + """ + (xMin, yMin, xMax, yMax) = rect + return xMin+dx, yMin+dy, xMax-dx, yMax-dy + +def sectRect(rect1, rect2): + """Test for rectangle-rectangle intersection. + + Args: + rect1: First bounding rectangle, expressed as tuples + ``(xMin, yMin, xMax, yMax)``. + rect2: Second bounding rectangle. + + Returns: + A boolean and a rectangle. + If the input rectangles intersect, returns ``True`` and the intersecting + rectangle. Returns ``False`` and ``(0, 0, 0, 0)`` if the input + rectangles don't intersect. + """ + (xMin1, yMin1, xMax1, yMax1) = rect1 + (xMin2, yMin2, xMax2, yMax2) = rect2 + xMin, yMin, xMax, yMax = (max(xMin1, xMin2), max(yMin1, yMin2), + min(xMax1, xMax2), min(yMax1, yMax2)) + if xMin >= xMax or yMin >= yMax: + return False, (0, 0, 0, 0) + return True, (xMin, yMin, xMax, yMax) + +def unionRect(rect1, rect2): + """Determine union of bounding rectangles. + + Args: + rect1: First bounding rectangle, expressed as tuples + ``(xMin, yMin, xMax, yMax)``. + rect2: Second bounding rectangle. + + Returns: + The smallest rectangle in which both input rectangles are fully + enclosed. + """ + (xMin1, yMin1, xMax1, yMax1) = rect1 + (xMin2, yMin2, xMax2, yMax2) = rect2 + xMin, yMin, xMax, yMax = (min(xMin1, xMin2), min(yMin1, yMin2), + max(xMax1, xMax2), max(yMax1, yMax2)) + return (xMin, yMin, xMax, yMax) + +def rectCenter(rect): + """Determine rectangle center. + + Args: + rect: Bounding rectangle, expressed as tuples + ``(xMin, yMin, xMax, yMax)``. + + Returns: + A 2D tuple representing the point at the center of the rectangle. + """ + (xMin, yMin, xMax, yMax) = rect + return (xMin+xMax)/2, (yMin+yMax)/2 + +def rectArea(rect): + """Determine rectangle area. + + Args: + rect: Bounding rectangle, expressed as tuples + ``(xMin, yMin, xMax, yMax)``. + + Returns: + The area of the rectangle. 
+ """ + (xMin, yMin, xMax, yMax) = rect + return (yMax - yMin) * (xMax - xMin) + +def intRect(rect): + """Round a rectangle to integer values. + + Guarantees that the resulting rectangle is NOT smaller than the original. + + Args: + rect: Bounding rectangle, expressed as tuples + ``(xMin, yMin, xMax, yMax)``. + + Returns: + A rounded bounding rectangle. + """ + (xMin, yMin, xMax, yMax) = rect + xMin = int(math.floor(xMin)) + yMin = int(math.floor(yMin)) + xMax = int(math.ceil(xMax)) + yMax = int(math.ceil(yMax)) + return (xMin, yMin, xMax, yMax) + + +class Vector(_Vector): + + def __init__(self, *args, **kwargs): + warnings.warn( + "fontTools.misc.arrayTools.Vector has been deprecated, please use " + "fontTools.misc.vector.Vector instead.", + DeprecationWarning, + ) + + +def pairwise(iterable, reverse=False): + """Iterate over current and next items in iterable. + + Args: + iterable: An iterable + reverse: If true, iterate in reverse order. + + Returns: + A iterable yielding two elements per iteration. 
+ + Example: + + >>> tuple(pairwise([])) + () + >>> tuple(pairwise([], reverse=True)) + () + >>> tuple(pairwise([0])) + ((0, 0),) + >>> tuple(pairwise([0], reverse=True)) + ((0, 0),) + >>> tuple(pairwise([0, 1])) + ((0, 1), (1, 0)) + >>> tuple(pairwise([0, 1], reverse=True)) + ((1, 0), (0, 1)) + >>> tuple(pairwise([0, 1, 2])) + ((0, 1), (1, 2), (2, 0)) + >>> tuple(pairwise([0, 1, 2], reverse=True)) + ((2, 1), (1, 0), (0, 2)) + >>> tuple(pairwise(['a', 'b', 'c', 'd'])) + (('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'a')) + >>> tuple(pairwise(['a', 'b', 'c', 'd'], reverse=True)) + (('d', 'c'), ('c', 'b'), ('b', 'a'), ('a', 'd')) + """ + if not iterable: + return + if reverse: + it = reversed(iterable) + else: + it = iter(iterable) + first = next(it, None) + a = first + for b in it: + yield (a, b) + a = b + yield (a, first) + + +def _test(): + """ + >>> import math + >>> calcBounds([]) + (0, 0, 0, 0) + >>> calcBounds([(0, 40), (0, 100), (50, 50), (80, 10)]) + (0, 10, 80, 100) + >>> updateBounds((0, 0, 0, 0), (100, 100)) + (0, 0, 100, 100) + >>> pointInRect((50, 50), (0, 0, 100, 100)) + True + >>> pointInRect((0, 0), (0, 0, 100, 100)) + True + >>> pointInRect((100, 100), (0, 0, 100, 100)) + True + >>> not pointInRect((101, 100), (0, 0, 100, 100)) + True + >>> list(pointsInRect([(50, 50), (0, 0), (100, 100), (101, 100)], (0, 0, 100, 100))) + [True, True, True, False] + >>> vectorLength((3, 4)) + 5.0 + >>> vectorLength((1, 1)) == math.sqrt(2) + True + >>> list(asInt16([0, 0.1, 0.5, 0.9])) + [0, 0, 1, 1] + >>> normRect((0, 10, 100, 200)) + (0, 10, 100, 200) + >>> normRect((100, 200, 0, 10)) + (0, 10, 100, 200) + >>> scaleRect((10, 20, 50, 150), 1.5, 2) + (15.0, 40, 75.0, 300) + >>> offsetRect((10, 20, 30, 40), 5, 6) + (15, 26, 35, 46) + >>> insetRect((10, 20, 50, 60), 5, 10) + (15, 30, 45, 50) + >>> insetRect((10, 20, 50, 60), -5, -10) + (5, 10, 55, 70) + >>> intersects, rect = sectRect((0, 10, 20, 30), (0, 40, 20, 50)) + >>> not intersects + True + >>> intersects, rect = 
sectRect((0, 10, 20, 30), (5, 20, 35, 50)) + >>> intersects + 1 + >>> rect + (5, 20, 20, 30) + >>> unionRect((0, 10, 20, 30), (0, 40, 20, 50)) + (0, 10, 20, 50) + >>> rectCenter((0, 0, 100, 200)) + (50.0, 100.0) + >>> rectCenter((0, 0, 100, 199.0)) + (50.0, 99.5) + >>> intRect((0.9, 2.9, 3.1, 4.1)) + (0, 2, 4, 5) + """ + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/bezierTools.py b/.venv/lib/python3.9/site-packages/fontTools/misc/bezierTools.py new file mode 100644 index 00000000..25e5c548 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/bezierTools.py @@ -0,0 +1,1225 @@ +# -*- coding: utf-8 -*- +"""fontTools.misc.bezierTools.py -- tools for working with Bezier path segments. +""" + +from fontTools.misc.arrayTools import calcBounds, sectRect, rectArea +from fontTools.misc.transform import Identity +import math +from collections import namedtuple + +Intersection = namedtuple("Intersection", ["pt", "t1", "t2"]) + + +__all__ = [ + "approximateCubicArcLength", + "approximateCubicArcLengthC", + "approximateQuadraticArcLength", + "approximateQuadraticArcLengthC", + "calcCubicArcLength", + "calcCubicArcLengthC", + "calcQuadraticArcLength", + "calcQuadraticArcLengthC", + "calcCubicBounds", + "calcQuadraticBounds", + "splitLine", + "splitQuadratic", + "splitCubic", + "splitQuadraticAtT", + "splitCubicAtT", + "solveQuadratic", + "solveCubic", + "quadraticPointAtT", + "cubicPointAtT", + "linePointAtT", + "segmentPointAtT", + "lineLineIntersections", + "curveLineIntersections", + "curveCurveIntersections", + "segmentSegmentIntersections", +] + + +def calcCubicArcLength(pt1, pt2, pt3, pt4, tolerance=0.005): + """Calculates the arc length for a cubic Bezier segment. 
+ + Whereas :func:`approximateCubicArcLength` approximates the length, this + function calculates it by "measuring", recursively dividing the curve + until the divided segments are shorter than ``tolerance``. + + Args: + pt1,pt2,pt3,pt4: Control points of the Bezier as 2D tuples. + tolerance: Controls the precision of the calcuation. + + Returns: + Arc length value. + """ + return calcCubicArcLengthC( + complex(*pt1), complex(*pt2), complex(*pt3), complex(*pt4), tolerance + ) + + +def _split_cubic_into_two(p0, p1, p2, p3): + mid = (p0 + 3 * (p1 + p2) + p3) * 0.125 + deriv3 = (p3 + p2 - p1 - p0) * 0.125 + return ( + (p0, (p0 + p1) * 0.5, mid - deriv3, mid), + (mid, mid + deriv3, (p2 + p3) * 0.5, p3), + ) + + +def _calcCubicArcLengthCRecurse(mult, p0, p1, p2, p3): + arch = abs(p0 - p3) + box = abs(p0 - p1) + abs(p1 - p2) + abs(p2 - p3) + if arch * mult >= box: + return (arch + box) * 0.5 + else: + one, two = _split_cubic_into_two(p0, p1, p2, p3) + return _calcCubicArcLengthCRecurse(mult, *one) + _calcCubicArcLengthCRecurse( + mult, *two + ) + + +def calcCubicArcLengthC(pt1, pt2, pt3, pt4, tolerance=0.005): + """Calculates the arc length for a cubic Bezier segment. + + Args: + pt1,pt2,pt3,pt4: Control points of the Bezier as complex numbers. + tolerance: Controls the precision of the calcuation. + + Returns: + Arc length value. + """ + mult = 1.0 + 1.5 * tolerance # The 1.5 is a empirical hack; no math + return _calcCubicArcLengthCRecurse(mult, pt1, pt2, pt3, pt4) + + +epsilonDigits = 6 +epsilon = 1e-10 + + +def _dot(v1, v2): + return (v1 * v2.conjugate()).real + + +def _intSecAtan(x): + # In : sympy.integrate(sp.sec(sp.atan(x))) + # Out: x*sqrt(x**2 + 1)/2 + asinh(x)/2 + return x * math.sqrt(x ** 2 + 1) / 2 + math.asinh(x) / 2 + + +def calcQuadraticArcLength(pt1, pt2, pt3): + """Calculates the arc length for a quadratic Bezier segment. + + Args: + pt1: Start point of the Bezier as 2D tuple. + pt2: Handle point of the Bezier as 2D tuple. 
+ pt3: End point of the Bezier as 2D tuple. + + Returns: + Arc length value. + + Example:: + + >>> calcQuadraticArcLength((0, 0), (0, 0), (0, 0)) # empty segment + 0.0 + >>> calcQuadraticArcLength((0, 0), (50, 0), (80, 0)) # collinear points + 80.0 + >>> calcQuadraticArcLength((0, 0), (0, 50), (0, 80)) # collinear points vertical + 80.0 + >>> calcQuadraticArcLength((0, 0), (50, 20), (100, 40)) # collinear points + 107.70329614269008 + >>> calcQuadraticArcLength((0, 0), (0, 100), (100, 0)) + 154.02976155645263 + >>> calcQuadraticArcLength((0, 0), (0, 50), (100, 0)) + 120.21581243984076 + >>> calcQuadraticArcLength((0, 0), (50, -10), (80, 50)) + 102.53273816445825 + >>> calcQuadraticArcLength((0, 0), (40, 0), (-40, 0)) # collinear points, control point outside + 66.66666666666667 + >>> calcQuadraticArcLength((0, 0), (40, 0), (0, 0)) # collinear points, looping back + 40.0 + """ + return calcQuadraticArcLengthC(complex(*pt1), complex(*pt2), complex(*pt3)) + + +def calcQuadraticArcLengthC(pt1, pt2, pt3): + """Calculates the arc length for a quadratic Bezier segment. + + Args: + pt1: Start point of the Bezier as a complex number. + pt2: Handle point of the Bezier as a complex number. + pt3: End point of the Bezier as a complex number. + + Returns: + Arc length value. + """ + # Analytical solution to the length of a quadratic bezier. + # I'll explain how I arrived at this later. + d0 = pt2 - pt1 + d1 = pt3 - pt2 + d = d1 - d0 + n = d * 1j + scale = abs(n) + if scale == 0.0: + return abs(pt3 - pt1) + origDist = _dot(n, d0) + if abs(origDist) < epsilon: + if _dot(d0, d1) >= 0: + return abs(pt3 - pt1) + a, b = abs(d0), abs(d1) + return (a * a + b * b) / (a + b) + x0 = _dot(d, d0) / origDist + x1 = _dot(d, d1) / origDist + Len = abs(2 * (_intSecAtan(x1) - _intSecAtan(x0)) * origDist / (scale * (x1 - x0))) + return Len + + +def approximateQuadraticArcLength(pt1, pt2, pt3): + """Calculates the arc length for a quadratic Bezier segment. 
+ + Uses Gauss-Legendre quadrature for a branch-free approximation. + See :func:`calcQuadraticArcLength` for a slower but more accurate result. + + Args: + pt1: Start point of the Bezier as 2D tuple. + pt2: Handle point of the Bezier as 2D tuple. + pt3: End point of the Bezier as 2D tuple. + + Returns: + Approximate arc length value. + """ + return approximateQuadraticArcLengthC(complex(*pt1), complex(*pt2), complex(*pt3)) + + +def approximateQuadraticArcLengthC(pt1, pt2, pt3): + """Calculates the arc length for a quadratic Bezier segment. + + Uses Gauss-Legendre quadrature for a branch-free approximation. + See :func:`calcQuadraticArcLength` for a slower but more accurate result. + + Args: + pt1: Start point of the Bezier as a complex number. + pt2: Handle point of the Bezier as a complex number. + pt3: End point of the Bezier as a complex number. + + Returns: + Approximate arc length value. + """ + # This, essentially, approximates the length-of-derivative function + # to be integrated with the best-matching fifth-degree polynomial + # approximation of it. + # + # https://en.wikipedia.org/wiki/Gaussian_quadrature#Gauss.E2.80.93Legendre_quadrature + + # abs(BezierCurveC[2].diff(t).subs({t:T})) for T in sorted(.5, .5±sqrt(3/5)/2), + # weighted 5/18, 8/18, 5/18 respectively. + v0 = abs( + -0.492943519233745 * pt1 + 0.430331482911935 * pt2 + 0.0626120363218102 * pt3 + ) + v1 = abs(pt3 - pt1) * 0.4444444444444444 + v2 = abs( + -0.0626120363218102 * pt1 - 0.430331482911935 * pt2 + 0.492943519233745 * pt3 + ) + + return v0 + v1 + v2 + + +def calcQuadraticBounds(pt1, pt2, pt3): + """Calculates the bounding rectangle for a quadratic Bezier segment. + + Args: + pt1: Start point of the Bezier as a 2D tuple. + pt2: Handle point of the Bezier as a 2D tuple. + pt3: End point of the Bezier as a 2D tuple. + + Returns: + A four-item tuple representing the bounding rectangle ``(xMin, yMin, xMax, yMax)``. 
+ + Example:: + + >>> calcQuadraticBounds((0, 0), (50, 100), (100, 0)) + (0, 0, 100, 50.0) + >>> calcQuadraticBounds((0, 0), (100, 0), (100, 100)) + (0.0, 0.0, 100, 100) + """ + (ax, ay), (bx, by), (cx, cy) = calcQuadraticParameters(pt1, pt2, pt3) + ax2 = ax * 2.0 + ay2 = ay * 2.0 + roots = [] + if ax2 != 0: + roots.append(-bx / ax2) + if ay2 != 0: + roots.append(-by / ay2) + points = [ + (ax * t * t + bx * t + cx, ay * t * t + by * t + cy) + for t in roots + if 0 <= t < 1 + ] + [pt1, pt3] + return calcBounds(points) + + +def approximateCubicArcLength(pt1, pt2, pt3, pt4): + """Approximates the arc length for a cubic Bezier segment. + + Uses Gauss-Lobatto quadrature with n=5 points to approximate arc length. + See :func:`calcCubicArcLength` for a slower but more accurate result. + + Args: + pt1,pt2,pt3,pt4: Control points of the Bezier as 2D tuples. + + Returns: + Arc length value. + + Example:: + + >>> approximateCubicArcLength((0, 0), (25, 100), (75, 100), (100, 0)) + 190.04332968932817 + >>> approximateCubicArcLength((0, 0), (50, 0), (100, 50), (100, 100)) + 154.8852074945903 + >>> approximateCubicArcLength((0, 0), (50, 0), (100, 0), (150, 0)) # line; exact result should be 150. + 149.99999999999991 + >>> approximateCubicArcLength((0, 0), (50, 0), (100, 0), (-50, 0)) # cusp; exact result should be 150. + 136.9267662156362 + >>> approximateCubicArcLength((0, 0), (50, 0), (100, -50), (-50, 0)) # cusp + 154.80848416537057 + """ + return approximateCubicArcLengthC( + complex(*pt1), complex(*pt2), complex(*pt3), complex(*pt4) + ) + + +def approximateCubicArcLengthC(pt1, pt2, pt3, pt4): + """Approximates the arc length for a cubic Bezier segment. + + Args: + pt1,pt2,pt3,pt4: Control points of the Bezier as complex numbers. + + Returns: + Arc length value. + """ + # This, essentially, approximates the length-of-derivative function + # to be integrated with the best-matching seventh-degree polynomial + # approximation of it. 
+ # + # https://en.wikipedia.org/wiki/Gaussian_quadrature#Gauss.E2.80.93Lobatto_rules + + # abs(BezierCurveC[3].diff(t).subs({t:T})) for T in sorted(0, .5±(3/7)**.5/2, .5, 1), + # weighted 1/20, 49/180, 32/90, 49/180, 1/20 respectively. + v0 = abs(pt2 - pt1) * 0.15 + v1 = abs( + -0.558983582205757 * pt1 + + 0.325650248872424 * pt2 + + 0.208983582205757 * pt3 + + 0.024349751127576 * pt4 + ) + v2 = abs(pt4 - pt1 + pt3 - pt2) * 0.26666666666666666 + v3 = abs( + -0.024349751127576 * pt1 + - 0.208983582205757 * pt2 + - 0.325650248872424 * pt3 + + 0.558983582205757 * pt4 + ) + v4 = abs(pt4 - pt3) * 0.15 + + return v0 + v1 + v2 + v3 + v4 + + +def calcCubicBounds(pt1, pt2, pt3, pt4): + """Calculates the bounding rectangle for a quadratic Bezier segment. + + Args: + pt1,pt2,pt3,pt4: Control points of the Bezier as 2D tuples. + + Returns: + A four-item tuple representing the bounding rectangle ``(xMin, yMin, xMax, yMax)``. + + Example:: + + >>> calcCubicBounds((0, 0), (25, 100), (75, 100), (100, 0)) + (0, 0, 100, 75.0) + >>> calcCubicBounds((0, 0), (50, 0), (100, 50), (100, 100)) + (0.0, 0.0, 100, 100) + >>> print("%f %f %f %f" % calcCubicBounds((50, 0), (0, 100), (100, 100), (50, 0))) + 35.566243 0.000000 64.433757 75.000000 + """ + (ax, ay), (bx, by), (cx, cy), (dx, dy) = calcCubicParameters(pt1, pt2, pt3, pt4) + # calc first derivative + ax3 = ax * 3.0 + ay3 = ay * 3.0 + bx2 = bx * 2.0 + by2 = by * 2.0 + xRoots = [t for t in solveQuadratic(ax3, bx2, cx) if 0 <= t < 1] + yRoots = [t for t in solveQuadratic(ay3, by2, cy) if 0 <= t < 1] + roots = xRoots + yRoots + + points = [ + ( + ax * t * t * t + bx * t * t + cx * t + dx, + ay * t * t * t + by * t * t + cy * t + dy, + ) + for t in roots + ] + [pt1, pt4] + return calcBounds(points) + + +def splitLine(pt1, pt2, where, isHorizontal): + """Split a line at a given coordinate. + + Args: + pt1: Start point of line as 2D tuple. + pt2: End point of line as 2D tuple. + where: Position at which to split the line. 
+ isHorizontal: Direction of the ray splitting the line. If true, + ``where`` is interpreted as a Y coordinate; if false, then + ``where`` is interpreted as an X coordinate. + + Returns: + A list of two line segments (each line segment being two 2D tuples) + if the line was successfully split, or a list containing the original + line. + + Example:: + + >>> printSegments(splitLine((0, 0), (100, 100), 50, True)) + ((0, 0), (50, 50)) + ((50, 50), (100, 100)) + >>> printSegments(splitLine((0, 0), (100, 100), 100, True)) + ((0, 0), (100, 100)) + >>> printSegments(splitLine((0, 0), (100, 100), 0, True)) + ((0, 0), (0, 0)) + ((0, 0), (100, 100)) + >>> printSegments(splitLine((0, 0), (100, 100), 0, False)) + ((0, 0), (0, 0)) + ((0, 0), (100, 100)) + >>> printSegments(splitLine((100, 0), (0, 0), 50, False)) + ((100, 0), (50, 0)) + ((50, 0), (0, 0)) + >>> printSegments(splitLine((0, 100), (0, 0), 50, True)) + ((0, 100), (0, 50)) + ((0, 50), (0, 0)) + """ + pt1x, pt1y = pt1 + pt2x, pt2y = pt2 + + ax = pt2x - pt1x + ay = pt2y - pt1y + + bx = pt1x + by = pt1y + + a = (ax, ay)[isHorizontal] + + if a == 0: + return [(pt1, pt2)] + t = (where - (bx, by)[isHorizontal]) / a + if 0 <= t < 1: + midPt = ax * t + bx, ay * t + by + return [(pt1, midPt), (midPt, pt2)] + else: + return [(pt1, pt2)] + + +def splitQuadratic(pt1, pt2, pt3, where, isHorizontal): + """Split a quadratic Bezier curve at a given coordinate. + + Args: + pt1,pt2,pt3: Control points of the Bezier as 2D tuples. + where: Position at which to split the curve. + isHorizontal: Direction of the ray splitting the curve. If true, + ``where`` is interpreted as a Y coordinate; if false, then + ``where`` is interpreted as an X coordinate. + + Returns: + A list of two curve segments (each curve segment being three 2D tuples) + if the curve was successfully split, or a list containing the original + curve. 
+ + Example:: + + >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 150, False)) + ((0, 0), (50, 100), (100, 0)) + >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, False)) + ((0, 0), (25, 50), (50, 50)) + ((50, 50), (75, 50), (100, 0)) + >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, False)) + ((0, 0), (12.5, 25), (25, 37.5)) + ((25, 37.5), (62.5, 75), (100, 0)) + >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, True)) + ((0, 0), (7.32233, 14.6447), (14.6447, 25)) + ((14.6447, 25), (50, 75), (85.3553, 25)) + ((85.3553, 25), (92.6777, 14.6447), (100, -7.10543e-15)) + >>> # XXX I'm not at all sure if the following behavior is desirable: + >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, True)) + ((0, 0), (25, 50), (50, 50)) + ((50, 50), (50, 50), (50, 50)) + ((50, 50), (75, 50), (100, 0)) + """ + a, b, c = calcQuadraticParameters(pt1, pt2, pt3) + solutions = solveQuadratic( + a[isHorizontal], b[isHorizontal], c[isHorizontal] - where + ) + solutions = sorted(t for t in solutions if 0 <= t < 1) + if not solutions: + return [(pt1, pt2, pt3)] + return _splitQuadraticAtT(a, b, c, *solutions) + + +def splitCubic(pt1, pt2, pt3, pt4, where, isHorizontal): + """Split a cubic Bezier curve at a given coordinate. + + Args: + pt1,pt2,pt3,pt4: Control points of the Bezier as 2D tuples. + where: Position at which to split the curve. + isHorizontal: Direction of the ray splitting the curve. If true, + ``where`` is interpreted as a Y coordinate; if false, then + ``where`` is interpreted as an X coordinate. + + Returns: + A list of two curve segments (each curve segment being four 2D tuples) + if the curve was successfully split, or a list containing the original + curve. 
+ + Example:: + + >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 150, False)) + ((0, 0), (25, 100), (75, 100), (100, 0)) + >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 50, False)) + ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) + ((50, 75), (68.75, 75), (87.5, 50), (100, 0)) + >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 25, True)) + ((0, 0), (2.29379, 9.17517), (4.79804, 17.5085), (7.47414, 25)) + ((7.47414, 25), (31.2886, 91.6667), (68.7114, 91.6667), (92.5259, 25)) + ((92.5259, 25), (95.202, 17.5085), (97.7062, 9.17517), (100, 1.77636e-15)) + """ + a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4) + solutions = solveCubic( + a[isHorizontal], b[isHorizontal], c[isHorizontal], d[isHorizontal] - where + ) + solutions = sorted(t for t in solutions if 0 <= t < 1) + if not solutions: + return [(pt1, pt2, pt3, pt4)] + return _splitCubicAtT(a, b, c, d, *solutions) + + +def splitQuadraticAtT(pt1, pt2, pt3, *ts): + """Split a quadratic Bezier curve at one or more values of t. + + Args: + pt1,pt2,pt3: Control points of the Bezier as 2D tuples. + *ts: Positions at which to split the curve. + + Returns: + A list of curve segments (each curve segment being three 2D tuples). + + Examples:: + + >>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5)) + ((0, 0), (25, 50), (50, 50)) + ((50, 50), (75, 50), (100, 0)) + >>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5, 0.75)) + ((0, 0), (25, 50), (50, 50)) + ((50, 50), (62.5, 50), (75, 37.5)) + ((75, 37.5), (87.5, 25), (100, 0)) + """ + a, b, c = calcQuadraticParameters(pt1, pt2, pt3) + return _splitQuadraticAtT(a, b, c, *ts) + + +def splitCubicAtT(pt1, pt2, pt3, pt4, *ts): + """Split a cubic Bezier curve at one or more values of t. + + Args: + pt1,pt2,pt3,pt4: Control points of the Bezier as 2D tuples. + *ts: Positions at which to split the curve. + + Returns: + A list of curve segments (each curve segment being four 2D tuples). 
+ + Examples:: + + >>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5)) + ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) + ((50, 75), (68.75, 75), (87.5, 50), (100, 0)) + >>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5, 0.75)) + ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) + ((50, 75), (59.375, 75), (68.75, 68.75), (77.3438, 56.25)) + ((77.3438, 56.25), (85.9375, 43.75), (93.75, 25), (100, 0)) + """ + a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4) + return _splitCubicAtT(a, b, c, d, *ts) + + +def _splitQuadraticAtT(a, b, c, *ts): + ts = list(ts) + segments = [] + ts.insert(0, 0.0) + ts.append(1.0) + ax, ay = a + bx, by = b + cx, cy = c + for i in range(len(ts) - 1): + t1 = ts[i] + t2 = ts[i + 1] + delta = t2 - t1 + # calc new a, b and c + delta_2 = delta * delta + a1x = ax * delta_2 + a1y = ay * delta_2 + b1x = (2 * ax * t1 + bx) * delta + b1y = (2 * ay * t1 + by) * delta + t1_2 = t1 * t1 + c1x = ax * t1_2 + bx * t1 + cx + c1y = ay * t1_2 + by * t1 + cy + + pt1, pt2, pt3 = calcQuadraticPoints((a1x, a1y), (b1x, b1y), (c1x, c1y)) + segments.append((pt1, pt2, pt3)) + return segments + + +def _splitCubicAtT(a, b, c, d, *ts): + ts = list(ts) + ts.insert(0, 0.0) + ts.append(1.0) + segments = [] + ax, ay = a + bx, by = b + cx, cy = c + dx, dy = d + for i in range(len(ts) - 1): + t1 = ts[i] + t2 = ts[i + 1] + delta = t2 - t1 + + delta_2 = delta * delta + delta_3 = delta * delta_2 + t1_2 = t1 * t1 + t1_3 = t1 * t1_2 + + # calc new a, b, c and d + a1x = ax * delta_3 + a1y = ay * delta_3 + b1x = (3 * ax * t1 + bx) * delta_2 + b1y = (3 * ay * t1 + by) * delta_2 + c1x = (2 * bx * t1 + cx + 3 * ax * t1_2) * delta + c1y = (2 * by * t1 + cy + 3 * ay * t1_2) * delta + d1x = ax * t1_3 + bx * t1_2 + cx * t1 + dx + d1y = ay * t1_3 + by * t1_2 + cy * t1 + dy + pt1, pt2, pt3, pt4 = calcCubicPoints( + (a1x, a1y), (b1x, b1y), (c1x, c1y), (d1x, d1y) + ) + segments.append((pt1, pt2, pt3, pt4)) + return segments + + +# +# Equation 
solvers. +# + +from math import sqrt, acos, cos, pi + + +def solveQuadratic(a, b, c, sqrt=sqrt): + """Solve a quadratic equation. + + Solves *a*x*x + b*x + c = 0* where a, b and c are real. + + Args: + a: coefficient of *x²* + b: coefficient of *x* + c: constant term + + Returns: + A list of roots. Note that the returned list is neither guaranteed to + be sorted nor to contain unique values! + """ + if abs(a) < epsilon: + if abs(b) < epsilon: + # We have a non-equation; therefore, we have no valid solution + roots = [] + else: + # We have a linear equation with 1 root. + roots = [-c / b] + else: + # We have a true quadratic equation. Apply the quadratic formula to find two roots. + DD = b * b - 4.0 * a * c + if DD >= 0.0: + rDD = sqrt(DD) + roots = [(-b + rDD) / 2.0 / a, (-b - rDD) / 2.0 / a] + else: + # complex roots, ignore + roots = [] + return roots + + +def solveCubic(a, b, c, d): + """Solve a cubic equation. + + Solves *a*x*x*x + b*x*x + c*x + d = 0* where a, b, c and d are real. + + Args: + a: coefficient of *x³* + b: coefficient of *x²* + c: coefficient of *x* + d: constant term + + Returns: + A list of roots. Note that the returned list is neither guaranteed to + be sorted nor to contain unique values! + + Examples:: + + >>> solveCubic(1, 1, -6, 0) + [-3.0, -0.0, 2.0] + >>> solveCubic(-10.0, -9.0, 48.0, -29.0) + [-2.9, 1.0, 1.0] + >>> solveCubic(-9.875, -9.0, 47.625, -28.75) + [-2.911392, 1.0, 1.0] + >>> solveCubic(1.0, -4.5, 6.75, -3.375) + [1.5, 1.5, 1.5] + >>> solveCubic(-12.0, 18.0, -9.0, 1.50023651123) + [0.5, 0.5, 0.5] + >>> solveCubic( + ... 9.0, 0.0, 0.0, -7.62939453125e-05 + ... ) == [-0.0, -0.0, -0.0] + True + """ + # + # adapted from: + # CUBIC.C - Solve a cubic polynomial + # public domain by Ross Cottrell + # found at: http://www.strangecreations.com/library/snippets/Cubic.C + # + if abs(a) < epsilon: + # don't just test for zero; for very small values of 'a' solveCubic() + # returns unreliable results, so we fall back to quad. 
+ return solveQuadratic(b, c, d) + a = float(a) + a1 = b / a + a2 = c / a + a3 = d / a + + Q = (a1 * a1 - 3.0 * a2) / 9.0 + R = (2.0 * a1 * a1 * a1 - 9.0 * a1 * a2 + 27.0 * a3) / 54.0 + + R2 = R * R + Q3 = Q * Q * Q + R2 = 0 if R2 < epsilon else R2 + Q3 = 0 if abs(Q3) < epsilon else Q3 + + R2_Q3 = R2 - Q3 + + if R2 == 0.0 and Q3 == 0.0: + x = round(-a1 / 3.0, epsilonDigits) + return [x, x, x] + elif R2_Q3 <= epsilon * 0.5: + # The epsilon * .5 above ensures that Q3 is not zero. + theta = acos(max(min(R / sqrt(Q3), 1.0), -1.0)) + rQ2 = -2.0 * sqrt(Q) + a1_3 = a1 / 3.0 + x0 = rQ2 * cos(theta / 3.0) - a1_3 + x1 = rQ2 * cos((theta + 2.0 * pi) / 3.0) - a1_3 + x2 = rQ2 * cos((theta + 4.0 * pi) / 3.0) - a1_3 + x0, x1, x2 = sorted([x0, x1, x2]) + # Merge roots that are close-enough + if x1 - x0 < epsilon and x2 - x1 < epsilon: + x0 = x1 = x2 = round((x0 + x1 + x2) / 3.0, epsilonDigits) + elif x1 - x0 < epsilon: + x0 = x1 = round((x0 + x1) / 2.0, epsilonDigits) + x2 = round(x2, epsilonDigits) + elif x2 - x1 < epsilon: + x0 = round(x0, epsilonDigits) + x1 = x2 = round((x1 + x2) / 2.0, epsilonDigits) + else: + x0 = round(x0, epsilonDigits) + x1 = round(x1, epsilonDigits) + x2 = round(x2, epsilonDigits) + return [x0, x1, x2] + else: + x = pow(sqrt(R2_Q3) + abs(R), 1 / 3.0) + x = x + Q / x + if R >= 0.0: + x = -x + x = round(x - a1 / 3.0, epsilonDigits) + return [x] + + +# +# Conversion routines for points to parameters and vice versa +# + + +def calcQuadraticParameters(pt1, pt2, pt3): + x2, y2 = pt2 + x3, y3 = pt3 + cx, cy = pt1 + bx = (x2 - cx) * 2.0 + by = (y2 - cy) * 2.0 + ax = x3 - cx - bx + ay = y3 - cy - by + return (ax, ay), (bx, by), (cx, cy) + + +def calcCubicParameters(pt1, pt2, pt3, pt4): + x2, y2 = pt2 + x3, y3 = pt3 + x4, y4 = pt4 + dx, dy = pt1 + cx = (x2 - dx) * 3.0 + cy = (y2 - dy) * 3.0 + bx = (x3 - x2) * 3.0 - cx + by = (y3 - y2) * 3.0 - cy + ax = x4 - dx - cx - bx + ay = y4 - dy - cy - by + return (ax, ay), (bx, by), (cx, cy), (dx, dy) + + +def 
calcQuadraticPoints(a, b, c): + ax, ay = a + bx, by = b + cx, cy = c + x1 = cx + y1 = cy + x2 = (bx * 0.5) + cx + y2 = (by * 0.5) + cy + x3 = ax + bx + cx + y3 = ay + by + cy + return (x1, y1), (x2, y2), (x3, y3) + + +def calcCubicPoints(a, b, c, d): + ax, ay = a + bx, by = b + cx, cy = c + dx, dy = d + x1 = dx + y1 = dy + x2 = (cx / 3.0) + dx + y2 = (cy / 3.0) + dy + x3 = (bx + cx) / 3.0 + x2 + y3 = (by + cy) / 3.0 + y2 + x4 = ax + dx + cx + bx + y4 = ay + dy + cy + by + return (x1, y1), (x2, y2), (x3, y3), (x4, y4) + + +# +# Point at time +# + + +def linePointAtT(pt1, pt2, t): + """Finds the point at time `t` on a line. + + Args: + pt1, pt2: Coordinates of the line as 2D tuples. + t: The time along the line. + + Returns: + A 2D tuple with the coordinates of the point. + """ + return ((pt1[0] * (1 - t) + pt2[0] * t), (pt1[1] * (1 - t) + pt2[1] * t)) + + +def quadraticPointAtT(pt1, pt2, pt3, t): + """Finds the point at time `t` on a quadratic curve. + + Args: + pt1, pt2, pt3: Coordinates of the curve as 2D tuples. + t: The time along the curve. + + Returns: + A 2D tuple with the coordinates of the point. + """ + x = (1 - t) * (1 - t) * pt1[0] + 2 * (1 - t) * t * pt2[0] + t * t * pt3[0] + y = (1 - t) * (1 - t) * pt1[1] + 2 * (1 - t) * t * pt2[1] + t * t * pt3[1] + return (x, y) + + +def cubicPointAtT(pt1, pt2, pt3, pt4, t): + """Finds the point at time `t` on a cubic curve. + + Args: + pt1, pt2, pt3, pt4: Coordinates of the curve as 2D tuples. + t: The time along the curve. + + Returns: + A 2D tuple with the coordinates of the point. 
+ """ + x = ( + (1 - t) * (1 - t) * (1 - t) * pt1[0] + + 3 * (1 - t) * (1 - t) * t * pt2[0] + + 3 * (1 - t) * t * t * pt3[0] + + t * t * t * pt4[0] + ) + y = ( + (1 - t) * (1 - t) * (1 - t) * pt1[1] + + 3 * (1 - t) * (1 - t) * t * pt2[1] + + 3 * (1 - t) * t * t * pt3[1] + + t * t * t * pt4[1] + ) + return (x, y) + + +def segmentPointAtT(seg, t): + if len(seg) == 2: + return linePointAtT(*seg, t) + elif len(seg) == 3: + return quadraticPointAtT(*seg, t) + elif len(seg) == 4: + return cubicPointAtT(*seg, t) + raise ValueError("Unknown curve degree") + + +# +# Intersection finders +# + + +def _line_t_of_pt(s, e, pt): + sx, sy = s + ex, ey = e + px, py = pt + if abs(sx - ex) < epsilon and abs(sy - ey) < epsilon: + # Line is a point! + return -1 + # Use the largest + if abs(sx - ex) > abs(sy - ey): + return (px - sx) / (ex - sx) + else: + return (py - sy) / (ey - sy) + + +def _both_points_are_on_same_side_of_origin(a, b, origin): + xDiff = (a[0] - origin[0]) * (b[0] - origin[0]) + yDiff = (a[1] - origin[1]) * (b[1] - origin[1]) + return not (xDiff <= 0.0 and yDiff <= 0.0) + + +def lineLineIntersections(s1, e1, s2, e2): + """Finds intersections between two line segments. + + Args: + s1, e1: Coordinates of the first line as 2D tuples. + s2, e2: Coordinates of the second line as 2D tuples. + + Returns: + A list of ``Intersection`` objects, each object having ``pt``, ``t1`` + and ``t2`` attributes containing the intersection point, time on first + segment and time on second segment respectively. 
+ + Examples:: + + >>> a = lineLineIntersections( (310,389), (453, 222), (289, 251), (447, 367)) + >>> len(a) + 1 + >>> intersection = a[0] + >>> intersection.pt + (374.44882952482897, 313.73458370177315) + >>> (intersection.t1, intersection.t2) + (0.45069111555824465, 0.5408153767394238) + """ + s1x, s1y = s1 + e1x, e1y = e1 + s2x, s2y = s2 + e2x, e2y = e2 + if ( + math.isclose(s2x, e2x) and math.isclose(s1x, e1x) and not math.isclose(s1x, s2x) + ): # Parallel vertical + return [] + if ( + math.isclose(s2y, e2y) and math.isclose(s1y, e1y) and not math.isclose(s1y, s2y) + ): # Parallel horizontal + return [] + if math.isclose(s2x, e2x) and math.isclose(s2y, e2y): # Line segment is tiny + return [] + if math.isclose(s1x, e1x) and math.isclose(s1y, e1y): # Line segment is tiny + return [] + if math.isclose(e1x, s1x): + x = s1x + slope34 = (e2y - s2y) / (e2x - s2x) + y = slope34 * (x - s2x) + s2y + pt = (x, y) + return [ + Intersection( + pt=pt, t1=_line_t_of_pt(s1, e1, pt), t2=_line_t_of_pt(s2, e2, pt) + ) + ] + if math.isclose(s2x, e2x): + x = s2x + slope12 = (e1y - s1y) / (e1x - s1x) + y = slope12 * (x - s1x) + s1y + pt = (x, y) + return [ + Intersection( + pt=pt, t1=_line_t_of_pt(s1, e1, pt), t2=_line_t_of_pt(s2, e2, pt) + ) + ] + + slope12 = (e1y - s1y) / (e1x - s1x) + slope34 = (e2y - s2y) / (e2x - s2x) + if math.isclose(slope12, slope34): + return [] + x = (slope12 * s1x - s1y - slope34 * s2x + s2y) / (slope12 - slope34) + y = slope12 * (x - s1x) + s1y + pt = (x, y) + if _both_points_are_on_same_side_of_origin( + pt, e1, s1 + ) and _both_points_are_on_same_side_of_origin(pt, s2, e2): + return [ + Intersection( + pt=pt, t1=_line_t_of_pt(s1, e1, pt), t2=_line_t_of_pt(s2, e2, pt) + ) + ] + return [] + + +def _alignment_transformation(segment): + # Returns a transformation which aligns a segment horizontally at the + # origin. Apply this transformation to curves and root-find to find + # intersections with the segment. 
+ start = segment[0] + end = segment[-1] + angle = math.atan2(end[1] - start[1], end[0] - start[0]) + return Identity.rotate(-angle).translate(-start[0], -start[1]) + + +def _curve_line_intersections_t(curve, line): + aligned_curve = _alignment_transformation(line).transformPoints(curve) + if len(curve) == 3: + a, b, c = calcQuadraticParameters(*aligned_curve) + intersections = solveQuadratic(a[1], b[1], c[1]) + elif len(curve) == 4: + a, b, c, d = calcCubicParameters(*aligned_curve) + intersections = solveCubic(a[1], b[1], c[1], d[1]) + else: + raise ValueError("Unknown curve degree") + return sorted(i for i in intersections if 0.0 <= i <= 1) + + +def curveLineIntersections(curve, line): + """Finds intersections between a curve and a line. + + Args: + curve: List of coordinates of the curve segment as 2D tuples. + line: List of coordinates of the line segment as 2D tuples. + + Returns: + A list of ``Intersection`` objects, each object having ``pt``, ``t1`` + and ``t2`` attributes containing the intersection point, time on first + segment and time on second segment respectively. 
+ + Examples:: + >>> curve = [ (100, 240), (30, 60), (210, 230), (160, 30) ] + >>> line = [ (25, 260), (230, 20) ] + >>> intersections = curveLineIntersections(curve, line) + >>> len(intersections) + 3 + >>> intersections[0].pt + (84.9000930760723, 189.87306176459828) + """ + if len(curve) == 3: + pointFinder = quadraticPointAtT + elif len(curve) == 4: + pointFinder = cubicPointAtT + else: + raise ValueError("Unknown curve degree") + intersections = [] + for t in _curve_line_intersections_t(curve, line): + pt = pointFinder(*curve, t) + # Back-project the point onto the line, to avoid problems with + # numerical accuracy in the case of vertical and horizontal lines + line_t = _line_t_of_pt(*line, pt) + pt = linePointAtT(*line, line_t) + intersections.append(Intersection(pt=pt, t1=t, t2=line_t)) + return intersections + + +def _curve_bounds(c): + if len(c) == 3: + return calcQuadraticBounds(*c) + elif len(c) == 4: + return calcCubicBounds(*c) + raise ValueError("Unknown curve degree") + + +def _split_segment_at_t(c, t): + if len(c) == 2: + s, e = c + midpoint = linePointAtT(s, e, t) + return [(s, midpoint), (midpoint, e)] + if len(c) == 3: + return splitQuadraticAtT(*c, t) + elif len(c) == 4: + return splitCubicAtT(*c, t) + raise ValueError("Unknown curve degree") + + +def _curve_curve_intersections_t( + curve1, curve2, precision=1e-3, range1=None, range2=None +): + bounds1 = _curve_bounds(curve1) + bounds2 = _curve_bounds(curve2) + + if not range1: + range1 = (0.0, 1.0) + if not range2: + range2 = (0.0, 1.0) + + # If bounds don't intersect, go home + intersects, _ = sectRect(bounds1, bounds2) + if not intersects: + return [] + + def midpoint(r): + return 0.5 * (r[0] + r[1]) + + # If they do overlap but they're tiny, approximate + if rectArea(bounds1) < precision and rectArea(bounds2) < precision: + return [(midpoint(range1), midpoint(range2))] + + c11, c12 = _split_segment_at_t(curve1, 0.5) + c11_range = (range1[0], midpoint(range1)) + c12_range = (midpoint(range1), 
range1[1]) + + c21, c22 = _split_segment_at_t(curve2, 0.5) + c21_range = (range2[0], midpoint(range2)) + c22_range = (midpoint(range2), range2[1]) + + found = [] + found.extend( + _curve_curve_intersections_t( + c11, c21, precision, range1=c11_range, range2=c21_range + ) + ) + found.extend( + _curve_curve_intersections_t( + c12, c21, precision, range1=c12_range, range2=c21_range + ) + ) + found.extend( + _curve_curve_intersections_t( + c11, c22, precision, range1=c11_range, range2=c22_range + ) + ) + found.extend( + _curve_curve_intersections_t( + c12, c22, precision, range1=c12_range, range2=c22_range + ) + ) + + unique_key = lambda ts: (int(ts[0] / precision), int(ts[1] / precision)) + seen = set() + unique_values = [] + + for ts in found: + key = unique_key(ts) + if key in seen: + continue + seen.add(key) + unique_values.append(ts) + + return unique_values + + +def curveCurveIntersections(curve1, curve2): + """Finds intersections between a curve and a curve. + + Args: + curve1: List of coordinates of the first curve segment as 2D tuples. + curve2: List of coordinates of the second curve segment as 2D tuples. + + Returns: + A list of ``Intersection`` objects, each object having ``pt``, ``t1`` + and ``t2`` attributes containing the intersection point, time on first + segment and time on second segment respectively. + + Examples:: + >>> curve1 = [ (10,100), (90,30), (40,140), (220,220) ] + >>> curve2 = [ (5,150), (180,20), (80,250), (210,190) ] + >>> intersections = curveCurveIntersections(curve1, curve2) + >>> len(intersections) + 3 + >>> intersections[0].pt + (81.7831487395506, 109.88904552375288) + """ + intersection_ts = _curve_curve_intersections_t(curve1, curve2) + return [ + Intersection(pt=segmentPointAtT(curve1, ts[0]), t1=ts[0], t2=ts[1]) + for ts in intersection_ts + ] + + +def segmentSegmentIntersections(seg1, seg2): + """Finds intersections between two segments. + + Args: + seg1: List of coordinates of the first segment as 2D tuples. 
+ seg2: List of coordinates of the second segment as 2D tuples. + + Returns: + A list of ``Intersection`` objects, each object having ``pt``, ``t1`` + and ``t2`` attributes containing the intersection point, time on first + segment and time on second segment respectively. + + Examples:: + >>> curve1 = [ (10,100), (90,30), (40,140), (220,220) ] + >>> curve2 = [ (5,150), (180,20), (80,250), (210,190) ] + >>> intersections = segmentSegmentIntersections(curve1, curve2) + >>> len(intersections) + 3 + >>> intersections[0].pt + (81.7831487395506, 109.88904552375288) + >>> curve3 = [ (100, 240), (30, 60), (210, 230), (160, 30) ] + >>> line = [ (25, 260), (230, 20) ] + >>> intersections = segmentSegmentIntersections(curve3, line) + >>> len(intersections) + 3 + >>> intersections[0].pt + (84.9000930760723, 189.87306176459828) + + """ + # Arrange by degree + swapped = False + if len(seg2) > len(seg1): + seg2, seg1 = seg1, seg2 + swapped = True + if len(seg1) > 2: + if len(seg2) > 2: + intersections = curveCurveIntersections(seg1, seg2) + else: + intersections = curveLineIntersections(seg1, seg2) + elif len(seg1) == 2 and len(seg2) == 2: + intersections = lineLineIntersections(*seg1, *seg2) + else: + raise ValueError("Couldn't work out which intersection function to use") + if not swapped: + return intersections + return [Intersection(pt=i.pt, t1=i.t2, t2=i.t1) for i in intersections] + + +def _segmentrepr(obj): + """ + >>> _segmentrepr([1, [2, 3], [], [[2, [3, 4], [0.1, 2.2]]]]) + '(1, (2, 3), (), ((2, (3, 4), (0.1, 2.2))))' + """ + try: + it = iter(obj) + except TypeError: + return "%g" % obj + else: + return "(%s)" % ", ".join(_segmentrepr(x) for x in it) + + +def printSegments(segments): + """Helper for the doctests, displaying each segment in a list of + segments on a single line as a tuple. 
+ """ + for segment in segments: + print(_segmentrepr(segment)) + + +if __name__ == "__main__": + import sys + import doctest + + sys.exit(doctest.testmod().failed) diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/classifyTools.py b/.venv/lib/python3.9/site-packages/fontTools/misc/classifyTools.py new file mode 100644 index 00000000..ae88a8f7 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/classifyTools.py @@ -0,0 +1,171 @@ +""" fontTools.misc.classifyTools.py -- tools for classifying things. +""" + + +class Classifier(object): + + """ + Main Classifier object, used to classify things into similar sets. + """ + + def __init__(self, sort=True): + + self._things = set() # set of all things known so far + self._sets = [] # list of class sets produced so far + self._mapping = {} # map from things to their class set + self._dirty = False + self._sort = sort + + def add(self, set_of_things): + """ + Add a set to the classifier. Any iterable is accepted. + """ + if not set_of_things: + return + + self._dirty = True + + things, sets, mapping = self._things, self._sets, self._mapping + + s = set(set_of_things) + intersection = s.intersection(things) # existing things + s.difference_update(intersection) # new things + difference = s + del s + + # Add new class for new things + if difference: + things.update(difference) + sets.append(difference) + for thing in difference: + mapping[thing] = difference + del difference + + while intersection: + # Take one item and process the old class it belongs to + old_class = mapping[next(iter(intersection))] + old_class_intersection = old_class.intersection(intersection) + + # Update old class to remove items from new set + old_class.difference_update(old_class_intersection) + + # Remove processed items from todo list + intersection.difference_update(old_class_intersection) + + # Add new class for the intersection with old class + sets.append(old_class_intersection) + for thing in old_class_intersection: 
+ mapping[thing] = old_class_intersection + del old_class_intersection + + def update(self, list_of_sets): + """ + Add a a list of sets to the classifier. Any iterable of iterables is accepted. + """ + for s in list_of_sets: + self.add(s) + + def _process(self): + if not self._dirty: + return + + # Do any deferred processing + sets = self._sets + self._sets = [s for s in sets if s] + + if self._sort: + self._sets = sorted(self._sets, key=lambda s: (-len(s), sorted(s))) + + self._dirty = False + + # Output methods + + def getThings(self): + """Returns the set of all things known so far. + + The return value belongs to the Classifier object and should NOT + be modified while the classifier is still in use. + """ + self._process() + return self._things + + def getMapping(self): + """Returns the mapping from things to their class set. + + The return value belongs to the Classifier object and should NOT + be modified while the classifier is still in use. + """ + self._process() + return self._mapping + + def getClasses(self): + """Returns the list of class sets. + + The return value belongs to the Classifier object and should NOT + be modified while the classifier is still in use. + """ + self._process() + return self._sets + + +def classify(list_of_sets, sort=True): + """ + Takes a iterable of iterables (list of sets from here on; but any + iterable works.), and returns the smallest list of sets such that + each set, is either a subset, or is disjoint from, each of the input + sets. + + In other words, this function classifies all the things present in + any of the input sets, into similar classes, based on which sets + things are a member of. + + If sort=True, return class sets are sorted by decreasing size and + their natural sort order within each class size. Otherwise, class + sets are returned in the order that they were identified, which is + generally not significant. 
+ + >>> classify([]) == ([], {}) + True + >>> classify([[]]) == ([], {}) + True + >>> classify([[], []]) == ([], {}) + True + >>> classify([[1]]) == ([{1}], {1: {1}}) + True + >>> classify([[1,2]]) == ([{1, 2}], {1: {1, 2}, 2: {1, 2}}) + True + >>> classify([[1],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}}) + True + >>> classify([[1,2],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}}) + True + >>> classify([[1,2],[2,4]]) == ([{1}, {2}, {4}], {1: {1}, 2: {2}, 4: {4}}) + True + >>> classify([[1,2],[2,4,5]]) == ( + ... [{4, 5}, {1}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}}) + True + >>> classify([[1,2],[2,4,5]], sort=False) == ( + ... [{1}, {4, 5}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}}) + True + >>> classify([[1,2,9],[2,4,5]], sort=False) == ( + ... [{1, 9}, {4, 5}, {2}], {1: {1, 9}, 2: {2}, 4: {4, 5}, 5: {4, 5}, + ... 9: {1, 9}}) + True + >>> classify([[1,2,9,15],[2,4,5]], sort=False) == ( + ... [{1, 9, 15}, {4, 5}, {2}], {1: {1, 9, 15}, 2: {2}, 4: {4, 5}, + ... 5: {4, 5}, 9: {1, 9, 15}, 15: {1, 9, 15}}) + True + >>> classes, mapping = classify([[1,2,9,15],[2,4,5],[15,5]], sort=False) + >>> set([frozenset(c) for c in classes]) == set( + ... 
[frozenset(s) for s in ({1, 9}, {4}, {2}, {5}, {15})]) + True + >>> mapping == {1: {1, 9}, 2: {2}, 4: {4}, 5: {5}, 9: {1, 9}, 15: {15}} + True + """ + classifier = Classifier(sort=sort) + classifier.update(list_of_sets) + return classifier.getClasses(), classifier.getMapping() + + +if __name__ == "__main__": + import sys, doctest + sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed) diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/cliTools.py b/.venv/lib/python3.9/site-packages/fontTools/misc/cliTools.py new file mode 100644 index 00000000..e8c17677 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/cliTools.py @@ -0,0 +1,46 @@ +"""Collection of utilities for command-line interfaces and console scripts.""" +import os +import re + + +numberAddedRE = re.compile(r"#\d+$") + + +def makeOutputFileName(input, outputDir=None, extension=None, overWrite=False): + """Generates a suitable file name for writing output. + + Often tools will want to take a file, do some kind of transformation to it, + and write it out again. This function determines an appropriate name for the + output file, through one or more of the following steps: + + - changing the output directory + - replacing the file extension + - suffixing the filename with a number (``#1``, ``#2``, etc.) to avoid + overwriting an existing file. + + Args: + input: Name of input file. + outputDir: Optionally, a new directory to write the file into. + extension: Optionally, a replacement for the current file extension. + overWrite: Overwriting an existing file is permitted if true; if false + and the proposed filename exists, a new name will be generated by + adding an appropriate number suffix. 
+ + Returns: + str: Suitable output filename + """ + dirName, fileName = os.path.split(input) + fileName, ext = os.path.splitext(fileName) + if outputDir: + dirName = outputDir + fileName = numberAddedRE.split(fileName)[0] + if extension is None: + extension = os.path.splitext(input)[1] + output = os.path.join(dirName, fileName + extension) + n = 1 + if not overWrite: + while os.path.exists(output): + output = os.path.join( + dirName, fileName + "#" + repr(n) + extension) + n += 1 + return output diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/cython.py b/.venv/lib/python3.9/site-packages/fontTools/misc/cython.py new file mode 100644 index 00000000..0ba659f6 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/cython.py @@ -0,0 +1,25 @@ +""" Exports a no-op 'cython' namespace similar to +https://github.com/cython/cython/blob/master/Cython/Shadow.py + +This allows to optionally compile @cython decorated functions +(when cython is available at built time), or run the same code +as pure-python, without runtime dependency on cython module. + +We only define the symbols that we use. E.g. see fontTools.cu2qu +""" + +from types import SimpleNamespace + +def _empty_decorator(x): + return x + +compiled = False + +for name in ("double", "complex", "int"): + globals()[name] = None + +for name in ("cfunc", "inline"): + globals()[name] = _empty_decorator + +locals = lambda **_: _empty_decorator +returns = lambda _: _empty_decorator diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/dictTools.py b/.venv/lib/python3.9/site-packages/fontTools/misc/dictTools.py new file mode 100644 index 00000000..ae7932c9 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/dictTools.py @@ -0,0 +1,66 @@ +"""Misc dict tools.""" + + +__all__ = ['hashdict'] + +# https://stackoverflow.com/questions/1151658/python-hashable-dicts +class hashdict(dict): + """ + hashable dict implementation, suitable for use as a key into + other dicts. 
+ + >>> h1 = hashdict({"apples": 1, "bananas":2}) + >>> h2 = hashdict({"bananas": 3, "mangoes": 5}) + >>> h1+h2 + hashdict(apples=1, bananas=3, mangoes=5) + >>> d1 = {} + >>> d1[h1] = "salad" + >>> d1[h1] + 'salad' + >>> d1[h2] + Traceback (most recent call last): + ... + KeyError: hashdict(bananas=3, mangoes=5) + + based on answers from + http://stackoverflow.com/questions/1151658/python-hashable-dicts + + """ + def __key(self): + return tuple(sorted(self.items())) + def __repr__(self): + return "{0}({1})".format(self.__class__.__name__, + ", ".join("{0}={1}".format( + str(i[0]),repr(i[1])) for i in self.__key())) + + def __hash__(self): + return hash(self.__key()) + def __setitem__(self, key, value): + raise TypeError("{0} does not support item assignment" + .format(self.__class__.__name__)) + def __delitem__(self, key): + raise TypeError("{0} does not support item assignment" + .format(self.__class__.__name__)) + def clear(self): + raise TypeError("{0} does not support item assignment" + .format(self.__class__.__name__)) + def pop(self, *args, **kwargs): + raise TypeError("{0} does not support item assignment" + .format(self.__class__.__name__)) + def popitem(self, *args, **kwargs): + raise TypeError("{0} does not support item assignment" + .format(self.__class__.__name__)) + def setdefault(self, *args, **kwargs): + raise TypeError("{0} does not support item assignment" + .format(self.__class__.__name__)) + def update(self, *args, **kwargs): + raise TypeError("{0} does not support item assignment" + .format(self.__class__.__name__)) + # update is not ok because it mutates the object + # __add__ is ok because it creates a new object + # while the new object is under construction, it's ok to mutate it + def __add__(self, right): + result = hashdict(self) + dict.update(result, right) + return result + diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/eexec.py b/.venv/lib/python3.9/site-packages/fontTools/misc/eexec.py new file mode 100644 index 
00000000..d1d4bb6a --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/eexec.py @@ -0,0 +1,113 @@ +""" +PostScript Type 1 fonts make use of two types of encryption: charstring +encryption and ``eexec`` encryption. Charstring encryption is used for +the charstrings themselves, while ``eexec`` is used to encrypt larger +sections of the font program, such as the ``Private`` and ``CharStrings`` +dictionaries. Despite the different names, the algorithm is the same, +although ``eexec`` encryption uses a fixed initial key R=55665. + +The algorithm uses cipher feedback, meaning that the ciphertext is used +to modify the key. Because of this, the routines in this module return +the new key at the end of the operation. + +""" + +from fontTools.misc.textTools import bytechr, bytesjoin, byteord + + +def _decryptChar(cipher, R): + cipher = byteord(cipher) + plain = ( (cipher ^ (R>>8)) ) & 0xFF + R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF + return bytechr(plain), R + +def _encryptChar(plain, R): + plain = byteord(plain) + cipher = ( (plain ^ (R>>8)) ) & 0xFF + R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF + return bytechr(cipher), R + + +def decrypt(cipherstring, R): + r""" + Decrypts a string using the Type 1 encryption algorithm. + + Args: + cipherstring: String of ciphertext. + R: Initial key. + + Returns: + decryptedStr: Plaintext string. + R: Output key for subsequent decryptions. + + Examples:: + + >>> testStr = b"\0\0asdadads asds\265" + >>> decryptedStr, R = decrypt(testStr, 12321) + >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1' + True + >>> R == 36142 + True + """ + plainList = [] + for cipher in cipherstring: + plain, R = _decryptChar(cipher, R) + plainList.append(plain) + plainstring = bytesjoin(plainList) + return plainstring, int(R) + +def encrypt(plainstring, R): + r""" + Encrypts a string using the Type 1 encryption algorithm. 
+ + Note that the algorithm as described in the Type 1 specification requires the + plaintext to be prefixed with a number of random bytes. (For ``eexec`` the + number of random bytes is set to 4.) This routine does *not* add the random + prefix to its input. + + Args: + plainstring: String of plaintext. + R: Initial key. + + Returns: + cipherstring: Ciphertext string. + R: Output key for subsequent encryptions. + + Examples:: + + >>> testStr = b"\0\0asdadads asds\265" + >>> decryptedStr, R = decrypt(testStr, 12321) + >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1' + True + >>> R == 36142 + True + + >>> testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1' + >>> encryptedStr, R = encrypt(testStr, 12321) + >>> encryptedStr == b"\0\0asdadads asds\265" + True + >>> R == 36142 + True + """ + cipherList = [] + for plain in plainstring: + cipher, R = _encryptChar(plain, R) + cipherList.append(cipher) + cipherstring = bytesjoin(cipherList) + return cipherstring, int(R) + + +def hexString(s): + import binascii + return binascii.hexlify(s) + +def deHexString(h): + import binascii + h = bytesjoin(h.split()) + return binascii.unhexlify(h) + + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/encodingTools.py b/.venv/lib/python3.9/site-packages/fontTools/misc/encodingTools.py new file mode 100644 index 00000000..eccf951d --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/encodingTools.py @@ -0,0 +1,71 @@ +"""fontTools.misc.encodingTools.py -- tools for working with OpenType encodings. 
+""" + +import fontTools.encodings.codecs + +# Map keyed by platformID, then platEncID, then possibly langID +_encodingMap = { + 0: { # Unicode + 0: 'utf_16_be', + 1: 'utf_16_be', + 2: 'utf_16_be', + 3: 'utf_16_be', + 4: 'utf_16_be', + 5: 'utf_16_be', + 6: 'utf_16_be', + }, + 1: { # Macintosh + # See + # https://github.com/fonttools/fonttools/issues/236 + 0: { # Macintosh, platEncID==0, keyed by langID + 15: "mac_iceland", + 17: "mac_turkish", + 18: "mac_croatian", + 24: "mac_latin2", + 25: "mac_latin2", + 26: "mac_latin2", + 27: "mac_latin2", + 28: "mac_latin2", + 36: "mac_latin2", + 37: "mac_romanian", + 38: "mac_latin2", + 39: "mac_latin2", + 40: "mac_latin2", + Ellipsis: 'mac_roman', # Other + }, + 1: 'x_mac_japanese_ttx', + 2: 'x_mac_trad_chinese_ttx', + 3: 'x_mac_korean_ttx', + 6: 'mac_greek', + 7: 'mac_cyrillic', + 25: 'x_mac_simp_chinese_ttx', + 29: 'mac_latin2', + 35: 'mac_turkish', + 37: 'mac_iceland', + }, + 2: { # ISO + 0: 'ascii', + 1: 'utf_16_be', + 2: 'latin1', + }, + 3: { # Microsoft + 0: 'utf_16_be', + 1: 'utf_16_be', + 2: 'shift_jis', + 3: 'gb2312', + 4: 'big5', + 5: 'euc_kr', + 6: 'johab', + 10: 'utf_16_be', + }, +} + +def getEncoding(platformID, platEncID, langID, default=None): + """Returns the Python encoding name for OpenType platformID/encodingID/langID + triplet. If encoding for these values is not known, by default None is + returned. That can be overriden by passing a value to the default argument. + """ + encoding = _encodingMap.get(platformID, {}).get(platEncID, default) + if isinstance(encoding, dict): + encoding = encoding.get(langID, encoding[Ellipsis]) + return encoding diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/etree.py b/.venv/lib/python3.9/site-packages/fontTools/misc/etree.py new file mode 100644 index 00000000..cd4df365 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/etree.py @@ -0,0 +1,479 @@ +"""Shim module exporting the same ElementTree API for lxml and +xml.etree backends. 
+ +When lxml is installed, it is automatically preferred over the built-in +xml.etree module. +On Python 2.7, the cElementTree module is preferred over the pure-python +ElementTree module. + +Besides exporting a unified interface, this also defines extra functions +or subclasses built-in ElementTree classes to add features that are +only availble in lxml, like OrderedDict for attributes, pretty_print and +iterwalk. +""" +from fontTools.misc.textTools import tostr + + +XML_DECLARATION = """""" + +__all__ = [ + # public symbols + "Comment", + "dump", + "Element", + "ElementTree", + "fromstring", + "fromstringlist", + "iselement", + "iterparse", + "parse", + "ParseError", + "PI", + "ProcessingInstruction", + "QName", + "SubElement", + "tostring", + "tostringlist", + "TreeBuilder", + "XML", + "XMLParser", + "register_namespace", +] + +try: + from lxml.etree import * + + _have_lxml = True +except ImportError: + try: + from xml.etree.cElementTree import * + + # the cElementTree version of XML function doesn't support + # the optional 'parser' keyword argument + from xml.etree.ElementTree import XML + except ImportError: # pragma: no cover + from xml.etree.ElementTree import * + _have_lxml = False + + import sys + + # dict is always ordered in python >= 3.6 and on pypy + PY36 = sys.version_info >= (3, 6) + try: + import __pypy__ + except ImportError: + __pypy__ = None + _dict_is_ordered = bool(PY36 or __pypy__) + del PY36, __pypy__ + + if _dict_is_ordered: + _Attrib = dict + else: + from collections import OrderedDict as _Attrib + + if isinstance(Element, type): + _Element = Element + else: + # in py27, cElementTree.Element cannot be subclassed, so + # we need to import the pure-python class + from xml.etree.ElementTree import Element as _Element + + class Element(_Element): + """Element subclass that keeps the order of attributes.""" + + def __init__(self, tag, attrib=_Attrib(), **extra): + super(Element, self).__init__(tag) + self.attrib = _Attrib() + if attrib: + 
self.attrib.update(attrib) + if extra: + self.attrib.update(extra) + + def SubElement(parent, tag, attrib=_Attrib(), **extra): + """Must override SubElement as well otherwise _elementtree.SubElement + fails if 'parent' is a subclass of Element object. + """ + element = parent.__class__(tag, attrib, **extra) + parent.append(element) + return element + + def _iterwalk(element, events, tag): + include = tag is None or element.tag == tag + if include and "start" in events: + yield ("start", element) + for e in element: + for item in _iterwalk(e, events, tag): + yield item + if include: + yield ("end", element) + + def iterwalk(element_or_tree, events=("end",), tag=None): + """A tree walker that generates events from an existing tree as + if it was parsing XML data with iterparse(). + Drop-in replacement for lxml.etree.iterwalk. + """ + if iselement(element_or_tree): + element = element_or_tree + else: + element = element_or_tree.getroot() + if tag == "*": + tag = None + for item in _iterwalk(element, events, tag): + yield item + + _ElementTree = ElementTree + + class ElementTree(_ElementTree): + """ElementTree subclass that adds 'pretty_print' and 'doctype' + arguments to the 'write' method. + Currently these are only supported for the default XML serialization + 'method', and not also for "html" or "text", for these are delegated + to the base class. 
+ """ + + def write( + self, + file_or_filename, + encoding=None, + xml_declaration=False, + method=None, + doctype=None, + pretty_print=False, + ): + if method and method != "xml": + # delegate to super-class + super(ElementTree, self).write( + file_or_filename, + encoding=encoding, + xml_declaration=xml_declaration, + method=method, + ) + return + + if encoding is not None and encoding.lower() == "unicode": + if xml_declaration: + raise ValueError( + "Serialisation to unicode must not request an XML declaration" + ) + write_declaration = False + encoding = "unicode" + elif xml_declaration is None: + # by default, write an XML declaration only for non-standard encodings + write_declaration = encoding is not None and encoding.upper() not in ( + "ASCII", + "UTF-8", + "UTF8", + "US-ASCII", + ) + else: + write_declaration = xml_declaration + + if encoding is None: + encoding = "ASCII" + + if pretty_print: + # NOTE this will modify the tree in-place + _indent(self._root) + + with _get_writer(file_or_filename, encoding) as write: + if write_declaration: + write(XML_DECLARATION % encoding.upper()) + if pretty_print: + write("\n") + if doctype: + write(_tounicode(doctype)) + if pretty_print: + write("\n") + + qnames, namespaces = _namespaces(self._root) + _serialize_xml(write, self._root, qnames, namespaces) + + import io + + def tostring( + element, + encoding=None, + xml_declaration=None, + method=None, + doctype=None, + pretty_print=False, + ): + """Custom 'tostring' function that uses our ElementTree subclass, with + pretty_print support. 
+ """ + stream = io.StringIO() if encoding == "unicode" else io.BytesIO() + ElementTree(element).write( + stream, + encoding=encoding, + xml_declaration=xml_declaration, + method=method, + doctype=doctype, + pretty_print=pretty_print, + ) + return stream.getvalue() + + # serialization support + + import re + + # Valid XML strings can include any Unicode character, excluding control + # characters, the surrogate blocks, FFFE, and FFFF: + # Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] + # Here we reversed the pattern to match only the invalid characters. + # For the 'narrow' python builds supporting only UCS-2, which represent + # characters beyond BMP as UTF-16 surrogate pairs, we need to pass through + # the surrogate block. I haven't found a more elegant solution... + UCS2 = sys.maxunicode < 0x10FFFF + if UCS2: + _invalid_xml_string = re.compile( + "[\u0000-\u0008\u000B-\u000C\u000E-\u001F\uFFFE-\uFFFF]" + ) + else: + _invalid_xml_string = re.compile( + "[\u0000-\u0008\u000B-\u000C\u000E-\u001F\uD800-\uDFFF\uFFFE-\uFFFF]" + ) + + def _tounicode(s): + """Test if a string is valid user input and decode it to unicode string + using ASCII encoding if it's a bytes string. + Reject all bytes/unicode input that contains non-XML characters. + Reject all bytes input that contains non-ASCII characters. + """ + try: + s = tostr(s, encoding="ascii", errors="strict") + except UnicodeDecodeError: + raise ValueError( + "Bytes strings can only contain ASCII characters. 
" + "Use unicode strings for non-ASCII characters.") + except AttributeError: + _raise_serialization_error(s) + if s and _invalid_xml_string.search(s): + raise ValueError( + "All strings must be XML compatible: Unicode or ASCII, " + "no NULL bytes or control characters" + ) + return s + + import contextlib + + @contextlib.contextmanager + def _get_writer(file_or_filename, encoding): + # returns text write method and release all resources after using + try: + write = file_or_filename.write + except AttributeError: + # file_or_filename is a file name + f = open( + file_or_filename, + "w", + encoding="utf-8" if encoding == "unicode" else encoding, + errors="xmlcharrefreplace", + ) + with f: + yield f.write + else: + # file_or_filename is a file-like object + # encoding determines if it is a text or binary writer + if encoding == "unicode": + # use a text writer as is + yield write + else: + # wrap a binary writer with TextIOWrapper + detach_buffer = False + if isinstance(file_or_filename, io.BufferedIOBase): + buf = file_or_filename + elif isinstance(file_or_filename, io.RawIOBase): + buf = io.BufferedWriter(file_or_filename) + detach_buffer = True + else: + # This is to handle passed objects that aren't in the + # IOBase hierarchy, but just have a write method + buf = io.BufferedIOBase() + buf.writable = lambda: True + buf.write = write + try: + # TextIOWrapper uses this methods to determine + # if BOM (for UTF-16, etc) should be added + buf.seekable = file_or_filename.seekable + buf.tell = file_or_filename.tell + except AttributeError: + pass + wrapper = io.TextIOWrapper( + buf, + encoding=encoding, + errors="xmlcharrefreplace", + newline="\n", + ) + try: + yield wrapper.write + finally: + # Keep the original file open when the TextIOWrapper and + # the BufferedWriter are destroyed + wrapper.detach() + if detach_buffer: + buf.detach() + + from xml.etree.ElementTree import _namespace_map + + def _namespaces(elem): + # identify namespaces used in this tree + + # maps 
qnames to *encoded* prefix:local names + qnames = {None: None} + + # maps uri:s to prefixes + namespaces = {} + + def add_qname(qname): + # calculate serialized qname representation + try: + qname = _tounicode(qname) + if qname[:1] == "{": + uri, tag = qname[1:].rsplit("}", 1) + prefix = namespaces.get(uri) + if prefix is None: + prefix = _namespace_map.get(uri) + if prefix is None: + prefix = "ns%d" % len(namespaces) + else: + prefix = _tounicode(prefix) + if prefix != "xml": + namespaces[uri] = prefix + if prefix: + qnames[qname] = "%s:%s" % (prefix, tag) + else: + qnames[qname] = tag # default element + else: + qnames[qname] = qname + except TypeError: + _raise_serialization_error(qname) + + # populate qname and namespaces table + for elem in elem.iter(): + tag = elem.tag + if isinstance(tag, QName): + if tag.text not in qnames: + add_qname(tag.text) + elif isinstance(tag, str): + if tag not in qnames: + add_qname(tag) + elif tag is not None and tag is not Comment and tag is not PI: + _raise_serialization_error(tag) + for key, value in elem.items(): + if isinstance(key, QName): + key = key.text + if key not in qnames: + add_qname(key) + if isinstance(value, QName) and value.text not in qnames: + add_qname(value.text) + text = elem.text + if isinstance(text, QName) and text.text not in qnames: + add_qname(text.text) + return qnames, namespaces + + def _serialize_xml(write, elem, qnames, namespaces, **kwargs): + tag = elem.tag + text = elem.text + if tag is Comment: + write("" % _tounicode(text)) + elif tag is ProcessingInstruction: + write("" % _tounicode(text)) + else: + tag = qnames[_tounicode(tag) if tag is not None else None] + if tag is None: + if text: + write(_escape_cdata(text)) + for e in elem: + _serialize_xml(write, e, qnames, None) + else: + write("<" + tag) + if namespaces: + for uri, prefix in sorted( + namespaces.items(), key=lambda x: x[1] + ): # sort on prefix + if prefix: + prefix = ":" + prefix + write(' xmlns%s="%s"' % (prefix, 
_escape_attrib(uri))) + attrs = elem.attrib + if attrs: + # try to keep existing attrib order + if len(attrs) <= 1 or type(attrs) is _Attrib: + items = attrs.items() + else: + # if plain dict, use lexical order + items = sorted(attrs.items()) + for k, v in items: + if isinstance(k, QName): + k = _tounicode(k.text) + else: + k = _tounicode(k) + if isinstance(v, QName): + v = qnames[_tounicode(v.text)] + else: + v = _escape_attrib(v) + write(' %s="%s"' % (qnames[k], v)) + if text is not None or len(elem): + write(">") + if text: + write(_escape_cdata(text)) + for e in elem: + _serialize_xml(write, e, qnames, None) + write("") + else: + write("/>") + if elem.tail: + write(_escape_cdata(elem.tail)) + + def _raise_serialization_error(text): + raise TypeError( + "cannot serialize %r (type %s)" % (text, type(text).__name__) + ) + + def _escape_cdata(text): + # escape character data + try: + text = _tounicode(text) + # it's worth avoiding do-nothing calls for short strings + if "&" in text: + text = text.replace("&", "&") + if "<" in text: + text = text.replace("<", "<") + if ">" in text: + text = text.replace(">", ">") + return text + except (TypeError, AttributeError): + _raise_serialization_error(text) + + def _escape_attrib(text): + # escape attribute value + try: + text = _tounicode(text) + if "&" in text: + text = text.replace("&", "&") + if "<" in text: + text = text.replace("<", "<") + if ">" in text: + text = text.replace(">", ">") + if '"' in text: + text = text.replace('"', """) + if "\n" in text: + text = text.replace("\n", " ") + return text + except (TypeError, AttributeError): + _raise_serialization_error(text) + + def _indent(elem, level=0): + # From http://effbot.org/zone/element-lib.htm#prettyprint + i = "\n" + level * " " + if len(elem): + if not elem.text or not elem.text.strip(): + elem.text = i + " " + if not elem.tail or not elem.tail.strip(): + elem.tail = i + for elem in elem: + _indent(elem, level + 1) + if not elem.tail or not elem.tail.strip(): 
+ elem.tail = i + else: + if level and (not elem.tail or not elem.tail.strip()): + elem.tail = i diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/filenames.py b/.venv/lib/python3.9/site-packages/fontTools/misc/filenames.py new file mode 100644 index 00000000..0f010008 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/filenames.py @@ -0,0 +1,242 @@ +""" +This module implements the algorithm for converting between a "user name" - +something that a user can choose arbitrarily inside a font editor - and a file +name suitable for use in a wide range of operating systems and filesystems. + +The `UFO 3 specification `_ +provides an example of an algorithm for such conversion, which avoids illegal +characters, reserved file names, ambiguity between upper- and lower-case +characters, and clashes with existing files. + +This code was originally copied from +`ufoLib `_ +by Tal Leming and is copyright (c) 2005-2016, The RoboFab Developers: + +- Erik van Blokland +- Tal Leming +- Just van Rossum +""" + + +illegalCharacters = r"\" * + / : < > ? [ \ ] | \0".split(" ") +illegalCharacters += [chr(i) for i in range(1, 32)] +illegalCharacters += [chr(0x7F)] +reservedFileNames = "CON PRN AUX CLOCK$ NUL A:-Z: COM1".lower().split(" ") +reservedFileNames += "LPT1 LPT2 LPT3 COM2 COM3 COM4".lower().split(" ") +maxFileNameLength = 255 + + +class NameTranslationError(Exception): + pass + + +def userNameToFileName(userName, existing=[], prefix="", suffix=""): + """Converts from a user name to a file name. + + Takes care to avoid illegal characters, reserved file names, ambiguity between + upper- and lower-case characters, and clashes with existing files. + + Args: + userName (str): The input file name. + existing: A case-insensitive list of all existing file names. + prefix: Prefix to be prepended to the file name. + suffix: Suffix to be appended to the file name. + + Returns: + A suitable filename. 
+ + Raises: + NameTranslationError: If no suitable name could be generated. + + Examples:: + + >>> userNameToFileName("a") == "a" + True + >>> userNameToFileName("A") == "A_" + True + >>> userNameToFileName("AE") == "A_E_" + True + >>> userNameToFileName("Ae") == "A_e" + True + >>> userNameToFileName("ae") == "ae" + True + >>> userNameToFileName("aE") == "aE_" + True + >>> userNameToFileName("a.alt") == "a.alt" + True + >>> userNameToFileName("A.alt") == "A_.alt" + True + >>> userNameToFileName("A.Alt") == "A_.A_lt" + True + >>> userNameToFileName("A.aLt") == "A_.aL_t" + True + >>> userNameToFileName(u"A.alT") == "A_.alT_" + True + >>> userNameToFileName("T_H") == "T__H_" + True + >>> userNameToFileName("T_h") == "T__h" + True + >>> userNameToFileName("t_h") == "t_h" + True + >>> userNameToFileName("F_F_I") == "F__F__I_" + True + >>> userNameToFileName("f_f_i") == "f_f_i" + True + >>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash" + True + >>> userNameToFileName(".notdef") == "_notdef" + True + >>> userNameToFileName("con") == "_con" + True + >>> userNameToFileName("CON") == "C_O_N_" + True + >>> userNameToFileName("con.alt") == "_con.alt" + True + >>> userNameToFileName("alt.con") == "alt._con" + True + """ + # the incoming name must be a str + if not isinstance(userName, str): + raise ValueError("The value for userName must be a string.") + # establish the prefix and suffix lengths + prefixLength = len(prefix) + suffixLength = len(suffix) + # replace an initial period with an _ + # if no prefix is to be added + if not prefix and userName[0] == ".": + userName = "_" + userName[1:] + # filter the user name + filteredUserName = [] + for character in userName: + # replace illegal characters with _ + if character in illegalCharacters: + character = "_" + # add _ to all non-lower characters + elif character != character.lower(): + character += "_" + filteredUserName.append(character) + userName = "".join(filteredUserName) + # clip to 255 + sliceLength = 
maxFileNameLength - prefixLength - suffixLength + userName = userName[:sliceLength] + # test for illegal files names + parts = [] + for part in userName.split("."): + if part.lower() in reservedFileNames: + part = "_" + part + parts.append(part) + userName = ".".join(parts) + # test for clash + fullName = prefix + userName + suffix + if fullName.lower() in existing: + fullName = handleClash1(userName, existing, prefix, suffix) + # finished + return fullName + +def handleClash1(userName, existing=[], prefix="", suffix=""): + """ + existing should be a case-insensitive list + of all existing file names. + + >>> prefix = ("0" * 5) + "." + >>> suffix = "." + ("0" * 10) + >>> existing = ["a" * 5] + + >>> e = list(existing) + >>> handleClash1(userName="A" * 5, existing=e, + ... prefix=prefix, suffix=suffix) == ( + ... '00000.AAAAA000000000000001.0000000000') + True + + >>> e = list(existing) + >>> e.append(prefix + "aaaaa" + "1".zfill(15) + suffix) + >>> handleClash1(userName="A" * 5, existing=e, + ... prefix=prefix, suffix=suffix) == ( + ... '00000.AAAAA000000000000002.0000000000') + True + + >>> e = list(existing) + >>> e.append(prefix + "AAAAA" + "2".zfill(15) + suffix) + >>> handleClash1(userName="A" * 5, existing=e, + ... prefix=prefix, suffix=suffix) == ( + ... 
'00000.AAAAA000000000000001.0000000000') + True + """ + # if the prefix length + user name length + suffix length + 15 is at + # or past the maximum length, silce 15 characters off of the user name + prefixLength = len(prefix) + suffixLength = len(suffix) + if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength: + l = (prefixLength + len(userName) + suffixLength + 15) + sliceLength = maxFileNameLength - l + userName = userName[:sliceLength] + finalName = None + # try to add numbers to create a unique name + counter = 1 + while finalName is None: + name = userName + str(counter).zfill(15) + fullName = prefix + name + suffix + if fullName.lower() not in existing: + finalName = fullName + break + else: + counter += 1 + if counter >= 999999999999999: + break + # if there is a clash, go to the next fallback + if finalName is None: + finalName = handleClash2(existing, prefix, suffix) + # finished + return finalName + +def handleClash2(existing=[], prefix="", suffix=""): + """ + existing should be a case-insensitive list + of all existing file names. + + >>> prefix = ("0" * 5) + "." + >>> suffix = "." + ("0" * 10) + >>> existing = [prefix + str(i) + suffix for i in range(100)] + + >>> e = list(existing) + >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == ( + ... '00000.100.0000000000') + True + + >>> e = list(existing) + >>> e.remove(prefix + "1" + suffix) + >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == ( + ... '00000.1.0000000000') + True + + >>> e = list(existing) + >>> e.remove(prefix + "2" + suffix) + >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == ( + ... 
'00000.2.0000000000') + True + """ + # calculate the longest possible string + maxLength = maxFileNameLength - len(prefix) - len(suffix) + maxValue = int("9" * maxLength) + # try to find a number + finalName = None + counter = 1 + while finalName is None: + fullName = prefix + str(counter) + suffix + if fullName.lower() not in existing: + finalName = fullName + break + else: + counter += 1 + if counter >= maxValue: + break + # raise an error if nothing has been found + if finalName is None: + raise NameTranslationError("No unique name could be found.") + # finished + return finalName + +if __name__ == "__main__": + import doctest + import sys + sys.exit(doctest.testmod().failed) diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/fixedTools.py b/.venv/lib/python3.9/site-packages/fontTools/misc/fixedTools.py new file mode 100644 index 00000000..f87e3322 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/fixedTools.py @@ -0,0 +1,251 @@ +""" +The `OpenType specification `_ +defines two fixed-point data types: + +``Fixed`` + A 32-bit signed fixed-point number with a 16 bit twos-complement + magnitude component and 16 fractional bits. +``F2DOT14`` + A 16-bit signed fixed-point number with a 2 bit twos-complement + magnitude component and 14 fractional bits. + +To support reading and writing data with these data types, this module provides +functions for converting between fixed-point, float and string representations. + +.. data:: MAX_F2DOT14 + + The maximum value that can still fit in an F2Dot14. 
def fixedToFloat(value, precisionBits):
    """Convert a fixed-point integer to the float it represents.

    The value is interpreted as having ``precisionBits`` fractional
    binary digits.

    Args:
        value (int): Number in fixed-point format.
        precisionBits (int): Number of precision bits.

    Returns:
        float: The equivalent floating point value.

    Examples::

        >>> import math
        >>> f = fixedToFloat(-10139, precisionBits=14)
        >>> math.isclose(f, -0.61883544921875)
        True
    """
    scale = 1 << precisionBits
    return value / scale
def strToFixed(string, precisionBits):
    """Convert a string holding a decimal float to a fixed-point integer.

    Args:
        string (str): A string representing a decimal float.
        precisionBits (int): Number of precision bits, *up to a maximum of 16*.

    Returns:
        int: Fixed-point representation.

    Examples::

        >>> ## to convert a float string to a 2.14 fixed-point number:
        >>> strToFixed('-0.61884', precisionBits=14)
        -10139
    """
    scale = 1 << precisionBits
    return otRound(float(string) * scale)
def floatToFixedToStr(value, precisionBits):
    """Round a float to fixed-point precision and return the shortest
    decimal string denoting that fixed-point number.

    The returned string has the least number of fractional decimal digits
    that still identifies the fixed-point value with ``precisionBits``
    fractional binary digits.  Delegates to ``nearestMultipleShortestRepr``.

    >>> floatToFixedToStr(-0.61883544921875, precisionBits=14)
    '-0.61884'

    Args:
        value (float): The float value to convert.
        precisionBits (int): Number of precision bits, *up to a maximum of 16*.

    Returns:
        str: A string representation of the value.
    """
    factor = 1.0 / (1 << precisionBits)
    return nearestMultipleShortestRepr(value, factor=factor)
+ """ + if value < 0x10000: + newValue = floatToFixed(value, 16) + log.warning( + "Table version value is a float: %.4f; " + "fix to use hex instead: 0x%08x", value, newValue) + value = newValue + return value + + +def versionToFixed(value): + """Ensure a table version number is fixed-point. + + Args: + value (str): a candidate table version number. + + Returns: + int: A table version number, possibly corrected to fixed-point. + """ + value = int(value, 0) if value.startswith("0") else float(value) + value = ensureVersionIsLong(value) + return value diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/intTools.py b/.venv/lib/python3.9/site-packages/fontTools/misc/intTools.py new file mode 100644 index 00000000..6ba03e16 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/intTools.py @@ -0,0 +1,25 @@ +__all__ = ["popCount"] + + +try: + bit_count = int.bit_count +except AttributeError: + + def bit_count(v): + return bin(v).count("1") + + +"""Return number of 1 bits (population count) of the absolute value of an integer. + +See https://docs.python.org/3.10/library/stdtypes.html#int.bit_count +""" +popCount = bit_count + + +def bit_indices(v): + """Return list of indices where bits are set, 0 being the index of the least significant bit. 
+ + >>> bit_indices(0b101) + [0, 2] + """ + return [i for i, b in enumerate(bin(v)[::-1]) if b == "1"] diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/loggingTools.py b/.venv/lib/python3.9/site-packages/fontTools/misc/loggingTools.py new file mode 100644 index 00000000..d1baa839 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/loggingTools.py @@ -0,0 +1,536 @@ +import sys +import logging +import timeit +from functools import wraps +from collections.abc import Mapping, Callable +import warnings +from logging import PercentStyle + + +# default logging level used by Timer class +TIME_LEVEL = logging.DEBUG + +# per-level format strings used by the default formatter +# (the level name is not printed for INFO and DEBUG messages) +DEFAULT_FORMATS = { + "*": "%(levelname)s: %(message)s", + "INFO": "%(message)s", + "DEBUG": "%(message)s", + } + + +class LevelFormatter(logging.Formatter): + """Log formatter with level-specific formatting. + + Formatter class which optionally takes a dict of logging levels to + format strings, allowing to customise the log records appearance for + specific levels. + + + Attributes: + fmt: A dictionary mapping logging levels to format strings. + The ``*`` key identifies the default format string. + datefmt: As per py:class:`logging.Formatter` + style: As per py:class:`logging.Formatter` + + >>> import sys + >>> handler = logging.StreamHandler(sys.stdout) + >>> formatter = LevelFormatter( + ... fmt={ + ... '*': '[%(levelname)s] %(message)s', + ... 'DEBUG': '%(name)s [%(levelname)s] %(message)s', + ... 'INFO': '%(message)s', + ... 
}) + >>> handler.setFormatter(formatter) + >>> log = logging.getLogger('test') + >>> log.setLevel(logging.DEBUG) + >>> log.addHandler(handler) + >>> log.debug('this uses a custom format string') + test [DEBUG] this uses a custom format string + >>> log.info('this also uses a custom format string') + this also uses a custom format string + >>> log.warning("this one uses the default format string") + [WARNING] this one uses the default format string + """ + + def __init__(self, fmt=None, datefmt=None, style="%"): + if style != '%': + raise ValueError( + "only '%' percent style is supported in both python 2 and 3") + if fmt is None: + fmt = DEFAULT_FORMATS + if isinstance(fmt, str): + default_format = fmt + custom_formats = {} + elif isinstance(fmt, Mapping): + custom_formats = dict(fmt) + default_format = custom_formats.pop("*", None) + else: + raise TypeError('fmt must be a str or a dict of str: %r' % fmt) + super(LevelFormatter, self).__init__(default_format, datefmt) + self.default_format = self._fmt + self.custom_formats = {} + for level, fmt in custom_formats.items(): + level = logging._checkLevel(level) + self.custom_formats[level] = fmt + + def format(self, record): + if self.custom_formats: + fmt = self.custom_formats.get(record.levelno, self.default_format) + if self._fmt != fmt: + self._fmt = fmt + # for python >= 3.2, _style needs to be set if _fmt changes + if PercentStyle: + self._style = PercentStyle(fmt) + return super(LevelFormatter, self).format(record) + + +def configLogger(**kwargs): + """A more sophisticated logging system configuation manager. + + This is more or less the same as :py:func:`logging.basicConfig`, + with some additional options and defaults. + + The default behaviour is to create a ``StreamHandler`` which writes to + sys.stderr, set a formatter using the ``DEFAULT_FORMATS`` strings, and add + the handler to the top-level library logger ("fontTools"). 
+ + A number of optional keyword arguments may be specified, which can alter + the default behaviour. + + Args: + + logger: Specifies the logger name or a Logger instance to be + configured. (Defaults to "fontTools" logger). Unlike ``basicConfig``, + this function can be called multiple times to reconfigure a logger. + If the logger or any of its children already exists before the call is + made, they will be reset before the new configuration is applied. + filename: Specifies that a ``FileHandler`` be created, using the + specified filename, rather than a ``StreamHandler``. + filemode: Specifies the mode to open the file, if filename is + specified. (If filemode is unspecified, it defaults to ``a``). + format: Use the specified format string for the handler. This + argument also accepts a dictionary of format strings keyed by + level name, to allow customising the records appearance for + specific levels. The special ``'*'`` key is for 'any other' level. + datefmt: Use the specified date/time format. + level: Set the logger level to the specified level. + stream: Use the specified stream to initialize the StreamHandler. Note + that this argument is incompatible with ``filename`` - if both + are present, ``stream`` is ignored. + handlers: If specified, this should be an iterable of already created + handlers, which will be added to the logger. Any handler in the + list which does not have a formatter assigned will be assigned the + formatter created in this function. + filters: If specified, this should be an iterable of already created + filters. If the ``handlers`` do not already have filters assigned, + these filters will be added to them. + propagate: All loggers have a ``propagate`` attribute which determines + whether to continue searching for handlers up the logging hierarchy. + If not provided, the "propagate" attribute will be set to ``False``. + """ + # using kwargs to enforce keyword-only arguments in py2. 
+ handlers = kwargs.pop("handlers", None) + if handlers is None: + if "stream" in kwargs and "filename" in kwargs: + raise ValueError("'stream' and 'filename' should not be " + "specified together") + else: + if "stream" in kwargs or "filename" in kwargs: + raise ValueError("'stream' or 'filename' should not be " + "specified together with 'handlers'") + if handlers is None: + filename = kwargs.pop("filename", None) + mode = kwargs.pop("filemode", 'a') + if filename: + h = logging.FileHandler(filename, mode) + else: + stream = kwargs.pop("stream", None) + h = logging.StreamHandler(stream) + handlers = [h] + # By default, the top-level library logger is configured. + logger = kwargs.pop("logger", "fontTools") + if not logger or isinstance(logger, str): + # empty "" or None means the 'root' logger + logger = logging.getLogger(logger) + # before (re)configuring, reset named logger and its children (if exist) + _resetExistingLoggers(parent=logger.name) + # use DEFAULT_FORMATS if 'format' is None + fs = kwargs.pop("format", None) + dfs = kwargs.pop("datefmt", None) + # XXX: '%' is the only format style supported on both py2 and 3 + style = kwargs.pop("style", '%') + fmt = LevelFormatter(fs, dfs, style) + filters = kwargs.pop("filters", []) + for h in handlers: + if h.formatter is None: + h.setFormatter(fmt) + if not h.filters: + for f in filters: + h.addFilter(f) + logger.addHandler(h) + if logger.name != "root": + # stop searching up the hierarchy for handlers + logger.propagate = kwargs.pop("propagate", False) + # set a custom severity level + level = kwargs.pop("level", None) + if level is not None: + logger.setLevel(level) + if kwargs: + keys = ', '.join(kwargs.keys()) + raise ValueError('Unrecognised argument(s): %s' % keys) + + +def _resetExistingLoggers(parent="root"): + """ Reset the logger named 'parent' and all its children to their initial + state, if they already exist in the current configuration. 
+ """ + root = logging.root + # get sorted list of all existing loggers + existing = sorted(root.manager.loggerDict.keys()) + if parent == "root": + # all the existing loggers are children of 'root' + loggers_to_reset = [parent] + existing + elif parent not in existing: + # nothing to do + return + elif parent in existing: + loggers_to_reset = [parent] + # collect children, starting with the entry after parent name + i = existing.index(parent) + 1 + prefixed = parent + "." + pflen = len(prefixed) + num_existing = len(existing) + while i < num_existing: + if existing[i][:pflen] == prefixed: + loggers_to_reset.append(existing[i]) + i += 1 + for name in loggers_to_reset: + if name == "root": + root.setLevel(logging.WARNING) + for h in root.handlers[:]: + root.removeHandler(h) + for f in root.filters[:]: + root.removeFilters(f) + root.disabled = False + else: + logger = root.manager.loggerDict[name] + logger.level = logging.NOTSET + logger.handlers = [] + logger.filters = [] + logger.propagate = True + logger.disabled = False + + +class Timer(object): + """ Keeps track of overall time and split/lap times. + + >>> import time + >>> timer = Timer() + >>> time.sleep(0.01) + >>> print("First lap:", timer.split()) + First lap: ... + >>> time.sleep(0.02) + >>> print("Second lap:", timer.split()) + Second lap: ... + >>> print("Overall time:", timer.time()) + Overall time: ... + + Can be used as a context manager inside with-statements. + + >>> with Timer() as t: + ... time.sleep(0.01) + >>> print("%0.3f seconds" % t.elapsed) + 0... seconds + + If initialised with a logger, it can log the elapsed time automatically + upon exiting the with-statement. + + >>> import logging + >>> log = logging.getLogger("my-fancy-timer-logger") + >>> configLogger(logger=log, level="DEBUG", format="%(message)s", stream=sys.stdout) + >>> with Timer(log, 'do something'): + ... time.sleep(0.01) + Took ... 
to do something + + The same Timer instance, holding a reference to a logger, can be reused + in multiple with-statements, optionally with different messages or levels. + + >>> timer = Timer(log) + >>> with timer(): + ... time.sleep(0.01) + elapsed time: ...s + >>> with timer('redo it', level=logging.INFO): + ... time.sleep(0.02) + Took ... to redo it + + It can also be used as a function decorator to log the time elapsed to run + the decorated function. + + >>> @timer() + ... def test1(): + ... time.sleep(0.01) + >>> @timer('run test 2', level=logging.INFO) + ... def test2(): + ... time.sleep(0.02) + >>> test1() + Took ... to run 'test1' + >>> test2() + Took ... to run test 2 + """ + + # timeit.default_timer choses the most accurate clock for each platform + _time = timeit.default_timer + default_msg = "elapsed time: %(time).3fs" + default_format = "Took %(time).3fs to %(msg)s" + + def __init__(self, logger=None, msg=None, level=None, start=None): + self.reset(start) + if logger is None: + for arg in ('msg', 'level'): + if locals().get(arg) is not None: + raise ValueError( + "'%s' can't be specified without a 'logger'" % arg) + self.logger = logger + self.level = level if level is not None else TIME_LEVEL + self.msg = msg + + def reset(self, start=None): + """ Reset timer to 'start_time' or the current time. """ + if start is None: + self.start = self._time() + else: + self.start = start + self.last = self.start + self.elapsed = 0.0 + + def time(self): + """ Return the overall time (in seconds) since the timer started. """ + return self._time() - self.start + + def split(self): + """ Split and return the lap time (in seconds) in between splits. """ + current = self._time() + self.elapsed = current - self.last + self.last = current + return self.elapsed + + def formatTime(self, msg, time): + """ Format 'time' value in 'msg' and return formatted string. + If 'msg' contains a '%(time)' format string, try to use that. + Otherwise, use the predefined 'default_format'. 
+ If 'msg' is empty or None, fall back to 'default_msg'. + """ + if not msg: + msg = self.default_msg + if msg.find("%(time)") < 0: + msg = self.default_format % {"msg": msg, "time": time} + else: + try: + msg = msg % {"time": time} + except (KeyError, ValueError): + pass # skip if the format string is malformed + return msg + + def __enter__(self): + """ Start a new lap """ + self.last = self._time() + self.elapsed = 0.0 + return self + + def __exit__(self, exc_type, exc_value, traceback): + """ End the current lap. If timer has a logger, log the time elapsed, + using the format string in self.msg (or the default one). + """ + time = self.split() + if self.logger is None or exc_type: + # if there's no logger attached, or if any exception occurred in + # the with-statement, exit without logging the time + return + message = self.formatTime(self.msg, time) + # Allow log handlers to see the individual parts to facilitate things + # like a server accumulating aggregate stats. + msg_parts = { 'msg': self.msg, 'time': time } + self.logger.log(self.level, message, msg_parts) + + def __call__(self, func_or_msg=None, **kwargs): + """ If the first argument is a function, return a decorator which runs + the wrapped function inside Timer's context manager. + Otherwise, treat the first argument as a 'msg' string and return an updated + Timer instance, referencing the same logger. + A 'level' keyword can also be passed to override self.level. 
+ """ + if isinstance(func_or_msg, Callable): + func = func_or_msg + # use the function name when no explicit 'msg' is provided + if not self.msg: + self.msg = "run '%s'" % func.__name__ + + @wraps(func) + def wrapper(*args, **kwds): + with self: + return func(*args, **kwds) + return wrapper + else: + msg = func_or_msg or kwargs.get("msg") + level = kwargs.get("level", self.level) + return self.__class__(self.logger, msg, level) + + def __float__(self): + return self.elapsed + + def __int__(self): + return int(self.elapsed) + + def __str__(self): + return "%.3f" % self.elapsed + + +class ChannelsFilter(logging.Filter): + """Provides a hierarchical filter for log entries based on channel names. + + Filters out records emitted from a list of enabled channel names, + including their children. It works the same as the ``logging.Filter`` + class, but allows the user to specify multiple channel names. + + >>> import sys + >>> handler = logging.StreamHandler(sys.stdout) + >>> handler.setFormatter(logging.Formatter("%(message)s")) + >>> filter = ChannelsFilter("A.B", "C.D") + >>> handler.addFilter(filter) + >>> root = logging.getLogger() + >>> root.addHandler(handler) + >>> root.setLevel(level=logging.DEBUG) + >>> logging.getLogger('A.B').debug('this record passes through') + this record passes through + >>> logging.getLogger('A.B.C').debug('records from children also pass') + records from children also pass + >>> logging.getLogger('C.D').debug('this one as well') + this one as well + >>> logging.getLogger('A.B.').debug('also this one') + also this one + >>> logging.getLogger('A.F').debug('but this one does not!') + >>> logging.getLogger('C.DE').debug('neither this one!') + """ + + def __init__(self, *names): + self.names = names + self.num = len(names) + self.lengths = {n: len(n) for n in names} + + def filter(self, record): + if self.num == 0: + return True + for name in self.names: + nlen = self.lengths[name] + if name == record.name: + return True + elif 
(record.name.find(name, 0, nlen) == 0 + and record.name[nlen] == "."): + return True + return False + + +class CapturingLogHandler(logging.Handler): + def __init__(self, logger, level): + super(CapturingLogHandler, self).__init__(level=level) + self.records = [] + if isinstance(logger, str): + self.logger = logging.getLogger(logger) + else: + self.logger = logger + + def __enter__(self): + self.original_disabled = self.logger.disabled + self.original_level = self.logger.level + self.original_propagate = self.logger.propagate + + self.logger.addHandler(self) + self.logger.setLevel(self.level) + self.logger.disabled = False + self.logger.propagate = False + + return self + + def __exit__(self, type, value, traceback): + self.logger.removeHandler(self) + self.logger.setLevel(self.original_level) + self.logger.disabled = self.original_disabled + self.logger.propagate = self.original_propagate + + return self + + def emit(self, record): + self.records.append(record) + + def assertRegex(self, regexp, msg=None): + import re + pattern = re.compile(regexp) + for r in self.records: + if pattern.search(r.getMessage()): + return True + if msg is None: + msg = "Pattern '%s' not found in logger records" % regexp + assert 0, msg + + +class LogMixin(object): + """ Mixin class that adds logging functionality to another class. + + You can define a new class that subclasses from ``LogMixin`` as well as + other base classes through multiple inheritance. + All instances of that class will have a ``log`` property that returns + a ``logging.Logger`` named after their respective ``.``. + + For example: + + >>> class BaseClass(object): + ... pass + >>> class MyClass(LogMixin, BaseClass): + ... pass + >>> a = MyClass() + >>> isinstance(a.log, logging.Logger) + True + >>> print(a.log.name) + fontTools.misc.loggingTools.MyClass + >>> class AnotherClass(MyClass): + ... 
def deprecateFunction(msg, category=UserWarning):
    """Decorator factory: emit a warning each time the wrapped function runs.

    Args:
        msg (str): explanation appended to the warning text.
        category: warning category (defaults to ``UserWarning``).
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # stacklevel=2 attributes the warning to the caller of `func`
            warnings.warn(
                "%r is deprecated; %s" % (func.__name__, msg),
                category=category, stacklevel=2)
            return func(*args, **kwargs)
        return wrapper
    return decorator
+ """ + if xattr is not None: + try: + finderInfo = xattr.getxattr(path, 'com.apple.FinderInfo') + except (KeyError, IOError): + pass + else: + fileType = Tag(finderInfo[:4]) + fileCreator = Tag(finderInfo[4:8]) + return fileCreator, fileType + return None, None + + +def setMacCreatorAndType(path, fileCreator, fileType): + """Set file creator and file type codes for a path. + + Note that if the ``xattr`` module is not installed, no action is + taken but no error is raised. + + Args: + path (str): A file path. + fileCreator: A four-character file creator tag. + fileType: A four-character file type tag. + + """ + if xattr is not None: + from fontTools.misc.textTools import pad + if not all(len(s) == 4 for s in (fileCreator, fileType)): + raise TypeError('arg must be string of 4 chars') + finderInfo = pad(bytesjoin([fileType, fileCreator]), 32) + xattr.setxattr(path, 'com.apple.FinderInfo', finderInfo) diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/macRes.py b/.venv/lib/python3.9/site-packages/fontTools/misc/macRes.py new file mode 100644 index 00000000..895ca1b8 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/macRes.py @@ -0,0 +1,259 @@ +from io import BytesIO +import struct +from fontTools.misc import sstruct +from fontTools.misc.textTools import bytesjoin, tostr +from collections import OrderedDict +from collections.abc import MutableMapping + + +class ResourceError(Exception): + pass + + +class ResourceReader(MutableMapping): + """Reader for Mac OS resource forks. + + Parses a resource fork and returns resources according to their type. + If run on OS X, this will open the resource fork in the filesystem. + Otherwise, it will open the file itself and attempt to read it as + though it were a resource fork. + + The returned object can be indexed by type and iterated over, + returning in each case a list of py:class:`Resource` objects + representing all the resources of a certain type. 
+ + """ + def __init__(self, fileOrPath): + """Open a file + + Args: + fileOrPath: Either an object supporting a ``read`` method, an + ``os.PathLike`` object, or a string. + """ + self._resources = OrderedDict() + if hasattr(fileOrPath, 'read'): + self.file = fileOrPath + else: + try: + # try reading from the resource fork (only works on OS X) + self.file = self.openResourceFork(fileOrPath) + self._readFile() + return + except (ResourceError, IOError): + # if it fails, use the data fork + self.file = self.openDataFork(fileOrPath) + self._readFile() + + @staticmethod + def openResourceFork(path): + if hasattr(path, "__fspath__"): # support os.PathLike objects + path = path.__fspath__() + with open(path + '/..namedfork/rsrc', 'rb') as resfork: + data = resfork.read() + infile = BytesIO(data) + infile.name = path + return infile + + @staticmethod + def openDataFork(path): + with open(path, 'rb') as datafork: + data = datafork.read() + infile = BytesIO(data) + infile.name = path + return infile + + def _readFile(self): + self._readHeaderAndMap() + self._readTypeList() + + def _read(self, numBytes, offset=None): + if offset is not None: + try: + self.file.seek(offset) + except OverflowError: + raise ResourceError("Failed to seek offset ('offset' is too large)") + if self.file.tell() != offset: + raise ResourceError('Failed to seek offset (reached EOF)') + try: + data = self.file.read(numBytes) + except OverflowError: + raise ResourceError("Cannot read resource ('numBytes' is too large)") + if len(data) != numBytes: + raise ResourceError('Cannot read resource (not enough data)') + return data + + def _readHeaderAndMap(self): + self.file.seek(0) + headerData = self._read(ResourceForkHeaderSize) + sstruct.unpack(ResourceForkHeader, headerData, self) + # seek to resource map, skip reserved + mapOffset = self.mapOffset + 22 + resourceMapData = self._read(ResourceMapHeaderSize, mapOffset) + sstruct.unpack(ResourceMapHeader, resourceMapData, self) + self.absTypeListOffset = 
self.mapOffset + self.typeListOffset + self.absNameListOffset = self.mapOffset + self.nameListOffset + + def _readTypeList(self): + absTypeListOffset = self.absTypeListOffset + numTypesData = self._read(2, absTypeListOffset) + self.numTypes, = struct.unpack('>H', numTypesData) + absTypeListOffset2 = absTypeListOffset + 2 + for i in range(self.numTypes + 1): + resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i + resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset) + item = sstruct.unpack(ResourceTypeItem, resTypeItemData) + resType = tostr(item['type'], encoding='mac-roman') + refListOffset = absTypeListOffset + item['refListOffset'] + numRes = item['numRes'] + 1 + resources = self._readReferenceList(resType, refListOffset, numRes) + self._resources[resType] = resources + + def _readReferenceList(self, resType, refListOffset, numRes): + resources = [] + for i in range(numRes): + refOffset = refListOffset + ResourceRefItemSize * i + refData = self._read(ResourceRefItemSize, refOffset) + res = Resource(resType) + res.decompile(refData, self) + resources.append(res) + return resources + + def __getitem__(self, resType): + return self._resources[resType] + + def __delitem__(self, resType): + del self._resources[resType] + + def __setitem__(self, resType, resources): + self._resources[resType] = resources + + def __len__(self): + return len(self._resources) + + def __iter__(self): + return iter(self._resources) + + def keys(self): + return self._resources.keys() + + @property + def types(self): + """A list of the types of resources in the resource fork.""" + return list(self._resources.keys()) + + def countResources(self, resType): + """Return the number of resources of a given type.""" + try: + return len(self[resType]) + except KeyError: + return 0 + + def getIndices(self, resType): + """Returns a list of indices of resources of a given type.""" + numRes = self.countResources(resType) + if numRes: + return list(range(1, numRes+1)) + 
else: + return [] + + def getNames(self, resType): + """Return list of names of all resources of a given type.""" + return [res.name for res in self.get(resType, []) if res.name is not None] + + def getIndResource(self, resType, index): + """Return resource of given type located at an index ranging from 1 + to the number of resources for that type, or None if not found. + """ + if index < 1: + return None + try: + res = self[resType][index-1] + except (KeyError, IndexError): + return None + return res + + def getNamedResource(self, resType, name): + """Return the named resource of given type, else return None.""" + name = tostr(name, encoding='mac-roman') + for res in self.get(resType, []): + if res.name == name: + return res + return None + + def close(self): + if not self.file.closed: + self.file.close() + + +class Resource(object): + """Represents a resource stored within a resource fork. + + Attributes: + type: resource type. + data: resource data. + id: ID. + name: resource name. + attr: attributes. 
+ """ + + def __init__(self, resType=None, resData=None, resID=None, resName=None, + resAttr=None): + self.type = resType + self.data = resData + self.id = resID + self.name = resName + self.attr = resAttr + + def decompile(self, refData, reader): + sstruct.unpack(ResourceRefItem, refData, self) + # interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct + self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset])) + absDataOffset = reader.dataOffset + self.dataOffset + dataLength, = struct.unpack(">L", reader._read(4, absDataOffset)) + self.data = reader._read(dataLength) + if self.nameOffset == -1: + return + absNameOffset = reader.absNameListOffset + self.nameOffset + nameLength, = struct.unpack('B', reader._read(1, absNameOffset)) + name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength)) + self.name = tostr(name, encoding='mac-roman') + + +ResourceForkHeader = """ + > # big endian + dataOffset: L + mapOffset: L + dataLen: L + mapLen: L +""" + +ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader) + +ResourceMapHeader = """ + > # big endian + attr: H + typeListOffset: H + nameListOffset: H +""" + +ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader) + +ResourceTypeItem = """ + > # big endian + type: 4s + numRes: H + refListOffset: H +""" + +ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem) + +ResourceRefItem = """ + > # big endian + id: h + nameOffset: h + attr: B + dataOffset: 3s + reserved: L +""" + +ResourceRefItemSize = sstruct.calcsize(ResourceRefItem) diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/plistlib/__init__.py b/.venv/lib/python3.9/site-packages/fontTools/misc/plistlib/__init__.py new file mode 100644 index 00000000..04ca5326 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/plistlib/__init__.py @@ -0,0 +1,677 @@ +import collections.abc +import re +from typing import ( + Any, + Callable, + Dict, + List, + Mapping, + MutableMapping, + Optional, + 
Sequence, + Type, + Union, + IO, +) +import warnings +from io import BytesIO +from datetime import datetime +from base64 import b64encode, b64decode +from numbers import Integral +from types import SimpleNamespace +from functools import singledispatch + +from fontTools.misc import etree + +from fontTools.misc.textTools import tostr + + +# By default, we +# - deserialize elements as bytes and +# - serialize bytes as elements. +# Before, on Python 2, we +# - deserialized elements as plistlib.Data objects, in order to +# distinguish them from the built-in str type (which is bytes on python2) +# - serialized bytes as elements (they must have only contained +# ASCII characters in this case) +# You can pass use_builtin_types=[True|False] to the load/dump etc. functions +# to enforce a specific treatment. +# NOTE that unicode type always maps to element, and plistlib.Data +# always maps to element, regardless of use_builtin_types. +USE_BUILTIN_TYPES = True + +XML_DECLARATION = b"""""" + +PLIST_DOCTYPE = ( + b'' +) + + +# Date should conform to a subset of ISO 8601: +# YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z' +_date_parser = re.compile( + r"(?P\d\d\d\d)" + r"(?:-(?P\d\d)" + r"(?:-(?P\d\d)" + r"(?:T(?P\d\d)" + r"(?::(?P\d\d)" + r"(?::(?P\d\d))" + r"?)?)?)?)?Z", + re.ASCII, +) + + +def _date_from_string(s: str) -> datetime: + order = ("year", "month", "day", "hour", "minute", "second") + m = _date_parser.match(s) + if m is None: + raise ValueError(f"Expected ISO 8601 date string, but got '{s:r}'.") + gd = m.groupdict() + lst = [] + for key in order: + val = gd[key] + if val is None: + break + lst.append(int(val)) + # NOTE: mypy doesn't know that lst is 6 elements long. 
+ return datetime(*lst) # type:ignore + + +def _date_to_string(d: datetime) -> str: + return "%04d-%02d-%02dT%02d:%02d:%02dZ" % ( + d.year, + d.month, + d.day, + d.hour, + d.minute, + d.second, + ) + + +class Data: + """Represents binary data when ``use_builtin_types=False.`` + + This class wraps binary data loaded from a plist file when the + ``use_builtin_types`` argument to the loading function (:py:func:`fromtree`, + :py:func:`load`, :py:func:`loads`) is false. + + The actual binary data is retrieved using the ``data`` attribute. + """ + + def __init__(self, data: bytes) -> None: + if not isinstance(data, bytes): + raise TypeError("Expected bytes, found %s" % type(data).__name__) + self.data = data + + @classmethod + def fromBase64(cls, data: Union[bytes, str]) -> "Data": + return cls(b64decode(data)) + + def asBase64(self, maxlinelength: int = 76, indent_level: int = 1) -> bytes: + return _encode_base64( + self.data, maxlinelength=maxlinelength, indent_level=indent_level + ) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, self.__class__): + return self.data == other.data + elif isinstance(other, bytes): + return self.data == other + else: + return NotImplemented + + def __repr__(self) -> str: + return "%s(%s)" % (self.__class__.__name__, repr(self.data)) + + +def _encode_base64( + data: bytes, maxlinelength: Optional[int] = 76, indent_level: int = 1 +) -> bytes: + data = b64encode(data) + if data and maxlinelength: + # split into multiple lines right-justified to 'maxlinelength' chars + indent = b"\n" + b" " * indent_level + max_length = max(16, maxlinelength - len(indent)) + chunks = [] + for i in range(0, len(data), max_length): + chunks.append(indent) + chunks.append(data[i : i + max_length]) + chunks.append(indent) + data = b"".join(chunks) + return data + + +# Mypy does not support recursive type aliases as of 0.782, Pylance does. 
+# https://github.com/python/mypy/issues/731 +# https://devblogs.microsoft.com/python/pylance-introduces-five-new-features-that-enable-type-magic-for-python-developers/#1-support-for-recursive-type-aliases +PlistEncodable = Union[ + bool, + bytes, + Data, + datetime, + float, + int, + Mapping[str, Any], + Sequence[Any], + str, +] + + +class PlistTarget: + """Event handler using the ElementTree Target API that can be + passed to a XMLParser to produce property list objects from XML. + It is based on the CPython plistlib module's _PlistParser class, + but does not use the expat parser. + + >>> from fontTools.misc import etree + >>> parser = etree.XMLParser(target=PlistTarget()) + >>> result = etree.XML( + ... "" + ... " something" + ... " blah" + ... "", + ... parser=parser) + >>> result == {"something": "blah"} + True + + Links: + https://github.com/python/cpython/blob/master/Lib/plistlib.py + http://lxml.de/parsing.html#the-target-parser-interface + """ + + def __init__( + self, + use_builtin_types: Optional[bool] = None, + dict_type: Type[MutableMapping[str, Any]] = dict, + ) -> None: + self.stack: List[PlistEncodable] = [] + self.current_key: Optional[str] = None + self.root: Optional[PlistEncodable] = None + if use_builtin_types is None: + self._use_builtin_types = USE_BUILTIN_TYPES + else: + if use_builtin_types is False: + warnings.warn( + "Setting use_builtin_types to False is deprecated and will be " + "removed soon.", + DeprecationWarning, + ) + self._use_builtin_types = use_builtin_types + self._dict_type = dict_type + + def start(self, tag: str, attrib: Mapping[str, str]) -> None: + self._data: List[str] = [] + handler = _TARGET_START_HANDLERS.get(tag) + if handler is not None: + handler(self) + + def end(self, tag: str) -> None: + handler = _TARGET_END_HANDLERS.get(tag) + if handler is not None: + handler(self) + + def data(self, data: str) -> None: + self._data.append(data) + + def close(self) -> PlistEncodable: + if self.root is None: + raise 
ValueError("No root set.") + return self.root + + # helpers + + def add_object(self, value: PlistEncodable) -> None: + if self.current_key is not None: + stack_top = self.stack[-1] + if not isinstance(stack_top, collections.abc.MutableMapping): + raise ValueError("unexpected element: %r" % stack_top) + stack_top[self.current_key] = value + self.current_key = None + elif not self.stack: + # this is the root object + self.root = value + else: + stack_top = self.stack[-1] + if not isinstance(stack_top, list): + raise ValueError("unexpected element: %r" % stack_top) + stack_top.append(value) + + def get_data(self) -> str: + data = "".join(self._data) + self._data = [] + return data + + +# event handlers + + +def start_dict(self: PlistTarget) -> None: + d = self._dict_type() + self.add_object(d) + self.stack.append(d) + + +def end_dict(self: PlistTarget) -> None: + if self.current_key: + raise ValueError("missing value for key '%s'" % self.current_key) + self.stack.pop() + + +def end_key(self: PlistTarget) -> None: + if self.current_key or not isinstance(self.stack[-1], collections.abc.Mapping): + raise ValueError("unexpected key") + self.current_key = self.get_data() + + +def start_array(self: PlistTarget) -> None: + a: List[PlistEncodable] = [] + self.add_object(a) + self.stack.append(a) + + +def end_array(self: PlistTarget) -> None: + self.stack.pop() + + +def end_true(self: PlistTarget) -> None: + self.add_object(True) + + +def end_false(self: PlistTarget) -> None: + self.add_object(False) + + +def end_integer(self: PlistTarget) -> None: + self.add_object(int(self.get_data())) + + +def end_real(self: PlistTarget) -> None: + self.add_object(float(self.get_data())) + + +def end_string(self: PlistTarget) -> None: + self.add_object(self.get_data()) + + +def end_data(self: PlistTarget) -> None: + if self._use_builtin_types: + self.add_object(b64decode(self.get_data())) + else: + self.add_object(Data.fromBase64(self.get_data())) + + +def end_date(self: PlistTarget) -> 
None: + self.add_object(_date_from_string(self.get_data())) + + +_TARGET_START_HANDLERS: Dict[str, Callable[[PlistTarget], None]] = { + "dict": start_dict, + "array": start_array, +} + +_TARGET_END_HANDLERS: Dict[str, Callable[[PlistTarget], None]] = { + "dict": end_dict, + "array": end_array, + "key": end_key, + "true": end_true, + "false": end_false, + "integer": end_integer, + "real": end_real, + "string": end_string, + "data": end_data, + "date": end_date, +} + + +# functions to build element tree from plist data + + +def _string_element(value: str, ctx: SimpleNamespace) -> etree.Element: + el = etree.Element("string") + el.text = value + return el + + +def _bool_element(value: bool, ctx: SimpleNamespace) -> etree.Element: + if value: + return etree.Element("true") + return etree.Element("false") + + +def _integer_element(value: int, ctx: SimpleNamespace) -> etree.Element: + if -1 << 63 <= value < 1 << 64: + el = etree.Element("integer") + el.text = "%d" % value + return el + raise OverflowError(value) + + +def _real_element(value: float, ctx: SimpleNamespace) -> etree.Element: + el = etree.Element("real") + el.text = repr(value) + return el + + +def _dict_element(d: Mapping[str, PlistEncodable], ctx: SimpleNamespace) -> etree.Element: + el = etree.Element("dict") + items = d.items() + if ctx.sort_keys: + items = sorted(items) # type: ignore + ctx.indent_level += 1 + for key, value in items: + if not isinstance(key, str): + if ctx.skipkeys: + continue + raise TypeError("keys must be strings") + k = etree.SubElement(el, "key") + k.text = tostr(key, "utf-8") + el.append(_make_element(value, ctx)) + ctx.indent_level -= 1 + return el + + +def _array_element(array: Sequence[PlistEncodable], ctx: SimpleNamespace) -> etree.Element: + el = etree.Element("array") + if len(array) == 0: + return el + ctx.indent_level += 1 + for value in array: + el.append(_make_element(value, ctx)) + ctx.indent_level -= 1 + return el + + +def _date_element(date: datetime, ctx: 
SimpleNamespace) -> etree.Element: + el = etree.Element("date") + el.text = _date_to_string(date) + return el + + +def _data_element(data: bytes, ctx: SimpleNamespace) -> etree.Element: + el = etree.Element("data") + # NOTE: mypy is confused about whether el.text should be str or bytes. + el.text = _encode_base64( # type: ignore + data, + maxlinelength=(76 if ctx.pretty_print else None), + indent_level=ctx.indent_level, + ) + return el + + +def _string_or_data_element(raw_bytes: bytes, ctx: SimpleNamespace) -> etree.Element: + if ctx.use_builtin_types: + return _data_element(raw_bytes, ctx) + else: + try: + string = raw_bytes.decode(encoding="ascii", errors="strict") + except UnicodeDecodeError: + raise ValueError( + "invalid non-ASCII bytes; use unicode string instead: %r" % raw_bytes + ) + return _string_element(string, ctx) + + +# The following is probably not entirely correct. The signature should take `Any` +# and return `NoReturn`. At the time of this writing, neither mypy nor Pyright +# can deal with singledispatch properly and will apply the signature of the base +# function to all others. Being slightly dishonest makes it type-check and return +# usable typing information for the optimistic case. 
+@singledispatch +def _make_element(value: PlistEncodable, ctx: SimpleNamespace) -> etree.Element: + raise TypeError("unsupported type: %s" % type(value)) + + +_make_element.register(str)(_string_element) +_make_element.register(bool)(_bool_element) +_make_element.register(Integral)(_integer_element) +_make_element.register(float)(_real_element) +_make_element.register(collections.abc.Mapping)(_dict_element) +_make_element.register(list)(_array_element) +_make_element.register(tuple)(_array_element) +_make_element.register(datetime)(_date_element) +_make_element.register(bytes)(_string_or_data_element) +_make_element.register(bytearray)(_data_element) +_make_element.register(Data)(lambda v, ctx: _data_element(v.data, ctx)) + + +# Public functions to create element tree from plist-compatible python +# data structures and viceversa, for use when (de)serializing GLIF xml. + + +def totree( + value: PlistEncodable, + sort_keys: bool = True, + skipkeys: bool = False, + use_builtin_types: Optional[bool] = None, + pretty_print: bool = True, + indent_level: int = 1, +) -> etree.Element: + """Convert a value derived from a plist into an XML tree. + + Args: + value: Any kind of value to be serialized to XML. + sort_keys: Whether keys of dictionaries should be sorted. + skipkeys (bool): Whether to silently skip non-string dictionary + keys. + use_builtin_types (bool): If true, byte strings will be + encoded in Base-64 and wrapped in a ``data`` tag; if + false, they will be either stored as ASCII strings or an + exception raised if they cannot be decoded as such. Defaults + to ``True`` if not present. Deprecated. + pretty_print (bool): Whether to indent the output. + indent_level (int): Level of indentation when serializing. + + Returns: an ``etree`` ``Element`` object. + + Raises: + ``TypeError`` + if non-string dictionary keys are serialized + and ``skipkeys`` is false. + ``ValueError`` + if non-ASCII binary data is present + and `use_builtin_types` is false. 
+ """ + if use_builtin_types is None: + use_builtin_types = USE_BUILTIN_TYPES + else: + use_builtin_types = use_builtin_types + context = SimpleNamespace( + sort_keys=sort_keys, + skipkeys=skipkeys, + use_builtin_types=use_builtin_types, + pretty_print=pretty_print, + indent_level=indent_level, + ) + return _make_element(value, context) + + +def fromtree( + tree: etree.Element, + use_builtin_types: Optional[bool] = None, + dict_type: Type[MutableMapping[str, Any]] = dict, +) -> Any: + """Convert an XML tree to a plist structure. + + Args: + tree: An ``etree`` ``Element``. + use_builtin_types: If True, binary data is deserialized to + bytes strings. If False, it is wrapped in :py:class:`Data` + objects. Defaults to True if not provided. Deprecated. + dict_type: What type to use for dictionaries. + + Returns: An object (usually a dictionary). + """ + target = PlistTarget(use_builtin_types=use_builtin_types, dict_type=dict_type) + for action, element in etree.iterwalk(tree, events=("start", "end")): + if action == "start": + target.start(element.tag, element.attrib) + elif action == "end": + # if there are no children, parse the leaf's data + if not len(element): + # always pass str, not None + target.data(element.text or "") + target.end(element.tag) + return target.close() + + +# python3 plistlib API + + +def load( + fp: IO[bytes], + use_builtin_types: Optional[bool] = None, + dict_type: Type[MutableMapping[str, Any]] = dict, +) -> Any: + """Load a plist file into an object. + + Args: + fp: An opened file. + use_builtin_types: If True, binary data is deserialized to + bytes strings. If False, it is wrapped in :py:class:`Data` + objects. Defaults to True if not provided. Deprecated. + dict_type: What type to use for dictionaries. + + Returns: + An object (usually a dictionary) representing the top level of + the plist file. 
+ """ + + if not hasattr(fp, "read"): + raise AttributeError("'%s' object has no attribute 'read'" % type(fp).__name__) + target = PlistTarget(use_builtin_types=use_builtin_types, dict_type=dict_type) + parser = etree.XMLParser(target=target) + result = etree.parse(fp, parser=parser) + # lxml returns the target object directly, while ElementTree wraps + # it as the root of an ElementTree object + try: + return result.getroot() + except AttributeError: + return result + + +def loads( + value: bytes, + use_builtin_types: Optional[bool] = None, + dict_type: Type[MutableMapping[str, Any]] = dict, +) -> Any: + """Load a plist file from a string into an object. + + Args: + value: A bytes string containing a plist. + use_builtin_types: If True, binary data is deserialized to + bytes strings. If False, it is wrapped in :py:class:`Data` + objects. Defaults to True if not provided. Deprecated. + dict_type: What type to use for dictionaries. + + Returns: + An object (usually a dictionary) representing the top level of + the plist file. + """ + + fp = BytesIO(value) + return load(fp, use_builtin_types=use_builtin_types, dict_type=dict_type) + + +def dump( + value: PlistEncodable, + fp: IO[bytes], + sort_keys: bool = True, + skipkeys: bool = False, + use_builtin_types: Optional[bool] = None, + pretty_print: bool = True, +) -> None: + """Write a Python object to a plist file. + + Args: + value: An object to write. + fp: A file opened for writing. + sort_keys (bool): Whether keys of dictionaries should be sorted. + skipkeys (bool): Whether to silently skip non-string dictionary + keys. + use_builtin_types (bool): If true, byte strings will be + encoded in Base-64 and wrapped in a ``data`` tag; if + false, they will be either stored as ASCII strings or an + exception raised if they cannot be represented. Defaults + pretty_print (bool): Whether to indent the output. + indent_level (int): Level of indentation when serializing. 
+ + Raises: + ``TypeError`` + if non-string dictionary keys are serialized + and ``skipkeys`` is false. + ``ValueError`` + if non-representable binary data is present + and `use_builtin_types` is false. + """ + + if not hasattr(fp, "write"): + raise AttributeError("'%s' object has no attribute 'write'" % type(fp).__name__) + root = etree.Element("plist", version="1.0") + el = totree( + value, + sort_keys=sort_keys, + skipkeys=skipkeys, + use_builtin_types=use_builtin_types, + pretty_print=pretty_print, + ) + root.append(el) + tree = etree.ElementTree(root) + # we write the doctype ourselves instead of using the 'doctype' argument + # of 'write' method, becuse lxml will force adding a '\n' even when + # pretty_print is False. + if pretty_print: + header = b"\n".join((XML_DECLARATION, PLIST_DOCTYPE, b"")) + else: + header = XML_DECLARATION + PLIST_DOCTYPE + fp.write(header) + tree.write( # type: ignore + fp, + encoding="utf-8", + pretty_print=pretty_print, + xml_declaration=False, + ) + + +def dumps( + value: PlistEncodable, + sort_keys: bool = True, + skipkeys: bool = False, + use_builtin_types: Optional[bool] = None, + pretty_print: bool = True, +) -> bytes: + """Write a Python object to a string in plist format. + + Args: + value: An object to write. + sort_keys (bool): Whether keys of dictionaries should be sorted. + skipkeys (bool): Whether to silently skip non-string dictionary + keys. + use_builtin_types (bool): If true, byte strings will be + encoded in Base-64 and wrapped in a ``data`` tag; if + false, they will be either stored as strings or an + exception raised if they cannot be represented. Defaults + pretty_print (bool): Whether to indent the output. + indent_level (int): Level of indentation when serializing. + + Returns: + string: A plist representation of the Python object. + + Raises: + ``TypeError`` + if non-string dictionary keys are serialized + and ``skipkeys`` is false. 
+ ``ValueError`` + if non-representable binary data is present + and `use_builtin_types` is false. + """ + fp = BytesIO() + dump( + value, + fp, + sort_keys=sort_keys, + skipkeys=skipkeys, + use_builtin_types=use_builtin_types, + pretty_print=pretty_print, + ) + return fp.getvalue() diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/plistlib/__pycache__/__init__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/misc/plistlib/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..71e83866 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/misc/plistlib/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/plistlib/py.typed b/.venv/lib/python3.9/site-packages/fontTools/misc/plistlib/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/psCharStrings.py b/.venv/lib/python3.9/site-packages/fontTools/misc/psCharStrings.py new file mode 100644 index 00000000..29c2d365 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/psCharStrings.py @@ -0,0 +1,1304 @@ +"""psCharStrings.py -- module implementing various kinds of CharStrings: +CFF dictionary data and Type1/Type2 CharStrings. 
+""" + +from fontTools.misc.fixedTools import ( + fixedToFloat, floatToFixed, floatToFixedToStr, strToFixedToFloat, +) +from fontTools.misc.textTools import bytechr, byteord, bytesjoin, strjoin +from fontTools.pens.boundsPen import BoundsPen +import struct +import logging + + +log = logging.getLogger(__name__) + + +def read_operator(self, b0, data, index): + if b0 == 12: + op = (b0, byteord(data[index])) + index = index+1 + else: + op = b0 + try: + operator = self.operators[op] + except KeyError: + return None, index + value = self.handle_operator(operator) + return value, index + +def read_byte(self, b0, data, index): + return b0 - 139, index + +def read_smallInt1(self, b0, data, index): + b1 = byteord(data[index]) + return (b0-247)*256 + b1 + 108, index+1 + +def read_smallInt2(self, b0, data, index): + b1 = byteord(data[index]) + return -(b0-251)*256 - b1 - 108, index+1 + +def read_shortInt(self, b0, data, index): + value, = struct.unpack(">h", data[index:index+2]) + return value, index+2 + +def read_longInt(self, b0, data, index): + value, = struct.unpack(">l", data[index:index+4]) + return value, index+4 + +def read_fixed1616(self, b0, data, index): + value, = struct.unpack(">l", data[index:index+4]) + return fixedToFloat(value, precisionBits=16), index+4 + +def read_reserved(self, b0, data, index): + assert NotImplementedError + return NotImplemented, index + +def read_realNumber(self, b0, data, index): + number = '' + while True: + b = byteord(data[index]) + index = index + 1 + nibble0 = (b & 0xf0) >> 4 + nibble1 = b & 0x0f + if nibble0 == 0xf: + break + number = number + realNibbles[nibble0] + if nibble1 == 0xf: + break + number = number + realNibbles[nibble1] + return float(number), index + + +t1OperandEncoding = [None] * 256 +t1OperandEncoding[0:32] = (32) * [read_operator] +t1OperandEncoding[32:247] = (247 - 32) * [read_byte] +t1OperandEncoding[247:251] = (251 - 247) * [read_smallInt1] +t1OperandEncoding[251:255] = (255 - 251) * [read_smallInt2] 
+t1OperandEncoding[255] = read_longInt +assert len(t1OperandEncoding) == 256 + +t2OperandEncoding = t1OperandEncoding[:] +t2OperandEncoding[28] = read_shortInt +t2OperandEncoding[255] = read_fixed1616 + +cffDictOperandEncoding = t2OperandEncoding[:] +cffDictOperandEncoding[29] = read_longInt +cffDictOperandEncoding[30] = read_realNumber +cffDictOperandEncoding[255] = read_reserved + + +realNibbles = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + '.', 'E', 'E-', None, '-'] +realNibblesDict = {v:i for i,v in enumerate(realNibbles)} + +maxOpStack = 193 + + +def buildOperatorDict(operatorList): + oper = {} + opc = {} + for item in operatorList: + if len(item) == 2: + oper[item[0]] = item[1] + else: + oper[item[0]] = item[1:] + if isinstance(item[0], tuple): + opc[item[1]] = item[0] + else: + opc[item[1]] = (item[0],) + return oper, opc + + +t2Operators = [ +# opcode name + (1, 'hstem'), + (3, 'vstem'), + (4, 'vmoveto'), + (5, 'rlineto'), + (6, 'hlineto'), + (7, 'vlineto'), + (8, 'rrcurveto'), + (10, 'callsubr'), + (11, 'return'), + (14, 'endchar'), + (15, 'vsindex'), + (16, 'blend'), + (18, 'hstemhm'), + (19, 'hintmask'), + (20, 'cntrmask'), + (21, 'rmoveto'), + (22, 'hmoveto'), + (23, 'vstemhm'), + (24, 'rcurveline'), + (25, 'rlinecurve'), + (26, 'vvcurveto'), + (27, 'hhcurveto'), +# (28, 'shortint'), # not really an operator + (29, 'callgsubr'), + (30, 'vhcurveto'), + (31, 'hvcurveto'), + ((12, 0), 'ignore'), # dotsection. Yes, there a few very early OTF/CFF + # fonts with this deprecated operator. Just ignore it. 
+ ((12, 3), 'and'), + ((12, 4), 'or'), + ((12, 5), 'not'), + ((12, 8), 'store'), + ((12, 9), 'abs'), + ((12, 10), 'add'), + ((12, 11), 'sub'), + ((12, 12), 'div'), + ((12, 13), 'load'), + ((12, 14), 'neg'), + ((12, 15), 'eq'), + ((12, 18), 'drop'), + ((12, 20), 'put'), + ((12, 21), 'get'), + ((12, 22), 'ifelse'), + ((12, 23), 'random'), + ((12, 24), 'mul'), + ((12, 26), 'sqrt'), + ((12, 27), 'dup'), + ((12, 28), 'exch'), + ((12, 29), 'index'), + ((12, 30), 'roll'), + ((12, 34), 'hflex'), + ((12, 35), 'flex'), + ((12, 36), 'hflex1'), + ((12, 37), 'flex1'), +] + +def getIntEncoder(format): + if format == "cff": + fourByteOp = bytechr(29) + elif format == "t1": + fourByteOp = bytechr(255) + else: + assert format == "t2" + fourByteOp = None + + def encodeInt(value, fourByteOp=fourByteOp, bytechr=bytechr, + pack=struct.pack, unpack=struct.unpack): + if -107 <= value <= 107: + code = bytechr(value + 139) + elif 108 <= value <= 1131: + value = value - 108 + code = bytechr((value >> 8) + 247) + bytechr(value & 0xFF) + elif -1131 <= value <= -108: + value = -value - 108 + code = bytechr((value >> 8) + 251) + bytechr(value & 0xFF) + elif fourByteOp is None: + # T2 only supports 2 byte ints + if -32768 <= value <= 32767: + code = bytechr(28) + pack(">h", value) + else: + # Backwards compatible hack: due to a previous bug in FontTools, + # 16.16 fixed numbers were written out as 4-byte ints. When + # these numbers were small, they were wrongly written back as + # small ints instead of 4-byte ints, breaking round-tripping. + # This here workaround doesn't do it any better, since we can't + # distinguish anymore between small ints that were supposed to + # be small fixed numbers and small ints that were just small + # ints. Hence the warning. + log.warning("4-byte T2 number got passed to the " + "IntType handler. 
This should happen only when reading in " + "old XML files.\n") + code = bytechr(255) + pack(">l", value) + else: + code = fourByteOp + pack(">l", value) + return code + + return encodeInt + + +encodeIntCFF = getIntEncoder("cff") +encodeIntT1 = getIntEncoder("t1") +encodeIntT2 = getIntEncoder("t2") + +def encodeFixed(f, pack=struct.pack): + """For T2 only""" + value = floatToFixed(f, precisionBits=16) + if value & 0xFFFF == 0: # check if the fractional part is zero + return encodeIntT2(value >> 16) # encode only the integer part + else: + return b"\xff" + pack(">l", value) # encode the entire fixed point value + + +realZeroBytes = bytechr(30) + bytechr(0xf) + +def encodeFloat(f): + # For CFF only, used in cffLib + if f == 0.0: # 0.0 == +0.0 == -0.0 + return realZeroBytes + # Note: 14 decimal digits seems to be the limitation for CFF real numbers + # in macOS. However, we use 8 here to match the implementation of AFDKO. + s = "%.8G" % f + if s[:2] == "0.": + s = s[1:] + elif s[:3] == "-0.": + s = "-" + s[2:] + nibbles = [] + while s: + c = s[0] + s = s[1:] + if c == "E": + c2 = s[:1] + if c2 == "-": + s = s[1:] + c = "E-" + elif c2 == "+": + s = s[1:] + nibbles.append(realNibblesDict[c]) + nibbles.append(0xf) + if len(nibbles) % 2: + nibbles.append(0xf) + d = bytechr(30) + for i in range(0, len(nibbles), 2): + d = d + bytechr(nibbles[i] << 4 | nibbles[i+1]) + return d + + +class CharStringCompileError(Exception): pass + + +class SimpleT2Decompiler(object): + + def __init__(self, localSubrs, globalSubrs, private=None): + self.localSubrs = localSubrs + self.localBias = calcSubrBias(localSubrs) + self.globalSubrs = globalSubrs + self.globalBias = calcSubrBias(globalSubrs) + self.private = private + self.reset() + + def reset(self): + self.callingStack = [] + self.operandStack = [] + self.hintCount = 0 + self.hintMaskBytes = 0 + self.numRegions = 0 + + def execute(self, charString): + self.callingStack.append(charString) + needsDecompilation = 
charString.needsDecompilation() + if needsDecompilation: + program = [] + pushToProgram = program.append + else: + pushToProgram = lambda x: None + pushToStack = self.operandStack.append + index = 0 + while True: + token, isOperator, index = charString.getToken(index) + if token is None: + break # we're done! + pushToProgram(token) + if isOperator: + handlerName = "op_" + token + handler = getattr(self, handlerName, None) + if handler is not None: + rv = handler(index) + if rv: + hintMaskBytes, index = rv + pushToProgram(hintMaskBytes) + else: + self.popall() + else: + pushToStack(token) + if needsDecompilation: + charString.setProgram(program) + del self.callingStack[-1] + + def pop(self): + value = self.operandStack[-1] + del self.operandStack[-1] + return value + + def popall(self): + stack = self.operandStack[:] + self.operandStack[:] = [] + return stack + + def push(self, value): + self.operandStack.append(value) + + def op_return(self, index): + if self.operandStack: + pass + + def op_endchar(self, index): + pass + + def op_ignore(self, index): + pass + + def op_callsubr(self, index): + subrIndex = self.pop() + subr = self.localSubrs[subrIndex+self.localBias] + self.execute(subr) + + def op_callgsubr(self, index): + subrIndex = self.pop() + subr = self.globalSubrs[subrIndex+self.globalBias] + self.execute(subr) + + def op_hstem(self, index): + self.countHints() + def op_vstem(self, index): + self.countHints() + def op_hstemhm(self, index): + self.countHints() + def op_vstemhm(self, index): + self.countHints() + + def op_hintmask(self, index): + if not self.hintMaskBytes: + self.countHints() + self.hintMaskBytes = (self.hintCount + 7) // 8 + hintMaskBytes, index = self.callingStack[-1].getBytes(index, self.hintMaskBytes) + return hintMaskBytes, index + + op_cntrmask = op_hintmask + + def countHints(self): + args = self.popall() + self.hintCount = self.hintCount + len(args) // 2 + + # misc + def op_and(self, index): + raise NotImplementedError + def op_or(self, 
index): + raise NotImplementedError + def op_not(self, index): + raise NotImplementedError + def op_store(self, index): + raise NotImplementedError + def op_abs(self, index): + raise NotImplementedError + def op_add(self, index): + raise NotImplementedError + def op_sub(self, index): + raise NotImplementedError + def op_div(self, index): + raise NotImplementedError + def op_load(self, index): + raise NotImplementedError + def op_neg(self, index): + raise NotImplementedError + def op_eq(self, index): + raise NotImplementedError + def op_drop(self, index): + raise NotImplementedError + def op_put(self, index): + raise NotImplementedError + def op_get(self, index): + raise NotImplementedError + def op_ifelse(self, index): + raise NotImplementedError + def op_random(self, index): + raise NotImplementedError + def op_mul(self, index): + raise NotImplementedError + def op_sqrt(self, index): + raise NotImplementedError + def op_dup(self, index): + raise NotImplementedError + def op_exch(self, index): + raise NotImplementedError + def op_index(self, index): + raise NotImplementedError + def op_roll(self, index): + raise NotImplementedError + + # TODO(behdad): move to T2OutlineExtractor and add a 'setVariation' + # method that takes VarStoreData and a location + def op_blend(self, index): + if self.numRegions == 0: + self.numRegions = self.private.getNumRegions() + numBlends = self.pop() + numOps = numBlends * (self.numRegions + 1) + del self.operandStack[-(numOps-numBlends):] # Leave the default operands on the stack. 
+ + def op_vsindex(self, index): + vi = self.pop() + self.numRegions = self.private.getNumRegions(vi) + + +t1Operators = [ +# opcode name + (1, 'hstem'), + (3, 'vstem'), + (4, 'vmoveto'), + (5, 'rlineto'), + (6, 'hlineto'), + (7, 'vlineto'), + (8, 'rrcurveto'), + (9, 'closepath'), + (10, 'callsubr'), + (11, 'return'), + (13, 'hsbw'), + (14, 'endchar'), + (21, 'rmoveto'), + (22, 'hmoveto'), + (30, 'vhcurveto'), + (31, 'hvcurveto'), + ((12, 0), 'dotsection'), + ((12, 1), 'vstem3'), + ((12, 2), 'hstem3'), + ((12, 6), 'seac'), + ((12, 7), 'sbw'), + ((12, 12), 'div'), + ((12, 16), 'callothersubr'), + ((12, 17), 'pop'), + ((12, 33), 'setcurrentpoint'), +] + + +class T2WidthExtractor(SimpleT2Decompiler): + + def __init__(self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None): + SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private) + self.nominalWidthX = nominalWidthX + self.defaultWidthX = defaultWidthX + + def reset(self): + SimpleT2Decompiler.reset(self) + self.gotWidth = 0 + self.width = 0 + + def popallWidth(self, evenOdd=0): + args = self.popall() + if not self.gotWidth: + if evenOdd ^ (len(args) % 2): + # For CFF2 charstrings, this should never happen + assert self.defaultWidthX is not None, "CFF2 CharStrings must not have an initial width value" + self.width = self.nominalWidthX + args[0] + args = args[1:] + else: + self.width = self.defaultWidthX + self.gotWidth = 1 + return args + + def countHints(self): + args = self.popallWidth() + self.hintCount = self.hintCount + len(args) // 2 + + def op_rmoveto(self, index): + self.popallWidth() + + def op_hmoveto(self, index): + self.popallWidth(1) + + def op_vmoveto(self, index): + self.popallWidth(1) + + def op_endchar(self, index): + self.popallWidth() + + +class T2OutlineExtractor(T2WidthExtractor): + + def __init__(self, pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None): + T2WidthExtractor.__init__( + self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, 
private) + self.pen = pen + + def reset(self): + T2WidthExtractor.reset(self) + self.currentPoint = (0, 0) + self.sawMoveTo = 0 + + def _nextPoint(self, point): + x, y = self.currentPoint + point = x + point[0], y + point[1] + self.currentPoint = point + return point + + def rMoveTo(self, point): + self.pen.moveTo(self._nextPoint(point)) + self.sawMoveTo = 1 + + def rLineTo(self, point): + if not self.sawMoveTo: + self.rMoveTo((0, 0)) + self.pen.lineTo(self._nextPoint(point)) + + def rCurveTo(self, pt1, pt2, pt3): + if not self.sawMoveTo: + self.rMoveTo((0, 0)) + nextPoint = self._nextPoint + self.pen.curveTo(nextPoint(pt1), nextPoint(pt2), nextPoint(pt3)) + + def closePath(self): + if self.sawMoveTo: + self.pen.closePath() + self.sawMoveTo = 0 + + def endPath(self): + # In T2 there are no open paths, so always do a closePath when + # finishing a sub path. + self.closePath() + + # + # hint operators + # + #def op_hstem(self, index): + # self.countHints() + #def op_vstem(self, index): + # self.countHints() + #def op_hstemhm(self, index): + # self.countHints() + #def op_vstemhm(self, index): + # self.countHints() + #def op_hintmask(self, index): + # self.countHints() + #def op_cntrmask(self, index): + # self.countHints() + + # + # path constructors, moveto + # + def op_rmoveto(self, index): + self.endPath() + self.rMoveTo(self.popallWidth()) + def op_hmoveto(self, index): + self.endPath() + self.rMoveTo((self.popallWidth(1)[0], 0)) + def op_vmoveto(self, index): + self.endPath() + self.rMoveTo((0, self.popallWidth(1)[0])) + def op_endchar(self, index): + self.endPath() + args = self.popallWidth() + if args: + from fontTools.encodings.StandardEncoding import StandardEncoding + # endchar can do seac accent bulding; The T2 spec says it's deprecated, + # but recent software that shall remain nameless does output it. 
+ adx, ady, bchar, achar = args + baseGlyph = StandardEncoding[bchar] + self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0)) + accentGlyph = StandardEncoding[achar] + self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady)) + + # + # path constructors, lines + # + def op_rlineto(self, index): + args = self.popall() + for i in range(0, len(args), 2): + point = args[i:i+2] + self.rLineTo(point) + + def op_hlineto(self, index): + self.alternatingLineto(1) + def op_vlineto(self, index): + self.alternatingLineto(0) + + # + # path constructors, curves + # + def op_rrcurveto(self, index): + """{dxa dya dxb dyb dxc dyc}+ rrcurveto""" + args = self.popall() + for i in range(0, len(args), 6): + dxa, dya, dxb, dyb, dxc, dyc, = args[i:i+6] + self.rCurveTo((dxa, dya), (dxb, dyb), (dxc, dyc)) + + def op_rcurveline(self, index): + """{dxa dya dxb dyb dxc dyc}+ dxd dyd rcurveline""" + args = self.popall() + for i in range(0, len(args)-2, 6): + dxb, dyb, dxc, dyc, dxd, dyd = args[i:i+6] + self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd)) + self.rLineTo(args[-2:]) + + def op_rlinecurve(self, index): + """{dxa dya}+ dxb dyb dxc dyc dxd dyd rlinecurve""" + args = self.popall() + lineArgs = args[:-6] + for i in range(0, len(lineArgs), 2): + self.rLineTo(lineArgs[i:i+2]) + dxb, dyb, dxc, dyc, dxd, dyd = args[-6:] + self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd)) + + def op_vvcurveto(self, index): + "dx1? {dya dxb dyb dyc}+ vvcurveto" + args = self.popall() + if len(args) % 2: + dx1 = args[0] + args = args[1:] + else: + dx1 = 0 + for i in range(0, len(args), 4): + dya, dxb, dyb, dyc = args[i:i+4] + self.rCurveTo((dx1, dya), (dxb, dyb), (0, dyc)) + dx1 = 0 + + def op_hhcurveto(self, index): + """dy1? 
{dxa dxb dyb dxc}+ hhcurveto""" + args = self.popall() + if len(args) % 2: + dy1 = args[0] + args = args[1:] + else: + dy1 = 0 + for i in range(0, len(args), 4): + dxa, dxb, dyb, dxc = args[i:i+4] + self.rCurveTo((dxa, dy1), (dxb, dyb), (dxc, 0)) + dy1 = 0 + + def op_vhcurveto(self, index): + """dy1 dx2 dy2 dx3 {dxa dxb dyb dyc dyd dxe dye dxf}* dyf? vhcurveto (30) + {dya dxb dyb dxc dxd dxe dye dyf}+ dxf? vhcurveto + """ + args = self.popall() + while args: + args = self.vcurveto(args) + if args: + args = self.hcurveto(args) + + def op_hvcurveto(self, index): + """dx1 dx2 dy2 dy3 {dya dxb dyb dxc dxd dxe dye dyf}* dxf? + {dxa dxb dyb dyc dyd dxe dye dxf}+ dyf? + """ + args = self.popall() + while args: + args = self.hcurveto(args) + if args: + args = self.vcurveto(args) + + # + # path constructors, flex + # + def op_hflex(self, index): + dx1, dx2, dy2, dx3, dx4, dx5, dx6 = self.popall() + dy1 = dy3 = dy4 = dy6 = 0 + dy5 = -dy2 + self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) + self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) + def op_flex(self, index): + dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, dx6, dy6, fd = self.popall() + self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) + self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) + def op_hflex1(self, index): + dx1, dy1, dx2, dy2, dx3, dx4, dx5, dy5, dx6 = self.popall() + dy3 = dy4 = 0 + dy6 = -(dy1 + dy2 + dy3 + dy4 + dy5) + + self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) + self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) + def op_flex1(self, index): + dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, d6 = self.popall() + dx = dx1 + dx2 + dx3 + dx4 + dx5 + dy = dy1 + dy2 + dy3 + dy4 + dy5 + if abs(dx) > abs(dy): + dx6 = d6 + dy6 = -dy + else: + dx6 = -dx + dy6 = d6 + self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) + self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) + + # misc + def op_and(self, index): + raise NotImplementedError + def op_or(self, index): + raise NotImplementedError + def 
op_not(self, index): + raise NotImplementedError + def op_store(self, index): + raise NotImplementedError + def op_abs(self, index): + raise NotImplementedError + def op_add(self, index): + raise NotImplementedError + def op_sub(self, index): + raise NotImplementedError + def op_div(self, index): + num2 = self.pop() + num1 = self.pop() + d1 = num1//num2 + d2 = num1/num2 + if d1 == d2: + self.push(d1) + else: + self.push(d2) + def op_load(self, index): + raise NotImplementedError + def op_neg(self, index): + raise NotImplementedError + def op_eq(self, index): + raise NotImplementedError + def op_drop(self, index): + raise NotImplementedError + def op_put(self, index): + raise NotImplementedError + def op_get(self, index): + raise NotImplementedError + def op_ifelse(self, index): + raise NotImplementedError + def op_random(self, index): + raise NotImplementedError + def op_mul(self, index): + raise NotImplementedError + def op_sqrt(self, index): + raise NotImplementedError + def op_dup(self, index): + raise NotImplementedError + def op_exch(self, index): + raise NotImplementedError + def op_index(self, index): + raise NotImplementedError + def op_roll(self, index): + raise NotImplementedError + + # + # miscellaneous helpers + # + def alternatingLineto(self, isHorizontal): + args = self.popall() + for arg in args: + if isHorizontal: + point = (arg, 0) + else: + point = (0, arg) + self.rLineTo(point) + isHorizontal = not isHorizontal + + def vcurveto(self, args): + dya, dxb, dyb, dxc = args[:4] + args = args[4:] + if len(args) == 1: + dyc = args[0] + args = [] + else: + dyc = 0 + self.rCurveTo((0, dya), (dxb, dyb), (dxc, dyc)) + return args + + def hcurveto(self, args): + dxa, dxb, dyb, dyc = args[:4] + args = args[4:] + if len(args) == 1: + dxc = args[0] + args = [] + else: + dxc = 0 + self.rCurveTo((dxa, 0), (dxb, dyb), (dxc, dyc)) + return args + +class T1OutlineExtractor(T2OutlineExtractor): + + def __init__(self, pen, subrs): + self.pen = pen + self.subrs = subrs 
+ self.reset() + + def reset(self): + self.flexing = 0 + self.width = 0 + self.sbx = 0 + T2OutlineExtractor.reset(self) + + def endPath(self): + if self.sawMoveTo: + self.pen.endPath() + self.sawMoveTo = 0 + + def popallWidth(self, evenOdd=0): + return self.popall() + + def exch(self): + stack = self.operandStack + stack[-1], stack[-2] = stack[-2], stack[-1] + + # + # path constructors + # + def op_rmoveto(self, index): + if self.flexing: + return + self.endPath() + self.rMoveTo(self.popall()) + def op_hmoveto(self, index): + if self.flexing: + # We must add a parameter to the stack if we are flexing + self.push(0) + return + self.endPath() + self.rMoveTo((self.popall()[0], 0)) + def op_vmoveto(self, index): + if self.flexing: + # We must add a parameter to the stack if we are flexing + self.push(0) + self.exch() + return + self.endPath() + self.rMoveTo((0, self.popall()[0])) + def op_closepath(self, index): + self.closePath() + def op_setcurrentpoint(self, index): + args = self.popall() + x, y = args + self.currentPoint = x, y + + def op_endchar(self, index): + self.endPath() + + def op_hsbw(self, index): + sbx, wx = self.popall() + self.width = wx + self.sbx = sbx + self.currentPoint = sbx, self.currentPoint[1] + def op_sbw(self, index): + self.popall() # XXX + + # + def op_callsubr(self, index): + subrIndex = self.pop() + subr = self.subrs[subrIndex] + self.execute(subr) + def op_callothersubr(self, index): + subrIndex = self.pop() + nArgs = self.pop() + #print nArgs, subrIndex, "callothersubr" + if subrIndex == 0 and nArgs == 3: + self.doFlex() + self.flexing = 0 + elif subrIndex == 1 and nArgs == 0: + self.flexing = 1 + # ignore... + def op_pop(self, index): + pass # ignore... 
+ + def doFlex(self): + finaly = self.pop() + finalx = self.pop() + self.pop() # flex height is unused + + p3y = self.pop() + p3x = self.pop() + bcp4y = self.pop() + bcp4x = self.pop() + bcp3y = self.pop() + bcp3x = self.pop() + p2y = self.pop() + p2x = self.pop() + bcp2y = self.pop() + bcp2x = self.pop() + bcp1y = self.pop() + bcp1x = self.pop() + rpy = self.pop() + rpx = self.pop() + + # call rrcurveto + self.push(bcp1x+rpx) + self.push(bcp1y+rpy) + self.push(bcp2x) + self.push(bcp2y) + self.push(p2x) + self.push(p2y) + self.op_rrcurveto(None) + + # call rrcurveto + self.push(bcp3x) + self.push(bcp3y) + self.push(bcp4x) + self.push(bcp4y) + self.push(p3x) + self.push(p3y) + self.op_rrcurveto(None) + + # Push back final coords so subr 0 can find them + self.push(finalx) + self.push(finaly) + + def op_dotsection(self, index): + self.popall() # XXX + def op_hstem3(self, index): + self.popall() # XXX + def op_seac(self, index): + "asb adx ady bchar achar seac" + from fontTools.encodings.StandardEncoding import StandardEncoding + asb, adx, ady, bchar, achar = self.popall() + baseGlyph = StandardEncoding[bchar] + self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0)) + accentGlyph = StandardEncoding[achar] + adx = adx + self.sbx - asb # seac weirdness + self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady)) + def op_vstem3(self, index): + self.popall() # XXX + +class T2CharString(object): + + operandEncoding = t2OperandEncoding + operators, opcodes = buildOperatorDict(t2Operators) + decompilerClass = SimpleT2Decompiler + outlineExtractor = T2OutlineExtractor + + def __init__(self, bytecode=None, program=None, private=None, globalSubrs=None): + if program is None: + program = [] + self.bytecode = bytecode + self.program = program + self.private = private + self.globalSubrs = globalSubrs if globalSubrs is not None else [] + self._cur_vsindex = None + + def getNumRegions(self, vsindex=None): + pd = self.private + assert(pd is not None) + if vsindex is not None: + 
self._cur_vsindex = vsindex + elif self._cur_vsindex is None: + self._cur_vsindex = pd.vsindex if hasattr(pd, 'vsindex') else 0 + return pd.getNumRegions(self._cur_vsindex) + + def __repr__(self): + if self.bytecode is None: + return "<%s (source) at %x>" % (self.__class__.__name__, id(self)) + else: + return "<%s (bytecode) at %x>" % (self.__class__.__name__, id(self)) + + def getIntEncoder(self): + return encodeIntT2 + + def getFixedEncoder(self): + return encodeFixed + + def decompile(self): + if not self.needsDecompilation(): + return + subrs = getattr(self.private, "Subrs", []) + decompiler = self.decompilerClass(subrs, self.globalSubrs, self.private) + decompiler.execute(self) + + def draw(self, pen): + subrs = getattr(self.private, "Subrs", []) + extractor = self.outlineExtractor(pen, subrs, self.globalSubrs, + self.private.nominalWidthX, self.private.defaultWidthX, + self.private) + extractor.execute(self) + self.width = extractor.width + + def calcBounds(self, glyphSet): + boundsPen = BoundsPen(glyphSet) + self.draw(boundsPen) + return boundsPen.bounds + + def compile(self, isCFF2=False): + if self.bytecode is not None: + return + opcodes = self.opcodes + program = self.program + + if isCFF2: + # If present, remove return and endchar operators. + if program and program[-1] in ("return", "endchar"): + program = program[:-1] + elif program and not isinstance(program[-1], str): + raise CharStringCompileError( + "T2CharString or Subr has items on the stack after last operator." 
+ ) + + bytecode = [] + encodeInt = self.getIntEncoder() + encodeFixed = self.getFixedEncoder() + i = 0 + end = len(program) + while i < end: + token = program[i] + i = i + 1 + if isinstance(token, str): + try: + bytecode.extend(bytechr(b) for b in opcodes[token]) + except KeyError: + raise CharStringCompileError("illegal operator: %s" % token) + if token in ('hintmask', 'cntrmask'): + bytecode.append(program[i]) # hint mask + i = i + 1 + elif isinstance(token, int): + bytecode.append(encodeInt(token)) + elif isinstance(token, float): + bytecode.append(encodeFixed(token)) + else: + assert 0, "unsupported type: %s" % type(token) + try: + bytecode = bytesjoin(bytecode) + except TypeError: + log.error(bytecode) + raise + self.setBytecode(bytecode) + + def needsDecompilation(self): + return self.bytecode is not None + + def setProgram(self, program): + self.program = program + self.bytecode = None + + def setBytecode(self, bytecode): + self.bytecode = bytecode + self.program = None + + def getToken(self, index, + len=len, byteord=byteord, isinstance=isinstance): + if self.bytecode is not None: + if index >= len(self.bytecode): + return None, 0, 0 + b0 = byteord(self.bytecode[index]) + index = index + 1 + handler = self.operandEncoding[b0] + token, index = handler(self, b0, self.bytecode, index) + else: + if index >= len(self.program): + return None, 0, 0 + token = self.program[index] + index = index + 1 + isOperator = isinstance(token, str) + return token, isOperator, index + + def getBytes(self, index, nBytes): + if self.bytecode is not None: + newIndex = index + nBytes + bytes = self.bytecode[index:newIndex] + index = newIndex + else: + bytes = self.program[index] + index = index + 1 + assert len(bytes) == nBytes + return bytes, index + + def handle_operator(self, operator): + return operator + + def toXML(self, xmlWriter, ttFont=None): + from fontTools.misc.textTools import num2binary + if self.bytecode is not None: + xmlWriter.dumphex(self.bytecode) + else: + index 
= 0 + args = [] + while True: + token, isOperator, index = self.getToken(index) + if token is None: + break + if isOperator: + if token in ('hintmask', 'cntrmask'): + hintMask, isOperator, index = self.getToken(index) + bits = [] + for byte in hintMask: + bits.append(num2binary(byteord(byte), 8)) + hintMask = strjoin(bits) + line = ' '.join(args + [token, hintMask]) + else: + line = ' '.join(args + [token]) + xmlWriter.write(line) + xmlWriter.newline() + args = [] + else: + if isinstance(token, float): + token = floatToFixedToStr(token, precisionBits=16) + else: + token = str(token) + args.append(token) + if args: + # NOTE: only CFF2 charstrings/subrs can have numeric arguments on + # the stack after the last operator. Compiling this would fail if + # this is part of CFF 1.0 table. + line = ' '.join(args) + xmlWriter.write(line) + + def fromXML(self, name, attrs, content): + from fontTools.misc.textTools import binary2num, readHex + if attrs.get("raw"): + self.setBytecode(readHex(content)) + return + content = strjoin(content) + content = content.split() + program = [] + end = len(content) + i = 0 + while i < end: + token = content[i] + i = i + 1 + try: + token = int(token) + except ValueError: + try: + token = strToFixedToFloat(token, precisionBits=16) + except ValueError: + program.append(token) + if token in ('hintmask', 'cntrmask'): + mask = content[i] + maskBytes = b"" + for j in range(0, len(mask), 8): + maskBytes = maskBytes + bytechr(binary2num(mask[j:j+8])) + program.append(maskBytes) + i = i + 1 + else: + program.append(token) + else: + program.append(token) + self.setProgram(program) + +class T1CharString(T2CharString): + + operandEncoding = t1OperandEncoding + operators, opcodes = buildOperatorDict(t1Operators) + + def __init__(self, bytecode=None, program=None, subrs=None): + super().__init__(bytecode, program) + self.subrs = subrs + + def getIntEncoder(self): + return encodeIntT1 + + def getFixedEncoder(self): + def encodeFixed(value): + raise 
TypeError("Type 1 charstrings don't support floating point operands") + + def decompile(self): + if self.bytecode is None: + return + program = [] + index = 0 + while True: + token, isOperator, index = self.getToken(index) + if token is None: + break + program.append(token) + self.setProgram(program) + + def draw(self, pen): + extractor = T1OutlineExtractor(pen, self.subrs) + extractor.execute(self) + self.width = extractor.width + +class DictDecompiler(object): + + operandEncoding = cffDictOperandEncoding + + def __init__(self, strings, parent=None): + self.stack = [] + self.strings = strings + self.dict = {} + self.parent = parent + + def getDict(self): + assert len(self.stack) == 0, "non-empty stack" + return self.dict + + def decompile(self, data): + index = 0 + lenData = len(data) + push = self.stack.append + while index < lenData: + b0 = byteord(data[index]) + index = index + 1 + handler = self.operandEncoding[b0] + value, index = handler(self, b0, data, index) + if value is not None: + push(value) + def pop(self): + value = self.stack[-1] + del self.stack[-1] + return value + + def popall(self): + args = self.stack[:] + del self.stack[:] + return args + + def handle_operator(self, operator): + operator, argType = operator + if isinstance(argType, tuple): + value = () + for i in range(len(argType)-1, -1, -1): + arg = argType[i] + arghandler = getattr(self, "arg_" + arg) + value = (arghandler(operator),) + value + else: + arghandler = getattr(self, "arg_" + argType) + value = arghandler(operator) + if operator == "blend": + self.stack.extend(value) + else: + self.dict[operator] = value + + def arg_number(self, name): + if isinstance(self.stack[0], list): + out = self.arg_blend_number(self.stack) + else: + out = self.pop() + return out + + def arg_blend_number(self, name): + out = [] + blendArgs = self.pop() + numMasters = len(blendArgs) + out.append(blendArgs) + out.append("blend") + dummy = self.popall() + return blendArgs + + def arg_SID(self, name): + 
return self.strings[self.pop()] + def arg_array(self, name): + return self.popall() + def arg_blendList(self, name): + """ + There may be non-blend args at the top of the stack. We first calculate + where the blend args start in the stack. These are the last + numMasters*numBlends) +1 args. + The blend args starts with numMasters relative coordinate values, the BlueValues in the list from the default master font. This is followed by + numBlends list of values. Each of value in one of these lists is the + Variable Font delta for the matching region. + + We re-arrange this to be a list of numMaster entries. Each entry starts with the corresponding default font relative value, and is followed by + the delta values. We then convert the default values, the first item in each entry, to an absolute value. + """ + vsindex = self.dict.get('vsindex', 0) + numMasters = self.parent.getNumRegions(vsindex) + 1 # only a PrivateDict has blended ops. + numBlends = self.pop() + args = self.popall() + numArgs = len(args) + # The spec says that there should be no non-blended Blue Values,. + assert(numArgs == numMasters * numBlends) + value = [None]*numBlends + numDeltas = numMasters-1 + i = 0 + prevVal = 0 + while i < numBlends: + newVal = args[i] + prevVal + prevVal = newVal + masterOffset = numBlends + (i* numDeltas) + blendList = [newVal] + args[masterOffset:masterOffset+numDeltas] + value[i] = blendList + i += 1 + return value + + def arg_delta(self, name): + valueList = self.popall() + out = [] + if valueList and isinstance(valueList[0], list): + # arg_blendList() has already converted these to absolute values. 
+ out = valueList + else: + current = 0 + for v in valueList: + current = current + v + out.append(current) + return out + + +def calcSubrBias(subrs): + nSubrs = len(subrs) + if nSubrs < 1240: + bias = 107 + elif nSubrs < 33900: + bias = 1131 + else: + bias = 32768 + return bias diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/psLib.py b/.venv/lib/python3.9/site-packages/fontTools/misc/psLib.py new file mode 100644 index 00000000..c97c418a --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/psLib.py @@ -0,0 +1,384 @@ +from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes, tostr +from fontTools.misc import eexec +from .psOperators import ( + PSOperators, + ps_StandardEncoding, + ps_array, + ps_boolean, + ps_dict, + ps_integer, + ps_literal, + ps_mark, + ps_name, + ps_operator, + ps_procedure, + ps_procmark, + ps_real, + ps_string, +) +import re +from collections.abc import Callable +from string import whitespace +import logging + + +log = logging.getLogger(__name__) + +ps_special = b'()<>[]{}%' # / is one too, but we take care of that one differently + +skipwhiteRE = re.compile(bytesjoin([b"[", whitespace, b"]*"])) +endofthingPat = bytesjoin([b"[^][(){}<>/%", whitespace, b"]*"]) +endofthingRE = re.compile(endofthingPat) +commentRE = re.compile(b"%[^\n\r]*") + +# XXX This not entirely correct as it doesn't allow *nested* embedded parens: +stringPat = br""" + \( + ( + ( + [^()]* \ [()] + ) + | + ( + [^()]* \( [^()]* \) + ) + )* + [^()]* + \) +""" +stringPat = b"".join(stringPat.split()) +stringRE = re.compile(stringPat) + +hexstringRE = re.compile(bytesjoin([b"<[", whitespace, b"0-9A-Fa-f]*>"])) + +class PSTokenError(Exception): pass +class PSError(Exception): pass + + +class PSTokenizer(object): + + def __init__(self, buf=b'', encoding="ascii"): + # Force self.buf to be a byte string + buf = tobytes(buf) + self.buf = buf + self.len = len(buf) + self.pos = 0 + self.closed = False + self.encoding = encoding + + def 
read(self, n=-1): + """Read at most 'n' bytes from the buffer, or less if the read + hits EOF before obtaining 'n' bytes. + If 'n' is negative or omitted, read all data until EOF is reached. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + if n is None or n < 0: + newpos = self.len + else: + newpos = min(self.pos+n, self.len) + r = self.buf[self.pos:newpos] + self.pos = newpos + return r + + def close(self): + if not self.closed: + self.closed = True + del self.buf, self.pos + + def getnexttoken(self, + # localize some stuff, for performance + len=len, + ps_special=ps_special, + stringmatch=stringRE.match, + hexstringmatch=hexstringRE.match, + commentmatch=commentRE.match, + endmatch=endofthingRE.match): + + self.skipwhite() + if self.pos >= self.len: + return None, None + pos = self.pos + buf = self.buf + char = bytechr(byteord(buf[pos])) + if char in ps_special: + if char in b'{}[]': + tokentype = 'do_special' + token = char + elif char == b'%': + tokentype = 'do_comment' + _, nextpos = commentmatch(buf, pos).span() + token = buf[pos:nextpos] + elif char == b'(': + tokentype = 'do_string' + m = stringmatch(buf, pos) + if m is None: + raise PSTokenError('bad string at character %d' % pos) + _, nextpos = m.span() + token = buf[pos:nextpos] + elif char == b'<': + tokentype = 'do_hexstring' + m = hexstringmatch(buf, pos) + if m is None: + raise PSTokenError('bad hexstring at character %d' % pos) + _, nextpos = m.span() + token = buf[pos:nextpos] + else: + raise PSTokenError('bad token at character %d' % pos) + else: + if char == b'/': + tokentype = 'do_literal' + m = endmatch(buf, pos+1) + else: + tokentype = '' + m = endmatch(buf, pos) + if m is None: + raise PSTokenError('bad token at character %d' % pos) + _, nextpos = m.span() + token = buf[pos:nextpos] + self.pos = pos + len(token) + token = tostr(token, encoding=self.encoding) + return tokentype, token + + def skipwhite(self, whitematch=skipwhiteRE.match): + _, nextpos = 
whitematch(self.buf, self.pos).span() + self.pos = nextpos + + def starteexec(self): + self.pos = self.pos + 1 + self.dirtybuf = self.buf[self.pos:] + self.buf, R = eexec.decrypt(self.dirtybuf, 55665) + self.len = len(self.buf) + self.pos = 4 + + def stopeexec(self): + if not hasattr(self, 'dirtybuf'): + return + self.buf = self.dirtybuf + del self.dirtybuf + + +class PSInterpreter(PSOperators): + + def __init__(self, encoding="ascii"): + systemdict = {} + userdict = {} + self.encoding = encoding + self.dictstack = [systemdict, userdict] + self.stack = [] + self.proclevel = 0 + self.procmark = ps_procmark() + self.fillsystemdict() + + def fillsystemdict(self): + systemdict = self.dictstack[0] + systemdict['['] = systemdict['mark'] = self.mark = ps_mark() + systemdict[']'] = ps_operator(']', self.do_makearray) + systemdict['true'] = ps_boolean(1) + systemdict['false'] = ps_boolean(0) + systemdict['StandardEncoding'] = ps_array(ps_StandardEncoding) + systemdict['FontDirectory'] = ps_dict({}) + self.suckoperators(systemdict, self.__class__) + + def suckoperators(self, systemdict, klass): + for name in dir(klass): + attr = getattr(self, name) + if isinstance(attr, Callable) and name[:3] == 'ps_': + name = name[3:] + systemdict[name] = ps_operator(name, attr) + for baseclass in klass.__bases__: + self.suckoperators(systemdict, baseclass) + + def interpret(self, data, getattr=getattr): + tokenizer = self.tokenizer = PSTokenizer(data, self.encoding) + getnexttoken = tokenizer.getnexttoken + do_token = self.do_token + handle_object = self.handle_object + try: + while 1: + tokentype, token = getnexttoken() + if not token: + break + if tokentype: + handler = getattr(self, tokentype) + object = handler(token) + else: + object = do_token(token) + if object is not None: + handle_object(object) + tokenizer.close() + self.tokenizer = None + except: + if self.tokenizer is not None: + log.debug( + 'ps error:\n' + '- - - - - - -\n' + '%s\n' + '>>>\n' + '%s\n' + '- - - - - - -', + 
self.tokenizer.buf[self.tokenizer.pos-50:self.tokenizer.pos], + self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50]) + raise + + def handle_object(self, object): + if not (self.proclevel or object.literal or object.type == 'proceduretype'): + if object.type != 'operatortype': + object = self.resolve_name(object.value) + if object.literal: + self.push(object) + else: + if object.type == 'proceduretype': + self.call_procedure(object) + else: + object.function() + else: + self.push(object) + + def call_procedure(self, proc): + handle_object = self.handle_object + for item in proc.value: + handle_object(item) + + def resolve_name(self, name): + dictstack = self.dictstack + for i in range(len(dictstack)-1, -1, -1): + if name in dictstack[i]: + return dictstack[i][name] + raise PSError('name error: ' + str(name)) + + def do_token(self, token, + int=int, + float=float, + ps_name=ps_name, + ps_integer=ps_integer, + ps_real=ps_real): + try: + num = int(token) + except (ValueError, OverflowError): + try: + num = float(token) + except (ValueError, OverflowError): + if '#' in token: + hashpos = token.find('#') + try: + base = int(token[:hashpos]) + num = int(token[hashpos+1:], base) + except (ValueError, OverflowError): + return ps_name(token) + else: + return ps_integer(num) + else: + return ps_name(token) + else: + return ps_real(num) + else: + return ps_integer(num) + + def do_comment(self, token): + pass + + def do_literal(self, token): + return ps_literal(token[1:]) + + def do_string(self, token): + return ps_string(token[1:-1]) + + def do_hexstring(self, token): + hexStr = "".join(token[1:-1].split()) + if len(hexStr) % 2: + hexStr = hexStr + '0' + cleanstr = [] + for i in range(0, len(hexStr), 2): + cleanstr.append(chr(int(hexStr[i:i+2], 16))) + cleanstr = "".join(cleanstr) + return ps_string(cleanstr) + + def do_special(self, token): + if token == '{': + self.proclevel = self.proclevel + 1 + return self.procmark + elif token == '}': + proc = [] + while 1: + 
topobject = self.pop() + if topobject == self.procmark: + break + proc.append(topobject) + self.proclevel = self.proclevel - 1 + proc.reverse() + return ps_procedure(proc) + elif token == '[': + return self.mark + elif token == ']': + return ps_name(']') + else: + raise PSTokenError('huh?') + + def push(self, object): + self.stack.append(object) + + def pop(self, *types): + stack = self.stack + if not stack: + raise PSError('stack underflow') + object = stack[-1] + if types: + if object.type not in types: + raise PSError('typecheck, expected %s, found %s' % (repr(types), object.type)) + del stack[-1] + return object + + def do_makearray(self): + array = [] + while 1: + topobject = self.pop() + if topobject == self.mark: + break + array.append(topobject) + array.reverse() + self.push(ps_array(array)) + + def close(self): + """Remove circular references.""" + del self.stack + del self.dictstack + + +def unpack_item(item): + tp = type(item.value) + if tp == dict: + newitem = {} + for key, value in item.value.items(): + newitem[key] = unpack_item(value) + elif tp == list: + newitem = [None] * len(item.value) + for i in range(len(item.value)): + newitem[i] = unpack_item(item.value[i]) + if item.type == 'proceduretype': + newitem = tuple(newitem) + else: + newitem = item.value + return newitem + +def suckfont(data, encoding="ascii"): + m = re.search(br"/FontName\s+/([^ \t\n\r]+)\s+def", data) + if m: + fontName = m.group(1) + else: + fontName = None + interpreter = PSInterpreter(encoding=encoding) + interpreter.interpret(b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop") + interpreter.interpret(data) + fontdir = interpreter.dictstack[0]['FontDirectory'].value + if fontName in fontdir: + rawfont = fontdir[fontName] + else: + # fall back, in case fontName wasn't found + fontNames = list(fontdir.keys()) + if len(fontNames) > 1: + fontNames.remove("Helvetica") + fontNames.sort() + rawfont = fontdir[fontNames[0]] + interpreter.close() + return 
unpack_item(rawfont) diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/psOperators.py b/.venv/lib/python3.9/site-packages/fontTools/misc/psOperators.py new file mode 100644 index 00000000..3b378f59 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/psOperators.py @@ -0,0 +1,537 @@ +_accessstrings = {0: "", 1: "readonly", 2: "executeonly", 3: "noaccess"} + + +class ps_object(object): + + literal = 1 + access = 0 + value = None + + def __init__(self, value): + self.value = value + self.type = self.__class__.__name__[3:] + "type" + + def __repr__(self): + return "<%s %s>" % (self.__class__.__name__[3:], repr(self.value)) + + +class ps_operator(ps_object): + + literal = 0 + + def __init__(self, name, function): + self.name = name + self.function = function + self.type = self.__class__.__name__[3:] + "type" + def __repr__(self): + return "" % self.name + +class ps_procedure(ps_object): + literal = 0 + def __repr__(self): + return "" + def __str__(self): + psstring = '{' + for i in range(len(self.value)): + if i: + psstring = psstring + ' ' + str(self.value[i]) + else: + psstring = psstring + str(self.value[i]) + return psstring + '}' + +class ps_name(ps_object): + literal = 0 + def __str__(self): + if self.literal: + return '/' + self.value + else: + return self.value + +class ps_literal(ps_object): + def __str__(self): + return '/' + self.value + +class ps_array(ps_object): + def __str__(self): + psstring = '[' + for i in range(len(self.value)): + item = self.value[i] + access = _accessstrings[item.access] + if access: + access = ' ' + access + if i: + psstring = psstring + ' ' + str(item) + access + else: + psstring = psstring + str(item) + access + return psstring + ']' + def __repr__(self): + return "" + +_type1_pre_eexec_order = [ + "FontInfo", + "FontName", + "Encoding", + "PaintType", + "FontType", + "FontMatrix", + "FontBBox", + "UniqueID", + "Metrics", + "StrokeWidth" + ] + +_type1_fontinfo_order = [ + "version", + "Notice", + 
"FullName", + "FamilyName", + "Weight", + "ItalicAngle", + "isFixedPitch", + "UnderlinePosition", + "UnderlineThickness" + ] + +_type1_post_eexec_order = [ + "Private", + "CharStrings", + "FID" + ] + +def _type1_item_repr(key, value): + psstring = "" + access = _accessstrings[value.access] + if access: + access = access + ' ' + if key == 'CharStrings': + psstring = psstring + "/%s %s def\n" % (key, _type1_CharString_repr(value.value)) + elif key == 'Encoding': + psstring = psstring + _type1_Encoding_repr(value, access) + else: + psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access) + return psstring + +def _type1_Encoding_repr(encoding, access): + encoding = encoding.value + psstring = "/Encoding 256 array\n0 1 255 {1 index exch /.notdef put} for\n" + for i in range(256): + name = encoding[i].value + if name != '.notdef': + psstring = psstring + "dup %d /%s put\n" % (i, name) + return psstring + access + "def\n" + +def _type1_CharString_repr(charstrings): + items = sorted(charstrings.items()) + return 'xxx' + +class ps_font(ps_object): + def __str__(self): + psstring = "%d dict dup begin\n" % len(self.value) + for key in _type1_pre_eexec_order: + try: + value = self.value[key] + except KeyError: + pass + else: + psstring = psstring + _type1_item_repr(key, value) + items = sorted(self.value.items()) + for key, value in items: + if key not in _type1_pre_eexec_order + _type1_post_eexec_order: + psstring = psstring + _type1_item_repr(key, value) + psstring = psstring + "currentdict end\ncurrentfile eexec\ndup " + for key in _type1_post_eexec_order: + try: + value = self.value[key] + except KeyError: + pass + else: + psstring = psstring + _type1_item_repr(key, value) + return psstring + 'dup/FontName get exch definefont pop\nmark currentfile closefile\n' + \ + 8 * (64 * '0' + '\n') + 'cleartomark' + '\n' + def __repr__(self): + return '' + +class ps_file(ps_object): + pass + +class ps_dict(ps_object): + def __str__(self): + psstring = "%d dict dup 
begin\n" % len(self.value) + items = sorted(self.value.items()) + for key, value in items: + access = _accessstrings[value.access] + if access: + access = access + ' ' + psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access) + return psstring + 'end ' + def __repr__(self): + return "" + +class ps_mark(ps_object): + def __init__(self): + self.value = 'mark' + self.type = self.__class__.__name__[3:] + "type" + +class ps_procmark(ps_object): + def __init__(self): + self.value = 'procmark' + self.type = self.__class__.__name__[3:] + "type" + +class ps_null(ps_object): + def __init__(self): + self.type = self.__class__.__name__[3:] + "type" + +class ps_boolean(ps_object): + def __str__(self): + if self.value: + return 'true' + else: + return 'false' + +class ps_string(ps_object): + def __str__(self): + return "(%s)" % repr(self.value)[1:-1] + +class ps_integer(ps_object): + def __str__(self): + return repr(self.value) + +class ps_real(ps_object): + def __str__(self): + return repr(self.value) + + +class PSOperators(object): + + def ps_def(self): + obj = self.pop() + name = self.pop() + self.dictstack[-1][name.value] = obj + + def ps_bind(self): + proc = self.pop('proceduretype') + self.proc_bind(proc) + self.push(proc) + + def proc_bind(self, proc): + for i in range(len(proc.value)): + item = proc.value[i] + if item.type == 'proceduretype': + self.proc_bind(item) + else: + if not item.literal: + try: + obj = self.resolve_name(item.value) + except: + pass + else: + if obj.type == 'operatortype': + proc.value[i] = obj + + def ps_exch(self): + if len(self.stack) < 2: + raise RuntimeError('stack underflow') + obj1 = self.pop() + obj2 = self.pop() + self.push(obj1) + self.push(obj2) + + def ps_dup(self): + if not self.stack: + raise RuntimeError('stack underflow') + self.push(self.stack[-1]) + + def ps_exec(self): + obj = self.pop() + if obj.type == 'proceduretype': + self.call_procedure(obj) + else: + self.handle_object(obj) + + def ps_count(self): + 
self.push(ps_integer(len(self.stack))) + + def ps_eq(self): + any1 = self.pop() + any2 = self.pop() + self.push(ps_boolean(any1.value == any2.value)) + + def ps_ne(self): + any1 = self.pop() + any2 = self.pop() + self.push(ps_boolean(any1.value != any2.value)) + + def ps_cvx(self): + obj = self.pop() + obj.literal = 0 + self.push(obj) + + def ps_matrix(self): + matrix = [ps_real(1.0), ps_integer(0), ps_integer(0), ps_real(1.0), ps_integer(0), ps_integer(0)] + self.push(ps_array(matrix)) + + def ps_string(self): + num = self.pop('integertype').value + self.push(ps_string('\0' * num)) + + def ps_type(self): + obj = self.pop() + self.push(ps_string(obj.type)) + + def ps_store(self): + value = self.pop() + key = self.pop() + name = key.value + for i in range(len(self.dictstack)-1, -1, -1): + if name in self.dictstack[i]: + self.dictstack[i][name] = value + break + self.dictstack[-1][name] = value + + def ps_where(self): + name = self.pop() + # XXX + self.push(ps_boolean(0)) + + def ps_systemdict(self): + self.push(ps_dict(self.dictstack[0])) + + def ps_userdict(self): + self.push(ps_dict(self.dictstack[1])) + + def ps_currentdict(self): + self.push(ps_dict(self.dictstack[-1])) + + def ps_currentfile(self): + self.push(ps_file(self.tokenizer)) + + def ps_eexec(self): + f = self.pop('filetype').value + f.starteexec() + + def ps_closefile(self): + f = self.pop('filetype').value + f.skipwhite() + f.stopeexec() + + def ps_cleartomark(self): + obj = self.pop() + while obj != self.mark: + obj = self.pop() + + def ps_readstring(self, + ps_boolean=ps_boolean, + len=len): + s = self.pop('stringtype') + oldstr = s.value + f = self.pop('filetype') + #pad = file.value.read(1) + # for StringIO, this is faster + f.value.pos = f.value.pos + 1 + newstr = f.value.read(len(oldstr)) + s.value = newstr + self.push(s) + self.push(ps_boolean(len(oldstr) == len(newstr))) + + def ps_known(self): + key = self.pop() + d = self.pop('dicttype', 'fonttype') + self.push(ps_boolean(key.value in 
d.value)) + + def ps_if(self): + proc = self.pop('proceduretype') + if self.pop('booleantype').value: + self.call_procedure(proc) + + def ps_ifelse(self): + proc2 = self.pop('proceduretype') + proc1 = self.pop('proceduretype') + if self.pop('booleantype').value: + self.call_procedure(proc1) + else: + self.call_procedure(proc2) + + def ps_readonly(self): + obj = self.pop() + if obj.access < 1: + obj.access = 1 + self.push(obj) + + def ps_executeonly(self): + obj = self.pop() + if obj.access < 2: + obj.access = 2 + self.push(obj) + + def ps_noaccess(self): + obj = self.pop() + if obj.access < 3: + obj.access = 3 + self.push(obj) + + def ps_not(self): + obj = self.pop('booleantype', 'integertype') + if obj.type == 'booleantype': + self.push(ps_boolean(not obj.value)) + else: + self.push(ps_integer(~obj.value)) + + def ps_print(self): + str = self.pop('stringtype') + print('PS output --->', str.value) + + def ps_anchorsearch(self): + seek = self.pop('stringtype') + s = self.pop('stringtype') + seeklen = len(seek.value) + if s.value[:seeklen] == seek.value: + self.push(ps_string(s.value[seeklen:])) + self.push(seek) + self.push(ps_boolean(1)) + else: + self.push(s) + self.push(ps_boolean(0)) + + def ps_array(self): + num = self.pop('integertype') + array = ps_array([None] * num.value) + self.push(array) + + def ps_astore(self): + array = self.pop('arraytype') + for i in range(len(array.value)-1, -1, -1): + array.value[i] = self.pop() + self.push(array) + + def ps_load(self): + name = self.pop() + self.push(self.resolve_name(name.value)) + + def ps_put(self): + obj1 = self.pop() + obj2 = self.pop() + obj3 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype') + tp = obj3.type + if tp == 'arraytype' or tp == 'proceduretype': + obj3.value[obj2.value] = obj1 + elif tp == 'dicttype': + obj3.value[obj2.value] = obj1 + elif tp == 'stringtype': + index = obj2.value + obj3.value = obj3.value[:index] + chr(obj1.value) + obj3.value[index+1:] + + def ps_get(self): + 
obj1 = self.pop() + if obj1.value == "Encoding": + pass + obj2 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype', 'fonttype') + tp = obj2.type + if tp in ('arraytype', 'proceduretype'): + self.push(obj2.value[obj1.value]) + elif tp in ('dicttype', 'fonttype'): + self.push(obj2.value[obj1.value]) + elif tp == 'stringtype': + self.push(ps_integer(ord(obj2.value[obj1.value]))) + else: + assert False, "shouldn't get here" + + def ps_getinterval(self): + obj1 = self.pop('integertype') + obj2 = self.pop('integertype') + obj3 = self.pop('arraytype', 'stringtype') + tp = obj3.type + if tp == 'arraytype': + self.push(ps_array(obj3.value[obj2.value:obj2.value + obj1.value])) + elif tp == 'stringtype': + self.push(ps_string(obj3.value[obj2.value:obj2.value + obj1.value])) + + def ps_putinterval(self): + obj1 = self.pop('arraytype', 'stringtype') + obj2 = self.pop('integertype') + obj3 = self.pop('arraytype', 'stringtype') + tp = obj3.type + if tp == 'arraytype': + obj3.value[obj2.value:obj2.value + len(obj1.value)] = obj1.value + elif tp == 'stringtype': + newstr = obj3.value[:obj2.value] + newstr = newstr + obj1.value + newstr = newstr + obj3.value[obj2.value + len(obj1.value):] + obj3.value = newstr + + def ps_cvn(self): + self.push(ps_name(self.pop('stringtype').value)) + + def ps_index(self): + n = self.pop('integertype').value + if n < 0: + raise RuntimeError('index may not be negative') + self.push(self.stack[-1-n]) + + def ps_for(self): + proc = self.pop('proceduretype') + limit = self.pop('integertype', 'realtype').value + increment = self.pop('integertype', 'realtype').value + i = self.pop('integertype', 'realtype').value + while 1: + if increment > 0: + if i > limit: + break + else: + if i < limit: + break + if type(i) == type(0.0): + self.push(ps_real(i)) + else: + self.push(ps_integer(i)) + self.call_procedure(proc) + i = i + increment + + def ps_forall(self): + proc = self.pop('proceduretype') + obj = self.pop('arraytype', 'stringtype', 
'dicttype') + tp = obj.type + if tp == 'arraytype': + for item in obj.value: + self.push(item) + self.call_procedure(proc) + elif tp == 'stringtype': + for item in obj.value: + self.push(ps_integer(ord(item))) + self.call_procedure(proc) + elif tp == 'dicttype': + for key, value in obj.value.items(): + self.push(ps_name(key)) + self.push(value) + self.call_procedure(proc) + + def ps_definefont(self): + font = self.pop('dicttype') + name = self.pop() + font = ps_font(font.value) + self.dictstack[0]['FontDirectory'].value[name.value] = font + self.push(font) + + def ps_findfont(self): + name = self.pop() + font = self.dictstack[0]['FontDirectory'].value[name.value] + self.push(font) + + def ps_pop(self): + self.pop() + + def ps_dict(self): + self.pop('integertype') + self.push(ps_dict({})) + + def ps_begin(self): + self.dictstack.append(self.pop('dicttype').value) + + def ps_end(self): + if len(self.dictstack) > 2: + del self.dictstack[-1] + else: + raise RuntimeError('dictstack underflow') + +notdef = '.notdef' +from fontTools.encodings.StandardEncoding import StandardEncoding +ps_StandardEncoding = list(map(ps_name, StandardEncoding)) diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/py23.py b/.venv/lib/python3.9/site-packages/fontTools/misc/py23.py new file mode 100644 index 00000000..29f634d6 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/py23.py @@ -0,0 +1,96 @@ +"""Python 2/3 compat layer leftovers.""" + +import decimal as _decimal +import math as _math +import warnings +from contextlib import redirect_stderr, redirect_stdout +from io import BytesIO +from io import StringIO as UnicodeIO +from types import SimpleNamespace + +from .textTools import Tag, bytechr, byteord, bytesjoin, strjoin, tobytes, tostr + +warnings.warn( + "The py23 module has been deprecated and will be removed in a future release. 
" + "Please update your code.", + DeprecationWarning, +) + +__all__ = [ + "basestring", + "bytechr", + "byteord", + "BytesIO", + "bytesjoin", + "open", + "Py23Error", + "range", + "RecursionError", + "round", + "SimpleNamespace", + "StringIO", + "strjoin", + "Tag", + "tobytes", + "tostr", + "tounicode", + "unichr", + "unicode", + "UnicodeIO", + "xrange", + "zip", +] + + +class Py23Error(NotImplementedError): + pass + + +RecursionError = RecursionError +StringIO = UnicodeIO + +basestring = str +isclose = _math.isclose +isfinite = _math.isfinite +open = open +range = range +round = round3 = round +unichr = chr +unicode = str +zip = zip + +tounicode = tostr + + +def xrange(*args, **kwargs): + raise Py23Error("'xrange' is not defined. Use 'range' instead.") + + +def round2(number, ndigits=None): + """ + Implementation of Python 2 built-in round() function. + Rounds a number to a given precision in decimal digits (default + 0 digits). The result is a floating point number. Values are rounded + to the closest multiple of 10 to the power minus ndigits; if two + multiples are equally close, rounding is done away from 0. + ndigits may be negative. 
+ See Python 2 documentation: + https://docs.python.org/2/library/functions.html?highlight=round#round + """ + if ndigits is None: + ndigits = 0 + + if ndigits < 0: + exponent = 10 ** (-ndigits) + quotient, remainder = divmod(number, exponent) + if remainder >= exponent // 2 and number >= 0: + quotient += 1 + return float(quotient * exponent) + else: + exponent = _decimal.Decimal("10") ** (-ndigits) + + d = _decimal.Decimal.from_float(number).quantize( + exponent, rounding=_decimal.ROUND_HALF_UP + ) + + return float(d) diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/roundTools.py b/.venv/lib/python3.9/site-packages/fontTools/misc/roundTools.py new file mode 100644 index 00000000..6f4aa634 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/roundTools.py @@ -0,0 +1,105 @@ +""" +Various round-to-integer helpers. +""" + +import math +import functools +import logging + +log = logging.getLogger(__name__) + +__all__ = [ + "noRound", + "otRound", + "maybeRound", + "roundFunc", +] + +def noRound(value): + return value + +def otRound(value): + """Round float value to nearest integer towards ``+Infinity``. + + The OpenType spec (in the section on `"normalization" of OpenType Font Variations `_) + defines the required method for converting floating point values to + fixed-point. In particular it specifies the following rounding strategy: + + for fractional values of 0.5 and higher, take the next higher integer; + for other fractional values, truncate. + + This function rounds the floating-point value according to this strategy + in preparation for conversion to fixed-point. + + Args: + value (float): The input floating-point value. + + Returns + float: The rounded value. 
+ """ + # See this thread for how we ended up with this implementation: + # https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166 + return int(math.floor(value + 0.5)) + +def maybeRound(v, tolerance, round=otRound): + rounded = round(v) + return rounded if abs(rounded - v) <= tolerance else v + +def roundFunc(tolerance, round=otRound): + if tolerance < 0: + raise ValueError("Rounding tolerance must be positive") + + if tolerance == 0: + return noRound + + if tolerance >= .5: + return round + + return functools.partial(maybeRound, tolerance=tolerance, round=round) + + +def nearestMultipleShortestRepr(value: float, factor: float) -> str: + """Round to nearest multiple of factor and return shortest decimal representation. + + This chooses the float that is closer to a multiple of the given factor while + having the shortest decimal representation (the least number of fractional decimal + digits). + + For example, given the following: + + >>> nearestMultipleShortestRepr(-0.61883544921875, 1.0/(1<<14)) + '-0.61884' + + Useful when you need to serialize or print a fixed-point number (or multiples + thereof, such as F2Dot14 fractions of 180 degrees in COLRv1 PaintRotate) in + a human-readable form. + + Args: + value (value): The value to be rounded and serialized. + factor (float): The value which the result is a close multiple of. + + Returns: + str: A compact string representation of the value. + """ + if not value: + return "0.0" + + value = otRound(value / factor) * factor + eps = .5 * factor + lo = value - eps + hi = value + eps + # If the range of valid choices spans an integer, return the integer. 
+ if int(lo) != int(hi): + return str(float(round(value))) + + fmt = "%.8f" + lo = fmt % lo + hi = fmt % hi + assert len(lo) == len(hi) and lo != hi + for i in range(len(lo)): + if lo[i] != hi[i]: + break + period = lo.find('.') + assert period < i + fmt = "%%.%df" % (i - period) + return fmt % value diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/sstruct.py b/.venv/lib/python3.9/site-packages/fontTools/misc/sstruct.py new file mode 100644 index 00000000..6db8b515 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/sstruct.py @@ -0,0 +1,216 @@ +"""sstruct.py -- SuperStruct + +Higher level layer on top of the struct module, enabling to +bind names to struct elements. The interface is similar to +struct, except the objects passed and returned are not tuples +(or argument lists), but dictionaries or instances. + +Just like struct, we use fmt strings to describe a data +structure, except we use one line per element. Lines are +separated by newlines or semi-colons. Each line contains +either one of the special struct characters ('@', '=', '<', +'>' or '!') or a 'name:formatchar' combo (eg. 'myFloat:f'). +Repetitions, like the struct module offers them are not useful +in this context, except for fixed length strings (eg. 'myInt:5h' +is not allowed but 'myString:5s' is). The 'x' fmt character +(pad byte) is treated as 'special', since it is by definition +anonymous. Extra whitespace is allowed everywhere. + +The sstruct module offers one feature that the "normal" struct +module doesn't: support for fixed point numbers. These are spelled +as "n.mF", where n is the number of bits before the point, and m +the number of bits after the point. Fixed point numbers get +converted to floats. + +pack(fmt, object): + 'object' is either a dictionary or an instance (or actually + anything that has a __dict__ attribute). If it is a dictionary, + its keys are used for names. If it is an instance, it's + attributes are used to grab struct elements from. 
Returns + a string containing the data. + +unpack(fmt, data, object=None) + If 'object' is omitted (or None), a new dictionary will be + returned. If 'object' is a dictionary, it will be used to add + struct elements to. If it is an instance (or in fact anything + that has a __dict__ attribute), an attribute will be added for + each struct element. In the latter two cases, 'object' itself + is returned. + +unpack2(fmt, data, object=None) + Convenience function. Same as unpack, except data may be longer + than needed. The returned value is a tuple: (object, leftoverdata). + +calcsize(fmt) + like struct.calcsize(), but uses our own fmt strings: + it returns the size of the data in bytes. +""" + +from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +from fontTools.misc.textTools import tobytes, tostr +import struct +import re + +__version__ = "1.2" +__copyright__ = "Copyright 1998, Just van Rossum " + + +class Error(Exception): + pass + +def pack(fmt, obj): + formatstring, names, fixes = getformat(fmt, keep_pad_byte=True) + elements = [] + if not isinstance(obj, dict): + obj = obj.__dict__ + for name in names: + value = obj[name] + if name in fixes: + # fixed point conversion + value = fl2fi(value, fixes[name]) + elif isinstance(value, str): + value = tobytes(value) + elements.append(value) + data = struct.pack(*(formatstring,) + tuple(elements)) + return data + +def unpack(fmt, data, obj=None): + if obj is None: + obj = {} + data = tobytes(data) + formatstring, names, fixes = getformat(fmt) + if isinstance(obj, dict): + d = obj + else: + d = obj.__dict__ + elements = struct.unpack(formatstring, data) + for i in range(len(names)): + name = names[i] + value = elements[i] + if name in fixes: + # fixed point conversion + value = fi2fl(value, fixes[name]) + elif isinstance(value, bytes): + try: + value = tostr(value) + except UnicodeDecodeError: + pass + d[name] = value + return obj + +def unpack2(fmt, data, obj=None): + length = 
calcsize(fmt) + return unpack(fmt, data[:length], obj), data[length:] + +def calcsize(fmt): + formatstring, names, fixes = getformat(fmt) + return struct.calcsize(formatstring) + + +# matches "name:formatchar" (whitespace is allowed) +_elementRE = re.compile( + r"\s*" # whitespace + r"([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier) + r"\s*:\s*" # whitespace : whitespace + r"([xcbB?hHiIlLqQfd]|" # formatchar... + r"[0-9]+[ps]|" # ...formatchar... + r"([0-9]+)\.([0-9]+)(F))" # ...formatchar + r"\s*" # whitespace + r"(#.*)?$" # [comment] + end of string + ) + +# matches the special struct fmt chars and 'x' (pad byte) +_extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$") + +# matches an "empty" string, possibly containing whitespace and/or a comment +_emptyRE = re.compile(r"\s*(#.*)?$") + +_fixedpointmappings = { + 8: "b", + 16: "h", + 32: "l"} + +_formatcache = {} + +def getformat(fmt, keep_pad_byte=False): + fmt = tostr(fmt, encoding="ascii") + try: + formatstring, names, fixes = _formatcache[fmt] + except KeyError: + lines = re.split("[\n;]", fmt) + formatstring = "" + names = [] + fixes = {} + for line in lines: + if _emptyRE.match(line): + continue + m = _extraRE.match(line) + if m: + formatchar = m.group(1) + if formatchar != 'x' and formatstring: + raise Error("a special fmt char must be first") + else: + m = _elementRE.match(line) + if not m: + raise Error("syntax error in fmt: '%s'" % line) + name = m.group(1) + formatchar = m.group(2) + if keep_pad_byte or formatchar != "x": + names.append(name) + if m.group(3): + # fixed point + before = int(m.group(3)) + after = int(m.group(4)) + bits = before + after + if bits not in [8, 16, 32]: + raise Error("fixed point must be 8, 16 or 32 bits long") + formatchar = _fixedpointmappings[bits] + assert m.group(5) == "F" + fixes[name] = after + formatstring = formatstring + formatchar + _formatcache[fmt] = formatstring, names, fixes + return formatstring, names, fixes + +def _test(): + fmt = """ + # comments are allowed 
+ > # big endian (see documentation for struct) + # empty lines are allowed: + + ashort: h + along: l + abyte: b # a byte + achar: c + astr: 5s + afloat: f; adouble: d # multiple "statements" are allowed + afixed: 16.16F + abool: ? + apad: x + """ + + print('size:', calcsize(fmt)) + + class foo(object): + pass + + i = foo() + + i.ashort = 0x7fff + i.along = 0x7fffffff + i.abyte = 0x7f + i.achar = "a" + i.astr = "12345" + i.afloat = 0.5 + i.adouble = 0.5 + i.afixed = 1.5 + i.abool = True + + data = pack(fmt, i) + print('data:', repr(data)) + print(unpack(fmt, data)) + i2 = foo() + unpack(fmt, data, i2) + print(vars(i2)) + +if __name__ == "__main__": + _test() diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/symfont.py b/.venv/lib/python3.9/site-packages/fontTools/misc/symfont.py new file mode 100644 index 00000000..a1a87300 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/symfont.py @@ -0,0 +1,192 @@ +from fontTools.pens.basePen import BasePen +from functools import partial +from itertools import count +import sympy as sp +import sys + +n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic + +t, x, y = sp.symbols('t x y', real=True) +c = sp.symbols('c', real=False) # Complex representation instead of x/y + +X = tuple(sp.symbols('x:%d'%(n+1), real=True)) +Y = tuple(sp.symbols('y:%d'%(n+1), real=True)) +P = tuple(zip(*(sp.symbols('p:%d[%s]'%(n+1,w), real=True) for w in '01'))) +C = tuple(sp.symbols('c:%d'%(n+1), real=False)) + +# Cubic Bernstein basis functions +BinomialCoefficient = [(1, 0)] +for i in range(1, n+1): + last = BinomialCoefficient[-1] + this = tuple(last[j-1]+last[j] for j in range(len(last)))+(0,) + BinomialCoefficient.append(this) +BinomialCoefficient = tuple(tuple(item[:-1]) for item in BinomialCoefficient) +del last, this + +BernsteinPolynomial = tuple( + tuple(c * t**i * (1-t)**(n-i) for i,c in enumerate(coeffs)) + for n,coeffs in enumerate(BinomialCoefficient)) + +BezierCurve = tuple( + 
tuple(sum(P[i][j]*bernstein for i,bernstein in enumerate(bernsteins)) + for j in range(2)) + for n,bernsteins in enumerate(BernsteinPolynomial)) +BezierCurveC = tuple( + sum(C[i]*bernstein for i,bernstein in enumerate(bernsteins)) + for n,bernsteins in enumerate(BernsteinPolynomial)) + + +def green(f, curveXY): + f = -sp.integrate(sp.sympify(f), y) + f = f.subs({x:curveXY[0], y:curveXY[1]}) + f = sp.integrate(f * sp.diff(curveXY[0], t), (t, 0, 1)) + return f + + +class _BezierFuncsLazy(dict): + + def __init__(self, symfunc): + self._symfunc = symfunc + self._bezfuncs = {} + + def __missing__(self, i): + args = ['p%d'%d for d in range(i+1)] + f = green(self._symfunc, BezierCurve[i]) + f = sp.gcd_terms(f.collect(sum(P,()))) # Optimize + return sp.lambdify(args, f) + +class GreenPen(BasePen): + + _BezierFuncs = {} + + @classmethod + def _getGreenBezierFuncs(celf, func): + funcstr = str(func) + if not funcstr in celf._BezierFuncs: + celf._BezierFuncs[funcstr] = _BezierFuncsLazy(func) + return celf._BezierFuncs[funcstr] + + def __init__(self, func, glyphset=None): + BasePen.__init__(self, glyphset) + self._funcs = self._getGreenBezierFuncs(func) + self.value = 0 + + def _moveTo(self, p0): + self.__startPoint = p0 + + def _closePath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + self._lineTo(self.__startPoint) + + def _endPath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + # Green theorem is not defined on open contours. + raise NotImplementedError + + def _lineTo(self, p1): + p0 = self._getCurrentPoint() + self.value += self._funcs[1](p0, p1) + + def _qCurveToOne(self, p1, p2): + p0 = self._getCurrentPoint() + self.value += self._funcs[2](p0, p1, p2) + + def _curveToOne(self, p1, p2, p3): + p0 = self._getCurrentPoint() + self.value += self._funcs[3](p0, p1, p2, p3) + +# Sample pens. +# Do not use this in real code. +# Use fontTools.pens.momentsPen.MomentsPen instead. 
+AreaPen = partial(GreenPen, func=1) +MomentXPen = partial(GreenPen, func=x) +MomentYPen = partial(GreenPen, func=y) +MomentXXPen = partial(GreenPen, func=x*x) +MomentYYPen = partial(GreenPen, func=y*y) +MomentXYPen = partial(GreenPen, func=x*y) + + +def printGreenPen(penName, funcs, file=sys.stdout): + + print( +'''from fontTools.pens.basePen import BasePen + +class %s(BasePen): + + def __init__(self, glyphset=None): + BasePen.__init__(self, glyphset) +'''%penName, file=file) + for name,f in funcs: + print(' self.%s = 0' % name, file=file) + print(''' + def _moveTo(self, p0): + self.__startPoint = p0 + + def _closePath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + self._lineTo(self.__startPoint) + + def _endPath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + # Green theorem is not defined on open contours. + raise NotImplementedError +''', end='', file=file) + + for n in (1, 2, 3): + + if n == 1: + print(''' + def _lineTo(self, p1): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 +''', file=file) + elif n == 2: + print(''' + def _qCurveToOne(self, p1, p2): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 + x2,y2 = p2 +''', file=file) + elif n == 3: + print(''' + def _curveToOne(self, p1, p2, p3): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 + x2,y2 = p2 + x3,y3 = p3 +''', file=file) + subs = {P[i][j]: [X, Y][j][i] for i in range(n+1) for j in range(2)} + greens = [green(f, BezierCurve[n]) for name,f in funcs] + greens = [sp.gcd_terms(f.collect(sum(P,()))) for f in greens] # Optimize + greens = [f.subs(subs) for f in greens] # Convert to p to x/y + defs, exprs = sp.cse(greens, + optimizations='basic', + symbols=(sp.Symbol('r%d'%i) for i in count())) + for name,value in defs: + print(' %s = %s' % (name, value), file=file) + print(file=file) + for name,value in zip([f[0] for f in funcs], exprs): + print(' self.%s += %s' % (name, value), file=file) + + print(''' +if __name__ == '__main__': + from fontTools.misc.symfont 
import x, y, printGreenPen + printGreenPen('%s', ['''%penName, file=file) + for name,f in funcs: + print(" ('%s', %s)," % (name, str(f)), file=file) + print(' ])', file=file) + + +if __name__ == '__main__': + pen = AreaPen() + pen.moveTo((100,100)) + pen.lineTo((100,200)) + pen.lineTo((200,200)) + pen.curveTo((200,250),(300,300),(250,350)) + pen.lineTo((200,100)) + pen.closePath() + print(pen.value) diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/testTools.py b/.venv/lib/python3.9/site-packages/fontTools/misc/testTools.py new file mode 100644 index 00000000..db316a82 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/testTools.py @@ -0,0 +1,198 @@ +"""Helpers for writing unit tests.""" + +from collections.abc import Iterable +from io import BytesIO +import os +import shutil +import sys +import tempfile +from unittest import TestCase as _TestCase +from fontTools.misc.textTools import tobytes +from fontTools.misc.xmlWriter import XMLWriter + + +def parseXML(xmlSnippet): + """Parses a snippet of XML. + + Input can be either a single string (unicode or UTF-8 bytes), or a + a sequence of strings. + + The result is in the same format that would be returned by + XMLReader, but the parser imposes no constraints on the root + element so it can be called on small snippets of TTX files. + """ + # To support snippets with multiple elements, we add a fake root. 
+ reader = TestXMLReader_() + xml = b"" + if isinstance(xmlSnippet, bytes): + xml += xmlSnippet + elif isinstance(xmlSnippet, str): + xml += tobytes(xmlSnippet, 'utf-8') + elif isinstance(xmlSnippet, Iterable): + xml += b"".join(tobytes(s, 'utf-8') for s in xmlSnippet) + else: + raise TypeError("expected string or sequence of strings; found %r" + % type(xmlSnippet).__name__) + xml += b"" + reader.parser.Parse(xml, 0) + return reader.root[2] + + +def parseXmlInto(font, parseInto, xmlSnippet): + parsed_xml = [e for e in parseXML(xmlSnippet.strip()) if not isinstance(e, str)] + for name, attrs, content in parsed_xml: + parseInto.fromXML(name, attrs, content, font) + parseInto.populateDefaults() + return parseInto + + +class FakeFont: + def __init__(self, glyphs): + self.glyphOrder_ = glyphs + self.reverseGlyphOrderDict_ = {g: i for i, g in enumerate(glyphs)} + self.lazy = False + self.tables = {} + + def __getitem__(self, tag): + return self.tables[tag] + + def __setitem__(self, tag, table): + self.tables[tag] = table + + def get(self, tag, default=None): + return self.tables.get(tag, default) + + def getGlyphID(self, name): + return self.reverseGlyphOrderDict_[name] + + def getGlyphIDMany(self, lst): + return [self.getGlyphID(gid) for gid in lst] + + def getGlyphName(self, glyphID): + if glyphID < len(self.glyphOrder_): + return self.glyphOrder_[glyphID] + else: + return "glyph%.5d" % glyphID + def getGlyphNameMany(self, lst): + return [self.getGlyphName(gid) for gid in lst] + + def getGlyphOrder(self): + return self.glyphOrder_ + + def getReverseGlyphMap(self): + return self.reverseGlyphOrderDict_ + + def getGlyphNames(self): + return sorted(self.getGlyphOrder()) + + +class TestXMLReader_(object): + def __init__(self): + from xml.parsers.expat import ParserCreate + self.parser = ParserCreate() + self.parser.StartElementHandler = self.startElement_ + self.parser.EndElementHandler = self.endElement_ + self.parser.CharacterDataHandler = self.addCharacterData_ + 
self.root = None + self.stack = [] + + def startElement_(self, name, attrs): + element = (name, attrs, []) + if self.stack: + self.stack[-1][2].append(element) + else: + self.root = element + self.stack.append(element) + + def endElement_(self, name): + self.stack.pop() + + def addCharacterData_(self, data): + self.stack[-1][2].append(data) + + +def makeXMLWriter(newlinestr='\n'): + # don't write OS-specific new lines + writer = XMLWriter(BytesIO(), newlinestr=newlinestr) + # erase XML declaration + writer.file.seek(0) + writer.file.truncate() + return writer + + +def getXML(func, ttFont=None): + """Call the passed toXML function and return the written content as a + list of lines (unicode strings). + Result is stripped of XML declaration and OS-specific newline characters. + """ + writer = makeXMLWriter() + func(writer, ttFont) + xml = writer.file.getvalue().decode("utf-8") + # toXML methods must always end with a writer.newline() + assert xml.endswith("\n") + return xml.splitlines() + + +class MockFont(object): + """A font-like object that automatically adds any looked up glyphname + to its glyphOrder.""" + + def __init__(self): + self._glyphOrder = ['.notdef'] + + class AllocatingDict(dict): + def __missing__(reverseDict, key): + self._glyphOrder.append(key) + gid = len(reverseDict) + reverseDict[key] = gid + return gid + self._reverseGlyphOrder = AllocatingDict({'.notdef': 0}) + self.lazy = False + + def getGlyphID(self, glyph): + gid = self._reverseGlyphOrder[glyph] + return gid + + def getReverseGlyphMap(self): + return self._reverseGlyphOrder + + def getGlyphName(self, gid): + return self._glyphOrder[gid] + + def getGlyphOrder(self): + return self._glyphOrder + + +class TestCase(_TestCase): + + def __init__(self, methodName): + _TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. 
+ if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + +class DataFilesHandler(TestCase): + + def setUp(self): + self.tempdir = None + self.num_tempfiles = 0 + + def tearDown(self): + if self.tempdir: + shutil.rmtree(self.tempdir) + + def getpath(self, testfile): + folder = os.path.dirname(sys.modules[self.__module__].__file__) + return os.path.join(folder, "data", testfile) + + def temp_dir(self): + if not self.tempdir: + self.tempdir = tempfile.mkdtemp() + + def temp_font(self, font_path, file_name): + self.temp_dir() + temppath = os.path.join(self.tempdir, file_name) + shutil.copy2(font_path, temppath) + return temppath diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/textTools.py b/.venv/lib/python3.9/site-packages/fontTools/misc/textTools.py new file mode 100644 index 00000000..bf75bcbd --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/textTools.py @@ -0,0 +1,154 @@ +"""fontTools.misc.textTools.py -- miscellaneous routines.""" + + +import ast +import string + + +# alias kept for backward compatibility +safeEval = ast.literal_eval + + +class Tag(str): + @staticmethod + def transcode(blob): + if isinstance(blob, bytes): + blob = blob.decode("latin-1") + return blob + + def __new__(self, content): + return str.__new__(self, self.transcode(content)) + + def __ne__(self, other): + return not self.__eq__(other) + + def __eq__(self, other): + return str.__eq__(self, self.transcode(other)) + + def __hash__(self): + return str.__hash__(self) + + def tobytes(self): + return self.encode("latin-1") + + +def readHex(content): + """Convert a list of hex strings to binary data.""" + return deHexStr(strjoin(chunk for chunk in content if isinstance(chunk, str))) + + +def deHexStr(hexdata): + """Convert a hex string to binary data.""" + hexdata = strjoin(hexdata.split()) + if len(hexdata) % 2: + hexdata = hexdata + "0" + data = [] + for i in range(0, len(hexdata), 2): + 
data.append(bytechr(int(hexdata[i:i+2], 16))) + return bytesjoin(data) + + +def hexStr(data): + """Convert binary data to a hex string.""" + h = string.hexdigits + r = '' + for c in data: + i = byteord(c) + r = r + h[(i >> 4) & 0xF] + h[i & 0xF] + return r + + +def num2binary(l, bits=32): + items = [] + binary = "" + for i in range(bits): + if l & 0x1: + binary = "1" + binary + else: + binary = "0" + binary + l = l >> 1 + if not ((i+1) % 8): + items.append(binary) + binary = "" + if binary: + items.append(binary) + items.reverse() + assert l in (0, -1), "number doesn't fit in number of bits" + return ' '.join(items) + + +def binary2num(bin): + bin = strjoin(bin.split()) + l = 0 + for digit in bin: + l = l << 1 + if digit != "0": + l = l | 0x1 + return l + + +def caselessSort(alist): + """Return a sorted copy of a list. If there are only strings + in the list, it will not consider case. + """ + + try: + return sorted(alist, key=lambda a: (a.lower(), a)) + except TypeError: + return sorted(alist) + + +def pad(data, size): + r""" Pad byte string 'data' with null bytes until its length is a + multiple of 'size'. 
+ + >>> len(pad(b'abcd', 4)) + 4 + >>> len(pad(b'abcde', 2)) + 6 + >>> len(pad(b'abcde', 4)) + 8 + >>> pad(b'abcdef', 4) == b'abcdef\x00\x00' + True + """ + data = tobytes(data) + if size > 1: + remainder = len(data) % size + if remainder: + data += b"\0" * (size - remainder) + return data + + +def tostr(s, encoding="ascii", errors="strict"): + if not isinstance(s, str): + return s.decode(encoding, errors) + else: + return s + + +def tobytes(s, encoding="ascii", errors="strict"): + if isinstance(s, str): + return s.encode(encoding, errors) + else: + return bytes(s) + + +def bytechr(n): + return bytes([n]) + + +def byteord(c): + return c if isinstance(c, int) else ord(c) + + +def strjoin(iterable, joiner=""): + return tostr(joiner).join(iterable) + + +def bytesjoin(iterable, joiner=b""): + return tobytes(joiner).join(tobytes(item) for item in iterable) + + +if __name__ == "__main__": + import doctest, sys + sys.exit(doctest.testmod().failed) diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/timeTools.py b/.venv/lib/python3.9/site-packages/fontTools/misc/timeTools.py new file mode 100644 index 00000000..f4b84f6e --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/timeTools.py @@ -0,0 +1,68 @@ +"""fontTools.misc.timeTools.py -- tools for working with OpenType timestamps. +""" + +import os +import time +from datetime import datetime, timezone +import calendar + + +epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0)) + +DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] +MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun", + "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] + + +def asctime(t=None): + """ + Convert a tuple or struct_time representing a time as returned by gmtime() + or localtime() to a 24-character string of the following form: + + >>> asctime(time.gmtime(0)) + 'Thu Jan 1 00:00:00 1970' + + If t is not provided, the current time as returned by localtime() is used. 
+ Locale information is not used by asctime(). + + This is meant to normalise the output of the built-in time.asctime() across + different platforms and Python versions. + In Python 3.x, the day of the month is right-justified, whereas on Windows + Python 2.7 it is padded with zeros. + + See https://github.com/fonttools/fonttools/issues/455 + """ + if t is None: + t = time.localtime() + s = "%s %s %2s %s" % ( + DAYNAMES[t.tm_wday], MONTHNAMES[t.tm_mon], t.tm_mday, + time.strftime("%H:%M:%S %Y", t)) + return s + + +def timestampToString(value): + return asctime(time.gmtime(max(0, value + epoch_diff))) + +def timestampFromString(value): + wkday, mnth = value[:7].split() + t = datetime.strptime(value[7:], ' %d %H:%M:%S %Y') + t = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc) + wkday_idx = DAYNAMES.index(wkday) + assert t.weekday() == wkday_idx, '"' + value + '" has inconsistent weekday' + return int(t.timestamp()) - epoch_diff + +def timestampNow(): + # https://reproducible-builds.org/specs/source-date-epoch/ + source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH") + if source_date_epoch is not None: + return int(source_date_epoch) - epoch_diff + return int(time.time() - epoch_diff) + +def timestampSinceEpoch(value): + return int(value - epoch_diff) + + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/transform.py b/.venv/lib/python3.9/site-packages/fontTools/misc/transform.py new file mode 100644 index 00000000..22ad776a --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/transform.py @@ -0,0 +1,378 @@ +"""Affine 2D transformation matrix class. + +The Transform class implements various transformation matrix operations, +both on the matrix itself, as well as on 2D coordinates. + +Transform instances are effectively immutable: all methods that operate on the +transformation itself always return a new instance. 
This has as the +interesting side effect that Transform instances are hashable, ie. they can be +used as dictionary keys. + +This module exports the following symbols: + + Transform -- this is the main class + Identity -- Transform instance set to the identity transformation + Offset -- Convenience function that returns a translating transformation + Scale -- Convenience function that returns a scaling transformation + +Examples: + + >>> t = Transform(2, 0, 0, 3, 0, 0) + >>> t.transformPoint((100, 100)) + (200, 300) + >>> t = Scale(2, 3) + >>> t.transformPoint((100, 100)) + (200, 300) + >>> t.transformPoint((0, 0)) + (0, 0) + >>> t = Offset(2, 3) + >>> t.transformPoint((100, 100)) + (102, 103) + >>> t.transformPoint((0, 0)) + (2, 3) + >>> t2 = t.scale(0.5) + >>> t2.transformPoint((100, 100)) + (52.0, 53.0) + >>> import math + >>> t3 = t2.rotate(math.pi / 2) + >>> t3.transformPoint((0, 0)) + (2.0, 3.0) + >>> t3.transformPoint((100, 100)) + (-48.0, 53.0) + >>> t = Identity.scale(0.5).translate(100, 200).skew(0.1, 0.2) + >>> t.transformPoints([(0, 0), (1, 1), (100, 100)]) + [(50.0, 100.0), (50.550167336042726, 100.60135501775433), (105.01673360427253, 160.13550177543362)] + >>> +""" + +from typing import NamedTuple + + +__all__ = ["Transform", "Identity", "Offset", "Scale"] + + +_EPSILON = 1e-15 +_ONE_EPSILON = 1 - _EPSILON +_MINUS_ONE_EPSILON = -1 + _EPSILON + + +def _normSinCos(v): + if abs(v) < _EPSILON: + v = 0 + elif v > _ONE_EPSILON: + v = 1 + elif v < _MINUS_ONE_EPSILON: + v = -1 + return v + + +class Transform(NamedTuple): + + """2x2 transformation matrix plus offset, a.k.a. Affine transform. + Transform instances are immutable: all transforming methods, eg. + rotate(), return a new Transform instance. 
+ + Examples: + >>> t = Transform() + >>> t + + >>> t.scale(2) + + >>> t.scale(2.5, 5.5) + + >>> + >>> t.scale(2, 3).transformPoint((100, 100)) + (200, 300) + + Transform's constructor takes six arguments, all of which are + optional, and can be used as keyword arguments: + >>> Transform(12) + + >>> Transform(dx=12) + + >>> Transform(yx=12) + + + Transform instances also behave like sequences of length 6: + >>> len(Identity) + 6 + >>> list(Identity) + [1, 0, 0, 1, 0, 0] + >>> tuple(Identity) + (1, 0, 0, 1, 0, 0) + + Transform instances are comparable: + >>> t1 = Identity.scale(2, 3).translate(4, 6) + >>> t2 = Identity.translate(8, 18).scale(2, 3) + >>> t1 == t2 + 1 + + But beware of floating point rounding errors: + >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6) + >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3) + >>> t1 + + >>> t2 + + >>> t1 == t2 + 0 + + Transform instances are hashable, meaning you can use them as + keys in dictionaries: + >>> d = {Scale(12, 13): None} + >>> d + {: None} + + But again, beware of floating point rounding errors: + >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6) + >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3) + >>> t1 + + >>> t2 + + >>> d = {t1: None} + >>> d + {: None} + >>> d[t2] + Traceback (most recent call last): + File "", line 1, in ? + KeyError: + """ + + xx: float = 1 + xy: float = 0 + yx: float = 0 + yy: float = 1 + dx: float = 0 + dy: float = 0 + + def transformPoint(self, p): + """Transform a point. + + Example: + >>> t = Transform() + >>> t = t.scale(2.5, 5.5) + >>> t.transformPoint((100, 100)) + (250.0, 550.0) + """ + (x, y) = p + xx, xy, yx, yy, dx, dy = self + return (xx*x + yx*y + dx, xy*x + yy*y + dy) + + def transformPoints(self, points): + """Transform a list of points. 
+ + Example: + >>> t = Scale(2, 3) + >>> t.transformPoints([(0, 0), (0, 100), (100, 100), (100, 0)]) + [(0, 0), (0, 300), (200, 300), (200, 0)] + >>> + """ + xx, xy, yx, yy, dx, dy = self + return [(xx*x + yx*y + dx, xy*x + yy*y + dy) for x, y in points] + + def transformVector(self, v): + """Transform an (dx, dy) vector, treating translation as zero. + + Example: + >>> t = Transform(2, 0, 0, 2, 10, 20) + >>> t.transformVector((3, -4)) + (6, -8) + >>> + """ + (dx, dy) = v + xx, xy, yx, yy = self[:4] + return (xx*dx + yx*dy, xy*dx + yy*dy) + + def transformVectors(self, vectors): + """Transform a list of (dx, dy) vector, treating translation as zero. + + Example: + >>> t = Transform(2, 0, 0, 2, 10, 20) + >>> t.transformVectors([(3, -4), (5, -6)]) + [(6, -8), (10, -12)] + >>> + """ + xx, xy, yx, yy = self[:4] + return [(xx*dx + yx*dy, xy*dx + yy*dy) for dx, dy in vectors] + + def translate(self, x=0, y=0): + """Return a new transformation, translated (offset) by x, y. + + Example: + >>> t = Transform() + >>> t.translate(20, 30) + + >>> + """ + return self.transform((1, 0, 0, 1, x, y)) + + def scale(self, x=1, y=None): + """Return a new transformation, scaled by x, y. The 'y' argument + may be None, which implies to use the x value for y as well. + + Example: + >>> t = Transform() + >>> t.scale(5) + + >>> t.scale(5, 6) + + >>> + """ + if y is None: + y = x + return self.transform((x, 0, 0, y, 0, 0)) + + def rotate(self, angle): + """Return a new transformation, rotated by 'angle' (radians). + + Example: + >>> import math + >>> t = Transform() + >>> t.rotate(math.pi / 2) + + >>> + """ + import math + c = _normSinCos(math.cos(angle)) + s = _normSinCos(math.sin(angle)) + return self.transform((c, s, -s, c, 0, 0)) + + def skew(self, x=0, y=0): + """Return a new transformation, skewed by x and y. 
+ + Example: + >>> import math + >>> t = Transform() + >>> t.skew(math.pi / 4) + + >>> + """ + import math + return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0)) + + def transform(self, other): + """Return a new transformation, transformed by another + transformation. + + Example: + >>> t = Transform(2, 0, 0, 3, 1, 6) + >>> t.transform((4, 3, 2, 1, 5, 6)) + + >>> + """ + xx1, xy1, yx1, yy1, dx1, dy1 = other + xx2, xy2, yx2, yy2, dx2, dy2 = self + return self.__class__( + xx1*xx2 + xy1*yx2, + xx1*xy2 + xy1*yy2, + yx1*xx2 + yy1*yx2, + yx1*xy2 + yy1*yy2, + xx2*dx1 + yx2*dy1 + dx2, + xy2*dx1 + yy2*dy1 + dy2) + + def reverseTransform(self, other): + """Return a new transformation, which is the other transformation + transformed by self. self.reverseTransform(other) is equivalent to + other.transform(self). + + Example: + >>> t = Transform(2, 0, 0, 3, 1, 6) + >>> t.reverseTransform((4, 3, 2, 1, 5, 6)) + + >>> Transform(4, 3, 2, 1, 5, 6).transform((2, 0, 0, 3, 1, 6)) + + >>> + """ + xx1, xy1, yx1, yy1, dx1, dy1 = self + xx2, xy2, yx2, yy2, dx2, dy2 = other + return self.__class__( + xx1*xx2 + xy1*yx2, + xx1*xy2 + xy1*yy2, + yx1*xx2 + yy1*yx2, + yx1*xy2 + yy1*yy2, + xx2*dx1 + yx2*dy1 + dx2, + xy2*dx1 + yy2*dy1 + dy2) + + def inverse(self): + """Return the inverse transformation. 
+ + Example: + >>> t = Identity.translate(2, 3).scale(4, 5) + >>> t.transformPoint((10, 20)) + (42, 103) + >>> it = t.inverse() + >>> it.transformPoint((42, 103)) + (10.0, 20.0) + >>> + """ + if self == Identity: + return self + xx, xy, yx, yy, dx, dy = self + det = xx*yy - yx*xy + xx, xy, yx, yy = yy/det, -xy/det, -yx/det, xx/det + dx, dy = -xx*dx - yx*dy, -xy*dx - yy*dy + return self.__class__(xx, xy, yx, yy, dx, dy) + + def toPS(self): + """Return a PostScript representation: + >>> t = Identity.scale(2, 3).translate(4, 5) + >>> t.toPS() + '[2 0 0 3 8 15]' + >>> + """ + return "[%s %s %s %s %s %s]" % self + + def __bool__(self): + """Returns True if transform is not identity, False otherwise. + >>> bool(Identity) + False + >>> bool(Transform()) + False + >>> bool(Scale(1.)) + False + >>> bool(Scale(2)) + True + >>> bool(Offset()) + False + >>> bool(Offset(0)) + False + >>> bool(Offset(2)) + True + """ + return self != Identity + + def __repr__(self): + return "<%s [%g %g %g %g %g %g]>" % ((self.__class__.__name__,) + self) + + +Identity = Transform() + +def Offset(x=0, y=0): + """Return the identity transformation offset by x, y. + + Example: + >>> Offset(2, 3) + + >>> + """ + return Transform(1, 0, 0, 1, x, y) + +def Scale(x, y=None): + """Return the identity transformation scaled by x, y. The 'y' argument + may be None, which implies to use the x value for y as well. 
+ + Example: + >>> Scale(2, 3) + + >>> + """ + if y is None: + y = x + return Transform(x, 0, 0, y, 0, 0) + + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/vector.py b/.venv/lib/python3.9/site-packages/fontTools/misc/vector.py new file mode 100644 index 00000000..81c14841 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/vector.py @@ -0,0 +1,143 @@ +from numbers import Number +import math +import operator +import warnings + + +__all__ = ["Vector"] + + +class Vector(tuple): + + """A math-like vector. + + Represents an n-dimensional numeric vector. ``Vector`` objects support + vector addition and subtraction, scalar multiplication and division, + negation, rounding, and comparison tests. + """ + + __slots__ = () + + def __new__(cls, values, keep=False): + if keep is not False: + warnings.warn( + "the 'keep' argument has been deprecated", + DeprecationWarning, + ) + if type(values) == Vector: + # No need to create a new object + return values + return super().__new__(cls, values) + + def __repr__(self): + return f"{self.__class__.__name__}({super().__repr__()})" + + def _vectorOp(self, other, op): + if isinstance(other, Vector): + assert len(self) == len(other) + return self.__class__(op(a, b) for a, b in zip(self, other)) + if isinstance(other, Number): + return self.__class__(op(v, other) for v in self) + raise NotImplementedError() + + def _scalarOp(self, other, op): + if isinstance(other, Number): + return self.__class__(op(v, other) for v in self) + raise NotImplementedError() + + def _unaryOp(self, op): + return self.__class__(op(v) for v in self) + + def __add__(self, other): + return self._vectorOp(other, operator.add) + + __radd__ = __add__ + + def __sub__(self, other): + return self._vectorOp(other, operator.sub) + + def __rsub__(self, other): + return self._vectorOp(other, _operator_rsub) + + def __mul__(self, other): + return 
self._scalarOp(other, operator.mul) + + __rmul__ = __mul__ + + def __truediv__(self, other): + return self._scalarOp(other, operator.truediv) + + def __rtruediv__(self, other): + return self._scalarOp(other, _operator_rtruediv) + + def __pos__(self): + return self._unaryOp(operator.pos) + + def __neg__(self): + return self._unaryOp(operator.neg) + + def __round__(self, *, round=round): + return self._unaryOp(round) + + def __eq__(self, other): + if isinstance(other, list): + # bw compat Vector([1, 2, 3]) == [1, 2, 3] + other = tuple(other) + return super().__eq__(other) + + def __ne__(self, other): + return not self.__eq__(other) + + def __bool__(self): + return any(self) + + __nonzero__ = __bool__ + + def __abs__(self): + return math.sqrt(sum(x * x for x in self)) + + def length(self): + """Return the length of the vector. Equivalent to abs(vector).""" + return abs(self) + + def normalized(self): + """Return the normalized vector of the vector.""" + return self / abs(self) + + def dot(self, other): + """Performs vector dot product, returning the sum of + ``a[0] * b[0], a[1] * b[1], ...``""" + assert len(self) == len(other) + return sum(a * b for a, b in zip(self, other)) + + # Deprecated methods/properties + + def toInt(self): + warnings.warn( + "the 'toInt' method has been deprecated, use round(vector) instead", + DeprecationWarning, + ) + return self.__round__() + + @property + def values(self): + warnings.warn( + "the 'values' attribute has been deprecated, use " + "the vector object itself instead", + DeprecationWarning, + ) + return list(self) + + @values.setter + def values(self, values): + raise AttributeError( + "can't set attribute, the 'values' attribute has been deprecated", + ) + + +def _operator_rsub(a, b): + return operator.sub(b, a) + + +def _operator_rtruediv(a, b): + return operator.truediv(b, a) diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/xmlReader.py b/.venv/lib/python3.9/site-packages/fontTools/misc/xmlReader.py new file mode 
100644 index 00000000..6ec50de4 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/xmlReader.py @@ -0,0 +1,171 @@ +from fontTools import ttLib +from fontTools.misc.textTools import safeEval +from fontTools.ttLib.tables.DefaultTable import DefaultTable +import sys +import os +import logging + + +log = logging.getLogger(__name__) + +class TTXParseError(Exception): pass + +BUFSIZE = 0x4000 + + +class XMLReader(object): + + def __init__(self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False): + if fileOrPath == '-': + fileOrPath = sys.stdin + if not hasattr(fileOrPath, "read"): + self.file = open(fileOrPath, "rb") + self._closeStream = True + else: + # assume readable file object + self.file = fileOrPath + self._closeStream = False + self.ttFont = ttFont + self.progress = progress + if quiet is not None: + from fontTools.misc.loggingTools import deprecateArgument + deprecateArgument("quiet", "configure logging instead") + self.quiet = quiet + self.root = None + self.contentStack = [] + self.contentOnly = contentOnly + self.stackSize = 0 + + def read(self, rootless=False): + if rootless: + self.stackSize += 1 + if self.progress: + self.file.seek(0, 2) + fileSize = self.file.tell() + self.progress.set(0, fileSize // 100 or 1) + self.file.seek(0) + self._parseFile(self.file) + if self._closeStream: + self.close() + if rootless: + self.stackSize -= 1 + + def close(self): + self.file.close() + + def _parseFile(self, file): + from xml.parsers.expat import ParserCreate + parser = ParserCreate() + parser.StartElementHandler = self._startElementHandler + parser.EndElementHandler = self._endElementHandler + parser.CharacterDataHandler = self._characterDataHandler + + pos = 0 + while True: + chunk = file.read(BUFSIZE) + if not chunk: + parser.Parse(chunk, 1) + break + pos = pos + len(chunk) + if self.progress: + self.progress.set(pos // 100) + parser.Parse(chunk, 0) + + def _startElementHandler(self, name, attrs): + if self.stackSize == 1 and 
self.contentOnly: + # We already know the table we're parsing, skip + # parsing the table tag and continue to + # stack '2' which begins parsing content + self.contentStack.append([]) + self.stackSize = 2 + return + stackSize = self.stackSize + self.stackSize = stackSize + 1 + subFile = attrs.get("src") + if subFile is not None: + if hasattr(self.file, 'name'): + # if file has a name, get its parent directory + dirname = os.path.dirname(self.file.name) + else: + # else fall back to using the current working directory + dirname = os.getcwd() + subFile = os.path.join(dirname, subFile) + if not stackSize: + if name != "ttFont": + raise TTXParseError("illegal root tag: %s" % name) + if self.ttFont.reader is None and not self.ttFont.tables: + sfntVersion = attrs.get("sfntVersion") + if sfntVersion is not None: + if len(sfntVersion) != 4: + sfntVersion = safeEval('"' + sfntVersion + '"') + self.ttFont.sfntVersion = sfntVersion + self.contentStack.append([]) + elif stackSize == 1: + if subFile is not None: + subReader = XMLReader(subFile, self.ttFont, self.progress) + subReader.read() + self.contentStack.append([]) + return + tag = ttLib.xmlToTag(name) + msg = "Parsing '%s' table..." % tag + if self.progress: + self.progress.setLabel(msg) + log.info(msg) + if tag == "GlyphOrder": + tableClass = ttLib.GlyphOrder + elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])): + tableClass = DefaultTable + else: + tableClass = ttLib.getTableClass(tag) + if tableClass is None: + tableClass = DefaultTable + if tag == 'loca' and tag in self.ttFont: + # Special-case the 'loca' table as we need the + # original if the 'glyf' table isn't recompiled. 
+ self.currentTable = self.ttFont[tag] + else: + self.currentTable = tableClass(tag) + self.ttFont[tag] = self.currentTable + self.contentStack.append([]) + elif stackSize == 2 and subFile is not None: + subReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True) + subReader.read() + self.contentStack.append([]) + self.root = subReader.root + elif stackSize == 2: + self.contentStack.append([]) + self.root = (name, attrs, self.contentStack[-1]) + else: + l = [] + self.contentStack[-1].append((name, attrs, l)) + self.contentStack.append(l) + + def _characterDataHandler(self, data): + if self.stackSize > 1: + self.contentStack[-1].append(data) + + def _endElementHandler(self, name): + self.stackSize = self.stackSize - 1 + del self.contentStack[-1] + if not self.contentOnly: + if self.stackSize == 1: + self.root = None + elif self.stackSize == 2: + name, attrs, content = self.root + self.currentTable.fromXML(name, attrs, content, self.ttFont) + self.root = None + + +class ProgressPrinter(object): + + def __init__(self, title, maxval=100): + print(title) + + def set(self, val, maxval=None): + pass + + def increment(self, val=1): + pass + + def setLabel(self, text): + print(text) diff --git a/.venv/lib/python3.9/site-packages/fontTools/misc/xmlWriter.py b/.venv/lib/python3.9/site-packages/fontTools/misc/xmlWriter.py new file mode 100644 index 00000000..9e30fa33 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/misc/xmlWriter.py @@ -0,0 +1,194 @@ +"""xmlWriter.py -- Simple XML authoring class""" + +from fontTools.misc.textTools import byteord, strjoin, tobytes, tostr +import sys +import os +import string + +INDENT = " " + + +class XMLWriter(object): + + def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None, encoding="utf_8", + newlinestr="\n"): + if encoding.lower().replace('-','').replace('_','') != 'utf8': + raise Exception('Only UTF-8 encoding is supported.') + if fileOrPath == '-': + fileOrPath = sys.stdout + if not 
hasattr(fileOrPath, "write"): + self.filename = fileOrPath + self.file = open(fileOrPath, "wb") + self._closeStream = True + else: + self.filename = None + # assume writable file object + self.file = fileOrPath + self._closeStream = False + + # Figure out if writer expects bytes or unicodes + try: + # The bytes check should be first. See: + # https://github.com/fonttools/fonttools/pull/233 + self.file.write(b'') + self.totype = tobytes + except TypeError: + # This better not fail. + self.file.write('') + self.totype = tostr + self.indentwhite = self.totype(indentwhite) + if newlinestr is None: + self.newlinestr = self.totype(os.linesep) + else: + self.newlinestr = self.totype(newlinestr) + self.indentlevel = 0 + self.stack = [] + self.needindent = 1 + self.idlefunc = idlefunc + self.idlecounter = 0 + self._writeraw('') + self.newline() + + def __enter__(self): + return self + + def __exit__(self, exception_type, exception_value, traceback): + self.close() + + def close(self): + if self._closeStream: + self.file.close() + + def write(self, string, indent=True): + """Writes text.""" + self._writeraw(escape(string), indent=indent) + + def writecdata(self, string): + """Writes text in a CDATA section.""" + self._writeraw("") + + def write8bit(self, data, strip=False): + """Writes a bytes() sequence into the XML, escaping + non-ASCII bytes. 
When this is read in xmlReader, + the original bytes can be recovered by encoding to + 'latin-1'.""" + self._writeraw(escape8bit(data), strip=strip) + + def write_noindent(self, string): + """Writes text without indentation.""" + self._writeraw(escape(string), indent=False) + + def _writeraw(self, data, indent=True, strip=False): + """Writes bytes, possibly indented.""" + if indent and self.needindent: + self.file.write(self.indentlevel * self.indentwhite) + self.needindent = 0 + s = self.totype(data, encoding="utf_8") + if (strip): + s = s.strip() + self.file.write(s) + + def newline(self): + self.file.write(self.newlinestr) + self.needindent = 1 + idlecounter = self.idlecounter + if not idlecounter % 100 and self.idlefunc is not None: + self.idlefunc() + self.idlecounter = idlecounter + 1 + + def comment(self, data): + data = escape(data) + lines = data.split("\n") + self._writeraw("") + + def simpletag(self, _TAG_, *args, **kwargs): + attrdata = self.stringifyattrs(*args, **kwargs) + data = "<%s%s/>" % (_TAG_, attrdata) + self._writeraw(data) + + def begintag(self, _TAG_, *args, **kwargs): + attrdata = self.stringifyattrs(*args, **kwargs) + data = "<%s%s>" % (_TAG_, attrdata) + self._writeraw(data) + self.stack.append(_TAG_) + self.indent() + + def endtag(self, _TAG_): + assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag" + del self.stack[-1] + self.dedent() + data = "" % _TAG_ + self._writeraw(data) + + def dumphex(self, data): + linelength = 16 + hexlinelength = linelength * 2 + chunksize = 8 + for i in range(0, len(data), linelength): + hexline = hexStr(data[i:i+linelength]) + line = "" + white = "" + for j in range(0, hexlinelength, chunksize): + line = line + white + hexline[j:j+chunksize] + white = " " + self._writeraw(line) + self.newline() + + def indent(self): + self.indentlevel = self.indentlevel + 1 + + def dedent(self): + assert self.indentlevel > 0 + self.indentlevel = self.indentlevel - 1 + + def stringifyattrs(self, *args, 
**kwargs): + if kwargs: + assert not args + attributes = sorted(kwargs.items()) + elif args: + assert len(args) == 1 + attributes = args[0] + else: + return "" + data = "" + for attr, value in attributes: + if not isinstance(value, (bytes, str)): + value = str(value) + data = data + ' %s="%s"' % (attr, escapeattr(value)) + return data + + +def escape(data): + data = tostr(data, 'utf_8') + data = data.replace("&", "&") + data = data.replace("<", "<") + data = data.replace(">", ">") + data = data.replace("\r", " ") + return data + +def escapeattr(data): + data = escape(data) + data = data.replace('"', """) + return data + +def escape8bit(data): + """Input is Unicode string.""" + def escapechar(c): + n = ord(c) + if 32 <= n <= 127 and c not in "<&>": + return c + else: + return "&#" + repr(n) + ";" + return strjoin(map(escapechar, data.decode('latin-1'))) + +def hexStr(s): + h = string.hexdigits + r = '' + for c in s: + i = byteord(c) + r = r + h[(i >> 4) & 0xF] + h[i & 0xF] + return r diff --git a/.venv/lib/python3.9/site-packages/fontTools/mtiLib/__init__.py b/.venv/lib/python3.9/site-packages/fontTools/mtiLib/__init__.py new file mode 100644 index 00000000..667a216d --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/mtiLib/__init__.py @@ -0,0 +1,1230 @@ +#!/usr/bin/python + +# FontDame-to-FontTools for OpenType Layout tables +# +# Source language spec is available at: +# http://monotype.github.io/OpenType_Table_Source/otl_source.html +# https://github.com/Monotype/OpenType_Table_Source/ + +from fontTools import ttLib +from fontTools.ttLib.tables._c_m_a_p import cmap_classes +from fontTools.ttLib.tables import otTables as ot +from fontTools.ttLib.tables.otBase import ValueRecord, valueRecordFormatDict +from fontTools.otlLib import builder as otl +from contextlib import contextmanager +from operator import setitem +import logging + +class MtiLibError(Exception): pass +class ReferenceNotFoundError(MtiLibError): pass +class 
FeatureNotFoundError(ReferenceNotFoundError): pass +class LookupNotFoundError(ReferenceNotFoundError): pass + + +log = logging.getLogger("fontTools.mtiLib") + + +def makeGlyph(s): + if s[:2] in ['U ', 'u ']: + return ttLib.TTFont._makeGlyphName(int(s[2:], 16)) + elif s[:2] == '# ': + return "glyph%.5d" % int(s[2:]) + assert s.find(' ') < 0, "Space found in glyph name: %s" % s + assert s, "Glyph name is empty" + return s + +def makeGlyphs(l): + return [makeGlyph(g) for g in l] + +def mapLookup(sym, mapping): + # Lookups are addressed by name. So resolved them using a map if available. + # Fallback to parsing as lookup index if a map isn't provided. + if mapping is not None: + try: + idx = mapping[sym] + except KeyError: + raise LookupNotFoundError(sym) + else: + idx = int(sym) + return idx + +def mapFeature(sym, mapping): + # Features are referenced by index according the spec. So, if symbol is an + # integer, use it directly. Otherwise look up in the map if provided. + try: + idx = int(sym) + except ValueError: + try: + idx = mapping[sym] + except KeyError: + raise FeatureNotFoundError(sym) + return idx + +def setReference(mapper, mapping, sym, setter, collection, key): + try: + mapped = mapper(sym, mapping) + except ReferenceNotFoundError as e: + try: + if mapping is not None: + mapping.addDeferredMapping(lambda ref: setter(collection, key, ref), sym, e) + return + except AttributeError: + pass + raise + setter(collection, key, mapped) + +class DeferredMapping(dict): + + def __init__(self): + self._deferredMappings = [] + + def addDeferredMapping(self, setter, sym, e): + log.debug("Adding deferred mapping for symbol '%s' %s", sym, type(e).__name__) + self._deferredMappings.append((setter,sym, e)) + + def applyDeferredMappings(self): + for setter,sym,e in self._deferredMappings: + log.debug("Applying deferred mapping for symbol '%s' %s", sym, type(e).__name__) + try: + mapped = self[sym] + except KeyError: + raise e + setter(mapped) + log.debug("Set to %s", mapped) 
+ self._deferredMappings = [] + + +def parseScriptList(lines, featureMap=None): + self = ot.ScriptList() + records = [] + with lines.between('script table'): + for line in lines: + while len(line) < 4: + line.append('') + scriptTag, langSysTag, defaultFeature, features = line + log.debug("Adding script %s language-system %s", scriptTag, langSysTag) + + langSys = ot.LangSys() + langSys.LookupOrder = None + if defaultFeature: + setReference(mapFeature, featureMap, defaultFeature, setattr, langSys, 'ReqFeatureIndex') + else: + langSys.ReqFeatureIndex = 0xFFFF + syms = stripSplitComma(features) + langSys.FeatureIndex = theList = [3] * len(syms) + for i,sym in enumerate(syms): + setReference(mapFeature, featureMap, sym, setitem, theList, i) + langSys.FeatureCount = len(langSys.FeatureIndex) + + script = [s for s in records if s.ScriptTag == scriptTag] + if script: + script = script[0].Script + else: + scriptRec = ot.ScriptRecord() + scriptRec.ScriptTag = scriptTag + scriptRec.Script = ot.Script() + records.append(scriptRec) + script = scriptRec.Script + script.DefaultLangSys = None + script.LangSysRecord = [] + script.LangSysCount = 0 + + if langSysTag == 'default': + script.DefaultLangSys = langSys + else: + langSysRec = ot.LangSysRecord() + langSysRec.LangSysTag = langSysTag + ' '*(4 - len(langSysTag)) + langSysRec.LangSys = langSys + script.LangSysRecord.append(langSysRec) + script.LangSysCount = len(script.LangSysRecord) + + for script in records: + script.Script.LangSysRecord = sorted(script.Script.LangSysRecord, key=lambda rec: rec.LangSysTag) + self.ScriptRecord = sorted(records, key=lambda rec: rec.ScriptTag) + self.ScriptCount = len(self.ScriptRecord) + return self + +def parseFeatureList(lines, lookupMap=None, featureMap=None): + self = ot.FeatureList() + self.FeatureRecord = [] + with lines.between('feature table'): + for line in lines: + name, featureTag, lookups = line + if featureMap is not None: + assert name not in featureMap, "Duplicate feature name: 
%s" % name + featureMap[name] = len(self.FeatureRecord) + # If feature name is integer, make sure it matches its index. + try: + assert int(name) == len(self.FeatureRecord), "%d %d" % (name, len(self.FeatureRecord)) + except ValueError: + pass + featureRec = ot.FeatureRecord() + featureRec.FeatureTag = featureTag + featureRec.Feature = ot.Feature() + self.FeatureRecord.append(featureRec) + feature = featureRec.Feature + feature.FeatureParams = None + syms = stripSplitComma(lookups) + feature.LookupListIndex = theList = [None] * len(syms) + for i,sym in enumerate(syms): + setReference(mapLookup, lookupMap, sym, setitem, theList, i) + feature.LookupCount = len(feature.LookupListIndex) + + self.FeatureCount = len(self.FeatureRecord) + return self + +def parseLookupFlags(lines): + flags = 0 + filterset = None + allFlags = [ + 'righttoleft', + 'ignorebaseglyphs', + 'ignoreligatures', + 'ignoremarks', + 'markattachmenttype', + 'markfiltertype', + ] + while lines.peeks()[0].lower() in allFlags: + line = next(lines) + flag = { + 'righttoleft': 0x0001, + 'ignorebaseglyphs': 0x0002, + 'ignoreligatures': 0x0004, + 'ignoremarks': 0x0008, + }.get(line[0].lower()) + if flag: + assert line[1].lower() in ['yes', 'no'], line[1] + if line[1].lower() == 'yes': + flags |= flag + continue + if line[0].lower() == 'markattachmenttype': + flags |= int(line[1]) << 8 + continue + if line[0].lower() == 'markfiltertype': + flags |= 0x10 + filterset = int(line[1]) + return flags, filterset + +def parseSingleSubst(lines, font, _lookupMap=None): + mapping = {} + for line in lines: + assert len(line) == 2, line + line = makeGlyphs(line) + mapping[line[0]] = line[1] + return otl.buildSingleSubstSubtable(mapping) + +def parseMultiple(lines, font, _lookupMap=None): + mapping = {} + for line in lines: + line = makeGlyphs(line) + mapping[line[0]] = line[1:] + return otl.buildMultipleSubstSubtable(mapping) + +def parseAlternate(lines, font, _lookupMap=None): + mapping = {} + for line in lines: + line = 
makeGlyphs(line) + mapping[line[0]] = line[1:] + return otl.buildAlternateSubstSubtable(mapping) + +def parseLigature(lines, font, _lookupMap=None): + mapping = {} + for line in lines: + assert len(line) >= 2, line + line = makeGlyphs(line) + mapping[tuple(line[1:])] = line[0] + return otl.buildLigatureSubstSubtable(mapping) + +def parseSinglePos(lines, font, _lookupMap=None): + values = {} + for line in lines: + assert len(line) == 3, line + w = line[0].title().replace(' ', '') + assert w in valueRecordFormatDict + g = makeGlyph(line[1]) + v = int(line[2]) + if g not in values: + values[g] = ValueRecord() + assert not hasattr(values[g], w), (g, w) + setattr(values[g], w, v) + return otl.buildSinglePosSubtable(values, font.getReverseGlyphMap()) + +def parsePair(lines, font, _lookupMap=None): + self = ot.PairPos() + self.ValueFormat1 = self.ValueFormat2 = 0 + typ = lines.peeks()[0].split()[0].lower() + if typ in ('left', 'right'): + self.Format = 1 + values = {} + for line in lines: + assert len(line) == 4, line + side = line[0].split()[0].lower() + assert side in ('left', 'right'), side + what = line[0][len(side):].title().replace(' ', '') + mask = valueRecordFormatDict[what][0] + glyph1, glyph2 = makeGlyphs(line[1:3]) + value = int(line[3]) + if not glyph1 in values: values[glyph1] = {} + if not glyph2 in values[glyph1]: values[glyph1][glyph2] = (ValueRecord(),ValueRecord()) + rec2 = values[glyph1][glyph2] + if side == 'left': + self.ValueFormat1 |= mask + vr = rec2[0] + else: + self.ValueFormat2 |= mask + vr = rec2[1] + assert not hasattr(vr, what), (vr, what) + setattr(vr, what, value) + self.Coverage = makeCoverage(set(values.keys()), font) + self.PairSet = [] + for glyph1 in self.Coverage.glyphs: + values1 = values[glyph1] + pairset = ot.PairSet() + records = pairset.PairValueRecord = [] + for glyph2 in sorted(values1.keys(), key=font.getGlyphID): + values2 = values1[glyph2] + pair = ot.PairValueRecord() + pair.SecondGlyph = glyph2 + pair.Value1 = values2[0] + 
pair.Value2 = values2[1] if self.ValueFormat2 else None + records.append(pair) + pairset.PairValueCount = len(pairset.PairValueRecord) + self.PairSet.append(pairset) + self.PairSetCount = len(self.PairSet) + elif typ.endswith('class'): + self.Format = 2 + classDefs = [None, None] + while lines.peeks()[0].endswith("class definition begin"): + typ = lines.peek()[0][:-len("class definition begin")].lower() + idx,klass = { + 'first': (0,ot.ClassDef1), + 'second': (1,ot.ClassDef2), + }[typ] + assert classDefs[idx] is None + classDefs[idx] = parseClassDef(lines, font, klass=klass) + self.ClassDef1, self.ClassDef2 = classDefs + self.Class1Count, self.Class2Count = (1+max(c.classDefs.values()) for c in classDefs) + self.Class1Record = [ot.Class1Record() for i in range(self.Class1Count)] + for rec1 in self.Class1Record: + rec1.Class2Record = [ot.Class2Record() for j in range(self.Class2Count)] + for rec2 in rec1.Class2Record: + rec2.Value1 = ValueRecord() + rec2.Value2 = ValueRecord() + for line in lines: + assert len(line) == 4, line + side = line[0].split()[0].lower() + assert side in ('left', 'right'), side + what = line[0][len(side):].title().replace(' ', '') + mask = valueRecordFormatDict[what][0] + class1, class2, value = (int(x) for x in line[1:4]) + rec2 = self.Class1Record[class1].Class2Record[class2] + if side == 'left': + self.ValueFormat1 |= mask + vr = rec2.Value1 + else: + self.ValueFormat2 |= mask + vr = rec2.Value2 + assert not hasattr(vr, what), (vr, what) + setattr(vr, what, value) + for rec1 in self.Class1Record: + for rec2 in rec1.Class2Record: + rec2.Value1 = ValueRecord(self.ValueFormat1, rec2.Value1) + rec2.Value2 = ValueRecord(self.ValueFormat2, rec2.Value2) \ + if self.ValueFormat2 else None + + self.Coverage = makeCoverage(set(self.ClassDef1.classDefs.keys()), font) + else: + assert 0, typ + return self + +def parseKernset(lines, font, _lookupMap=None): + typ = lines.peeks()[0].split()[0].lower() + if typ in ('left', 'right'): + with 
lines.until(("firstclass definition begin", "secondclass definition begin")): + return parsePair(lines, font) + return parsePair(lines, font) + +def makeAnchor(data, klass=ot.Anchor): + assert len(data) <= 2 + anchor = klass() + anchor.Format = 1 + anchor.XCoordinate,anchor.YCoordinate = intSplitComma(data[0]) + if len(data) > 1 and data[1] != '': + anchor.Format = 2 + anchor.AnchorPoint = int(data[1]) + return anchor + +def parseCursive(lines, font, _lookupMap=None): + records = {} + for line in lines: + assert len(line) in [3,4], line + idx,klass = { + 'entry': (0,ot.EntryAnchor), + 'exit': (1,ot.ExitAnchor), + }[line[0]] + glyph = makeGlyph(line[1]) + if glyph not in records: + records[glyph] = [None,None] + assert records[glyph][idx] is None, (glyph, idx) + records[glyph][idx] = makeAnchor(line[2:], klass) + return otl.buildCursivePosSubtable(records, font.getReverseGlyphMap()) + +def makeMarkRecords(data, coverage, c): + records = [] + for glyph in coverage.glyphs: + klass, anchor = data[glyph] + record = c.MarkRecordClass() + record.Class = klass + setattr(record, c.MarkAnchor, anchor) + records.append(record) + return records + +def makeBaseRecords(data, coverage, c, classCount): + records = [] + idx = {} + for glyph in coverage.glyphs: + idx[glyph] = len(records) + record = c.BaseRecordClass() + anchors = [None] * classCount + setattr(record, c.BaseAnchor, anchors) + records.append(record) + for (glyph,klass),anchor in data.items(): + record = records[idx[glyph]] + anchors = getattr(record, c.BaseAnchor) + assert anchors[klass] is None, (glyph, klass) + anchors[klass] = anchor + return records + +def makeLigatureRecords(data, coverage, c, classCount): + records = [None] * len(coverage.glyphs) + idx = {g:i for i,g in enumerate(coverage.glyphs)} + + for (glyph,klass,compIdx,compCount),anchor in data.items(): + record = records[idx[glyph]] + if record is None: + record = records[idx[glyph]] = ot.LigatureAttach() + record.ComponentCount = compCount + 
record.ComponentRecord = [ot.ComponentRecord() for i in range(compCount)] + for compRec in record.ComponentRecord: + compRec.LigatureAnchor = [None] * classCount + assert record.ComponentCount == compCount, (glyph, record.ComponentCount, compCount) + + anchors = record.ComponentRecord[compIdx - 1].LigatureAnchor + assert anchors[klass] is None, (glyph, compIdx, klass) + anchors[klass] = anchor + return records + +def parseMarkToSomething(lines, font, c): + self = c.Type() + self.Format = 1 + markData = {} + baseData = {} + Data = { + 'mark': (markData, c.MarkAnchorClass), + 'base': (baseData, c.BaseAnchorClass), + 'ligature': (baseData, c.BaseAnchorClass), + } + maxKlass = 0 + for line in lines: + typ = line[0] + assert typ in ('mark', 'base', 'ligature') + glyph = makeGlyph(line[1]) + data, anchorClass = Data[typ] + extraItems = 2 if typ == 'ligature' else 0 + extras = tuple(int(i) for i in line[2:2+extraItems]) + klass = int(line[2+extraItems]) + anchor = makeAnchor(line[3+extraItems:], anchorClass) + if typ == 'mark': + key,value = glyph,(klass,anchor) + else: + key,value = ((glyph,klass)+extras),anchor + assert key not in data, key + data[key] = value + maxKlass = max(maxKlass, klass) + + # Mark + markCoverage = makeCoverage(set(markData.keys()), font, c.MarkCoverageClass) + markArray = c.MarkArrayClass() + markRecords = makeMarkRecords(markData, markCoverage, c) + setattr(markArray, c.MarkRecord, markRecords) + setattr(markArray, c.MarkCount, len(markRecords)) + setattr(self, c.MarkCoverage, markCoverage) + setattr(self, c.MarkArray, markArray) + self.ClassCount = maxKlass + 1 + + # Base + self.classCount = 0 if not baseData else 1+max(k[1] for k,v in baseData.items()) + baseCoverage = makeCoverage(set([k[0] for k in baseData.keys()]), font, c.BaseCoverageClass) + baseArray = c.BaseArrayClass() + if c.Base == 'Ligature': + baseRecords = makeLigatureRecords(baseData, baseCoverage, c, self.classCount) + else: + baseRecords = makeBaseRecords(baseData, 
baseCoverage, c, self.classCount) + setattr(baseArray, c.BaseRecord, baseRecords) + setattr(baseArray, c.BaseCount, len(baseRecords)) + setattr(self, c.BaseCoverage, baseCoverage) + setattr(self, c.BaseArray, baseArray) + + return self + +class MarkHelper(object): + def __init__(self): + for Which in ('Mark', 'Base'): + for What in ('Coverage', 'Array', 'Count', 'Record', 'Anchor'): + key = Which + What + if Which == 'Mark' and What in ('Count', 'Record', 'Anchor'): + value = key + else: + value = getattr(self, Which) + What + if value == 'LigatureRecord': + value = 'LigatureAttach' + setattr(self, key, value) + if What != 'Count': + klass = getattr(ot, value) + setattr(self, key+'Class', klass) + +class MarkToBaseHelper(MarkHelper): + Mark = 'Mark' + Base = 'Base' + Type = ot.MarkBasePos +class MarkToMarkHelper(MarkHelper): + Mark = 'Mark1' + Base = 'Mark2' + Type = ot.MarkMarkPos +class MarkToLigatureHelper(MarkHelper): + Mark = 'Mark' + Base = 'Ligature' + Type = ot.MarkLigPos + +def parseMarkToBase(lines, font, _lookupMap=None): + return parseMarkToSomething(lines, font, MarkToBaseHelper()) +def parseMarkToMark(lines, font, _lookupMap=None): + return parseMarkToSomething(lines, font, MarkToMarkHelper()) +def parseMarkToLigature(lines, font, _lookupMap=None): + return parseMarkToSomething(lines, font, MarkToLigatureHelper()) + +def stripSplitComma(line): + return [s.strip() for s in line.split(',')] if line else [] + +def intSplitComma(line): + return [int(i) for i in line.split(',')] if line else [] + +# Copied from fontTools.subset +class ContextHelper(object): + def __init__(self, klassName, Format): + if klassName.endswith('Subst'): + Typ = 'Sub' + Type = 'Subst' + else: + Typ = 'Pos' + Type = 'Pos' + if klassName.startswith('Chain'): + Chain = 'Chain' + InputIdx = 1 + DataLen = 3 + else: + Chain = '' + InputIdx = 0 + DataLen = 1 + ChainTyp = Chain+Typ + + self.Typ = Typ + self.Type = Type + self.Chain = Chain + self.ChainTyp = ChainTyp + self.InputIdx = 
InputIdx + self.DataLen = DataLen + + self.LookupRecord = Type+'LookupRecord' + + if Format == 1: + Coverage = lambda r: r.Coverage + ChainCoverage = lambda r: r.Coverage + ContextData = lambda r:(None,) + ChainContextData = lambda r:(None, None, None) + SetContextData = None + SetChainContextData = None + RuleData = lambda r:(r.Input,) + ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) + def SetRuleData(r, d): + (r.Input,) = d + (r.GlyphCount,) = (len(x)+1 for x in d) + def ChainSetRuleData(r, d): + (r.Backtrack, r.Input, r.LookAhead) = d + (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2])) + elif Format == 2: + Coverage = lambda r: r.Coverage + ChainCoverage = lambda r: r.Coverage + ContextData = lambda r:(r.ClassDef,) + ChainContextData = lambda r:(r.BacktrackClassDef, + r.InputClassDef, + r.LookAheadClassDef) + def SetContextData(r, d): + (r.ClassDef,) = d + def SetChainContextData(r, d): + (r.BacktrackClassDef, + r.InputClassDef, + r.LookAheadClassDef) = d + RuleData = lambda r:(r.Class,) + ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) + def SetRuleData(r, d): + (r.Class,) = d + (r.GlyphCount,) = (len(x)+1 for x in d) + def ChainSetRuleData(r, d): + (r.Backtrack, r.Input, r.LookAhead) = d + (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2])) + elif Format == 3: + Coverage = lambda r: r.Coverage[0] + ChainCoverage = lambda r: r.InputCoverage[0] + ContextData = None + ChainContextData = None + SetContextData = None + SetChainContextData = None + RuleData = lambda r: r.Coverage + ChainRuleData = lambda r:(r.BacktrackCoverage + + r.InputCoverage + + r.LookAheadCoverage) + def SetRuleData(r, d): + (r.Coverage,) = d + (r.GlyphCount,) = (len(x) for x in d) + def ChainSetRuleData(r, d): + (r.BacktrackCoverage, r.InputCoverage, r.LookAheadCoverage) = d + (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(x) for x in d) + 
else: + assert 0, "unknown format: %s" % Format + + if Chain: + self.Coverage = ChainCoverage + self.ContextData = ChainContextData + self.SetContextData = SetChainContextData + self.RuleData = ChainRuleData + self.SetRuleData = ChainSetRuleData + else: + self.Coverage = Coverage + self.ContextData = ContextData + self.SetContextData = SetContextData + self.RuleData = RuleData + self.SetRuleData = SetRuleData + + if Format == 1: + self.Rule = ChainTyp+'Rule' + self.RuleCount = ChainTyp+'RuleCount' + self.RuleSet = ChainTyp+'RuleSet' + self.RuleSetCount = ChainTyp+'RuleSetCount' + self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else [] + elif Format == 2: + self.Rule = ChainTyp+'ClassRule' + self.RuleCount = ChainTyp+'ClassRuleCount' + self.RuleSet = ChainTyp+'ClassSet' + self.RuleSetCount = ChainTyp+'ClassSetCount' + self.Intersect = lambda glyphs, c, r: (c.intersect_class(glyphs, r) if c + else (set(glyphs) if r == 0 else set())) + + self.ClassDef = 'InputClassDef' if Chain else 'ClassDef' + self.ClassDefIndex = 1 if Chain else 0 + self.Input = 'Input' if Chain else 'Class' + +def parseLookupRecords(items, klassName, lookupMap=None): + klass = getattr(ot, klassName) + lst = [] + for item in items: + rec = klass() + item = stripSplitComma(item) + assert len(item) == 2, item + idx = int(item[0]) + assert idx > 0, idx + rec.SequenceIndex = idx - 1 + setReference(mapLookup, lookupMap, item[1], setattr, rec, 'LookupListIndex') + lst.append(rec) + return lst + +def makeClassDef(classDefs, font, klass=ot.Coverage): + if not classDefs: return None + self = klass() + self.classDefs = dict(classDefs) + return self + +def parseClassDef(lines, font, klass=ot.ClassDef): + classDefs = {} + with lines.between('class definition'): + for line in lines: + glyph = makeGlyph(line[0]) + assert glyph not in classDefs, glyph + classDefs[glyph] = int(line[1]) + return makeClassDef(classDefs, font, klass) + +def makeCoverage(glyphs, font, klass=ot.Coverage): + if not glyphs: 
return None + if isinstance(glyphs, set): + glyphs = sorted(glyphs) + coverage = klass() + coverage.glyphs = sorted(set(glyphs), key=font.getGlyphID) + return coverage + +def parseCoverage(lines, font, klass=ot.Coverage): + glyphs = [] + with lines.between('coverage definition'): + for line in lines: + glyphs.append(makeGlyph(line[0])) + return makeCoverage(glyphs, font, klass) + +def bucketizeRules(self, c, rules, bucketKeys): + buckets = {} + for seq,recs in rules: + buckets.setdefault(seq[c.InputIdx][0], []).append((tuple(s[1 if i==c.InputIdx else 0:] for i,s in enumerate(seq)), recs)) + + rulesets = [] + for firstGlyph in bucketKeys: + if firstGlyph not in buckets: + rulesets.append(None) + continue + thisRules = [] + for seq,recs in buckets[firstGlyph]: + rule = getattr(ot, c.Rule)() + c.SetRuleData(rule, seq) + setattr(rule, c.Type+'Count', len(recs)) + setattr(rule, c.LookupRecord, recs) + thisRules.append(rule) + + ruleset = getattr(ot, c.RuleSet)() + setattr(ruleset, c.Rule, thisRules) + setattr(ruleset, c.RuleCount, len(thisRules)) + rulesets.append(ruleset) + + setattr(self, c.RuleSet, rulesets) + setattr(self, c.RuleSetCount, len(rulesets)) + +def parseContext(lines, font, Type, lookupMap=None): + self = getattr(ot, Type)() + typ = lines.peeks()[0].split()[0].lower() + if typ == 'glyph': + self.Format = 1 + log.debug("Parsing %s format %s", Type, self.Format) + c = ContextHelper(Type, self.Format) + rules = [] + for line in lines: + assert line[0].lower() == 'glyph', line[0] + while len(line) < 1+c.DataLen: line.append('') + seq = tuple(makeGlyphs(stripSplitComma(i)) for i in line[1:1+c.DataLen]) + recs = parseLookupRecords(line[1+c.DataLen:], c.LookupRecord, lookupMap) + rules.append((seq, recs)) + + firstGlyphs = set(seq[c.InputIdx][0] for seq,recs in rules) + self.Coverage = makeCoverage(firstGlyphs, font) + bucketizeRules(self, c, rules, self.Coverage.glyphs) + elif typ.endswith('class'): + self.Format = 2 + log.debug("Parsing %s format %s", Type, 
self.Format) + c = ContextHelper(Type, self.Format) + classDefs = [None] * c.DataLen + while lines.peeks()[0].endswith("class definition begin"): + typ = lines.peek()[0][:-len("class definition begin")].lower() + idx,klass = { + 1: { + '': (0,ot.ClassDef), + }, + 3: { + 'backtrack': (0,ot.BacktrackClassDef), + '': (1,ot.InputClassDef), + 'lookahead': (2,ot.LookAheadClassDef), + }, + }[c.DataLen][typ] + assert classDefs[idx] is None, idx + classDefs[idx] = parseClassDef(lines, font, klass=klass) + c.SetContextData(self, classDefs) + rules = [] + for line in lines: + assert line[0].lower().startswith('class'), line[0] + while len(line) < 1+c.DataLen: line.append('') + seq = tuple(intSplitComma(i) for i in line[1:1+c.DataLen]) + recs = parseLookupRecords(line[1+c.DataLen:], c.LookupRecord, lookupMap) + rules.append((seq, recs)) + firstClasses = set(seq[c.InputIdx][0] for seq,recs in rules) + firstGlyphs = set(g for g,c in classDefs[c.InputIdx].classDefs.items() if c in firstClasses) + self.Coverage = makeCoverage(firstGlyphs, font) + bucketizeRules(self, c, rules, range(max(firstClasses) + 1)) + elif typ.endswith('coverage'): + self.Format = 3 + log.debug("Parsing %s format %s", Type, self.Format) + c = ContextHelper(Type, self.Format) + coverages = tuple([] for i in range(c.DataLen)) + while lines.peeks()[0].endswith("coverage definition begin"): + typ = lines.peek()[0][:-len("coverage definition begin")].lower() + idx,klass = { + 1: { + '': (0,ot.Coverage), + }, + 3: { + 'backtrack': (0,ot.BacktrackCoverage), + 'input': (1,ot.InputCoverage), + 'lookahead': (2,ot.LookAheadCoverage), + }, + }[c.DataLen][typ] + coverages[idx].append(parseCoverage(lines, font, klass=klass)) + c.SetRuleData(self, coverages) + lines = list(lines) + assert len(lines) == 1 + line = lines[0] + assert line[0].lower() == 'coverage', line[0] + recs = parseLookupRecords(line[1:], c.LookupRecord, lookupMap) + setattr(self, c.Type+'Count', len(recs)) + setattr(self, c.LookupRecord, recs) + else: + 
assert 0, typ + return self + +def parseContextSubst(lines, font, lookupMap=None): + return parseContext(lines, font, "ContextSubst", lookupMap=lookupMap) +def parseContextPos(lines, font, lookupMap=None): + return parseContext(lines, font, "ContextPos", lookupMap=lookupMap) +def parseChainedSubst(lines, font, lookupMap=None): + return parseContext(lines, font, "ChainContextSubst", lookupMap=lookupMap) +def parseChainedPos(lines, font, lookupMap=None): + return parseContext(lines, font, "ChainContextPos", lookupMap=lookupMap) + +def parseReverseChainedSubst(lines, font, _lookupMap=None): + self = ot.ReverseChainSingleSubst() + self.Format = 1 + coverages = ([], []) + while lines.peeks()[0].endswith("coverage definition begin"): + typ = lines.peek()[0][:-len("coverage definition begin")].lower() + idx,klass = { + 'backtrack': (0,ot.BacktrackCoverage), + 'lookahead': (1,ot.LookAheadCoverage), + }[typ] + coverages[idx].append(parseCoverage(lines, font, klass=klass)) + self.BacktrackCoverage = coverages[0] + self.BacktrackGlyphCount = len(self.BacktrackCoverage) + self.LookAheadCoverage = coverages[1] + self.LookAheadGlyphCount = len(self.LookAheadCoverage) + mapping = {} + for line in lines: + assert len(line) == 2, line + line = makeGlyphs(line) + mapping[line[0]] = line[1] + self.Coverage = makeCoverage(set(mapping.keys()), font) + self.Substitute = [mapping[k] for k in self.Coverage.glyphs] + self.GlyphCount = len(self.Substitute) + return self + +def parseLookup(lines, tableTag, font, lookupMap=None): + line = lines.expect('lookup') + _, name, typ = line + log.debug("Parsing lookup type %s %s", typ, name) + lookup = ot.Lookup() + lookup.LookupFlag,filterset = parseLookupFlags(lines) + if filterset is not None: + lookup.MarkFilteringSet = filterset + lookup.LookupType, parseLookupSubTable = { + 'GSUB': { + 'single': (1, parseSingleSubst), + 'multiple': (2, parseMultiple), + 'alternate': (3, parseAlternate), + 'ligature': (4, parseLigature), + 'context': (5, 
parseContextSubst), + 'chained': (6, parseChainedSubst), + 'reversechained':(8, parseReverseChainedSubst), + }, + 'GPOS': { + 'single': (1, parseSinglePos), + 'pair': (2, parsePair), + 'kernset': (2, parseKernset), + 'cursive': (3, parseCursive), + 'mark to base': (4, parseMarkToBase), + 'mark to ligature':(5, parseMarkToLigature), + 'mark to mark': (6, parseMarkToMark), + 'context': (7, parseContextPos), + 'chained': (8, parseChainedPos), + }, + }[tableTag][typ] + + with lines.until('lookup end'): + subtables = [] + + while lines.peek(): + with lines.until(('% subtable', 'subtable end')): + while lines.peek(): + subtable = parseLookupSubTable(lines, font, lookupMap) + assert lookup.LookupType == subtable.LookupType + subtables.append(subtable) + if lines.peeks()[0] in ('% subtable', 'subtable end'): + next(lines) + lines.expect('lookup end') + + lookup.SubTable = subtables + lookup.SubTableCount = len(lookup.SubTable) + if lookup.SubTableCount == 0: + # Remove this return when following is fixed: + # https://github.com/fonttools/fonttools/issues/789 + return None + return lookup + +def parseGSUBGPOS(lines, font, tableTag): + container = ttLib.getTableClass(tableTag)() + lookupMap = DeferredMapping() + featureMap = DeferredMapping() + assert tableTag in ('GSUB', 'GPOS') + log.debug("Parsing %s", tableTag) + self = getattr(ot, tableTag)() + self.Version = 0x00010000 + fields = { + 'script table begin': + ('ScriptList', + lambda lines: parseScriptList (lines, featureMap)), + 'feature table begin': + ('FeatureList', + lambda lines: parseFeatureList (lines, lookupMap, featureMap)), + 'lookup': + ('LookupList', + None), + } + for attr,parser in fields.values(): + setattr(self, attr, None) + while lines.peek() is not None: + typ = lines.peek()[0].lower() + if typ not in fields: + log.debug('Skipping %s', lines.peek()) + next(lines) + continue + attr,parser = fields[typ] + if typ == 'lookup': + if self.LookupList is None: + self.LookupList = ot.LookupList() + 
self.LookupList.Lookup = [] + _, name, _ = lines.peek() + lookup = parseLookup(lines, tableTag, font, lookupMap) + if lookupMap is not None: + assert name not in lookupMap, "Duplicate lookup name: %s" % name + lookupMap[name] = len(self.LookupList.Lookup) + else: + assert int(name) == len(self.LookupList.Lookup), "%d %d" % (name, len(self.Lookup)) + self.LookupList.Lookup.append(lookup) + else: + assert getattr(self, attr) is None, attr + setattr(self, attr, parser(lines)) + if self.LookupList: + self.LookupList.LookupCount = len(self.LookupList.Lookup) + if lookupMap is not None: + lookupMap.applyDeferredMappings() + if featureMap is not None: + featureMap.applyDeferredMappings() + container.table = self + return container + +def parseGSUB(lines, font): + return parseGSUBGPOS(lines, font, 'GSUB') +def parseGPOS(lines, font): + return parseGSUBGPOS(lines, font, 'GPOS') + +def parseAttachList(lines, font): + points = {} + with lines.between('attachment list'): + for line in lines: + glyph = makeGlyph(line[0]) + assert glyph not in points, glyph + points[glyph] = [int(i) for i in line[1:]] + return otl.buildAttachList(points, font.getReverseGlyphMap()) + +def parseCaretList(lines, font): + carets = {} + with lines.between('carets'): + for line in lines: + glyph = makeGlyph(line[0]) + assert glyph not in carets, glyph + num = int(line[1]) + thisCarets = [int(i) for i in line[2:]] + assert num == len(thisCarets), line + carets[glyph] = thisCarets + return otl.buildLigCaretList(carets, {}, font.getReverseGlyphMap()) + +def makeMarkFilteringSets(sets, font): + self = ot.MarkGlyphSetsDef() + self.MarkSetTableFormat = 1 + self.MarkSetCount = 1 + max(sets.keys()) + self.Coverage = [None] * self.MarkSetCount + for k,v in sorted(sets.items()): + self.Coverage[k] = makeCoverage(set(v), font) + return self + +def parseMarkFilteringSets(lines, font): + sets = {} + with lines.between('set definition'): + for line in lines: + assert len(line) == 2, line + glyph = 
makeGlyph(line[0]) + # TODO accept set names + st = int(line[1]) + if st not in sets: + sets[st] = [] + sets[st].append(glyph) + return makeMarkFilteringSets(sets, font) + +def parseGDEF(lines, font): + container = ttLib.getTableClass('GDEF')() + log.debug("Parsing GDEF") + self = ot.GDEF() + fields = { + 'class definition begin': + ('GlyphClassDef', + lambda lines, font: parseClassDef(lines, font, klass=ot.GlyphClassDef)), + 'attachment list begin': + ('AttachList', parseAttachList), + 'carets begin': + ('LigCaretList', parseCaretList), + 'mark attachment class definition begin': + ('MarkAttachClassDef', + lambda lines, font: parseClassDef(lines, font, klass=ot.MarkAttachClassDef)), + 'markfilter set definition begin': + ('MarkGlyphSetsDef', parseMarkFilteringSets), + } + for attr,parser in fields.values(): + setattr(self, attr, None) + while lines.peek() is not None: + typ = lines.peek()[0].lower() + if typ not in fields: + log.debug('Skipping %s', typ) + next(lines) + continue + attr,parser = fields[typ] + assert getattr(self, attr) is None, attr + setattr(self, attr, parser(lines, font)) + self.Version = 0x00010000 if self.MarkGlyphSetsDef is None else 0x00010002 + container.table = self + return container + +def parseCmap(lines, font): + container = ttLib.getTableClass('cmap')() + log.debug("Parsing cmap") + tables = [] + while lines.peek() is not None: + lines.expect('cmap subtable %d' % len(tables)) + platId, encId, fmt, lang = [ + parseCmapId(lines, field) + for field in ('platformID', 'encodingID', 'format', 'language')] + table = cmap_classes[fmt](fmt) + table.platformID = platId + table.platEncID = encId + table.language = lang + table.cmap = {} + line = next(lines) + while line[0] != 'end subtable': + table.cmap[int(line[0], 16)] = line[1] + line = next(lines) + tables.append(table) + container.tableVersion = 0 + container.tables = tables + return container + +def parseCmapId(lines, field): + line = next(lines) + assert field == line[0] + return 
int(line[1]) + +def parseTable(lines, font, tableTag=None): + log.debug("Parsing table") + line = lines.peeks() + tag = None + if line[0].split()[0] == 'FontDame': + tag = line[0].split()[1] + elif ' '.join(line[0].split()[:3]) == 'Font Chef Table': + tag = line[0].split()[3] + if tag is not None: + next(lines) + tag = tag.ljust(4) + if tableTag is None: + tableTag = tag + else: + assert tableTag == tag, (tableTag, tag) + + assert tableTag is not None, "Don't know what table to parse and data doesn't specify" + + return { + 'GSUB': parseGSUB, + 'GPOS': parseGPOS, + 'GDEF': parseGDEF, + 'cmap': parseCmap, + }[tableTag](lines, font) + +class Tokenizer(object): + + def __init__(self, f): + # TODO BytesIO / StringIO as needed? also, figure out whether we work on bytes or unicode + lines = iter(f) + try: + self.filename = f.name + except: + self.filename = None + self.lines = iter(lines) + self.line = '' + self.lineno = 0 + self.stoppers = [] + self.buffer = None + + def __iter__(self): + return self + + def _next_line(self): + self.lineno += 1 + line = self.line = next(self.lines) + line = [s.strip() for s in line.split('\t')] + if len(line) == 1 and not line[0]: + del line[0] + if line and not line[-1]: + log.warning('trailing tab found on line %d: %s' % (self.lineno, self.line)) + while line and not line[-1]: + del line[-1] + return line + + def _next_nonempty(self): + while True: + line = self._next_line() + # Skip comments and empty lines + if line and line[0] and (line[0][0] != '%' or line[0] == '% subtable'): + return line + + def _next_buffered(self): + if self.buffer: + ret = self.buffer + self.buffer = None + return ret + else: + return self._next_nonempty() + + def __next__(self): + line = self._next_buffered() + if line[0].lower() in self.stoppers: + self.buffer = line + raise StopIteration + return line + + def next(self): + return self.__next__() + + def peek(self): + if not self.buffer: + try: + self.buffer = self._next_nonempty() + except StopIteration: 
+ return None + if self.buffer[0].lower() in self.stoppers: + return None + return self.buffer + + def peeks(self): + ret = self.peek() + return ret if ret is not None else ('',) + + @contextmanager + def between(self, tag): + start = tag + ' begin' + end = tag + ' end' + self.expectendswith(start) + self.stoppers.append(end) + yield + del self.stoppers[-1] + self.expect(tag + ' end') + + @contextmanager + def until(self, tags): + if type(tags) is not tuple: + tags = (tags,) + self.stoppers.extend(tags) + yield + del self.stoppers[-len(tags):] + + def expect(self, s): + line = next(self) + tag = line[0].lower() + assert tag == s, "Expected '%s', got '%s'" % (s, tag) + return line + + def expectendswith(self, s): + line = next(self) + tag = line[0].lower() + assert tag.endswith(s), "Expected '*%s', got '%s'" % (s, tag) + return line + +def build(f, font, tableTag=None): + """Convert a Monotype font layout file to an OpenType layout object + + A font object must be passed, but this may be a "dummy" font; it is only + used for sorting glyph sets when making coverage tables and to hold the + OpenType layout table while it is being built. + + Args: + f: A file object. + font (TTFont): A font object. + tableTag (string): If provided, asserts that the file contains data for the + given OpenType table. + + Returns: + An object representing the table. (e.g. ``table_G_S_U_B_``) + """ + lines = Tokenizer(f) + return parseTable(lines, font, tableTag=tableTag) + + +def main(args=None, font=None): + """Convert a FontDame OTL file to TTX XML. + + Writes XML output to stdout. + + Args: + args: Command line arguments (``--font``, ``--table``, input files). 
+ """ + import sys + from fontTools import configLogger + from fontTools.misc.testTools import MockFont + + if args is None: + args = sys.argv[1:] + + # configure the library logger (for >= WARNING) + configLogger() + # comment this out to enable debug messages from mtiLib's logger + # log.setLevel(logging.DEBUG) + + import argparse + parser = argparse.ArgumentParser( + "fonttools mtiLib", + description=main.__doc__, + ) + + parser.add_argument('--font', '-f', metavar='FILE', dest="font", + help="Input TTF files (used for glyph classes and sorting coverage tables)") + parser.add_argument('--table', '-t', metavar='TABLE', dest="tableTag", + help="Table to fill (sniffed from input file if not provided)") + parser.add_argument('inputs', metavar='FILE', type=str, nargs='+', + help="Input FontDame .txt files") + + args = parser.parse_args(args) + + if font is None: + if args.font: + font = ttLib.TTFont(args.font) + else: + font = MockFont() + + for f in args.inputs: + log.debug("Processing %s", f) + with open(f, 'rt', encoding="utf-8") as f: + table = build(f, font, tableTag=args.tableTag) + blob = table.compile(font) # Make sure it compiles + decompiled = table.__class__() + decompiled.decompile(blob, font) # Make sure it decompiles! 
+ + #continue + from fontTools.misc import xmlWriter + tag = table.tableTag + writer = xmlWriter.XMLWriter(sys.stdout) + writer.begintag(tag) + writer.newline() + #table.toXML(writer, font) + decompiled.toXML(writer, font) + writer.endtag(tag) + writer.newline() + + +if __name__ == '__main__': + import sys + sys.exit(main()) diff --git a/.venv/lib/python3.9/site-packages/fontTools/mtiLib/__main__.py b/.venv/lib/python3.9/site-packages/fontTools/mtiLib/__main__.py new file mode 100644 index 00000000..fe6b638b --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/mtiLib/__main__.py @@ -0,0 +1,5 @@ +import sys +from fontTools.mtiLib import main + +if __name__ == '__main__': + sys.exit(main()) diff --git a/.venv/lib/python3.9/site-packages/fontTools/mtiLib/__pycache__/__init__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/mtiLib/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..8ed02edb Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/mtiLib/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/mtiLib/__pycache__/__main__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/mtiLib/__pycache__/__main__.cpython-39.pyc new file mode 100644 index 00000000..578bb9c2 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/mtiLib/__pycache__/__main__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/otlLib/__init__.py b/.venv/lib/python3.9/site-packages/fontTools/otlLib/__init__.py new file mode 100644 index 00000000..12e414fc --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/otlLib/__init__.py @@ -0,0 +1 @@ +"""OpenType Layout-related functionality.""" diff --git a/.venv/lib/python3.9/site-packages/fontTools/otlLib/__pycache__/__init__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/otlLib/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..ac904921 Binary 
files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/otlLib/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/otlLib/__pycache__/builder.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/otlLib/__pycache__/builder.cpython-39.pyc new file mode 100644 index 00000000..9bd34ae5 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/otlLib/__pycache__/builder.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/otlLib/__pycache__/error.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/otlLib/__pycache__/error.cpython-39.pyc new file mode 100644 index 00000000..cfbdb07b Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/otlLib/__pycache__/error.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/otlLib/__pycache__/maxContextCalc.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/otlLib/__pycache__/maxContextCalc.cpython-39.pyc new file mode 100644 index 00000000..3d2529a2 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/otlLib/__pycache__/maxContextCalc.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/otlLib/builder.py b/.venv/lib/python3.9/site-packages/fontTools/otlLib/builder.py new file mode 100644 index 00000000..6e98e4b2 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/otlLib/builder.py @@ -0,0 +1,2853 @@ +from collections import namedtuple, OrderedDict +import os +from fontTools.misc.fixedTools import fixedToFloat +from fontTools import ttLib +from fontTools.ttLib.tables import otTables as ot +from fontTools.ttLib.tables.otBase import ( + ValueRecord, + valueRecordFormatDict, + OTTableWriter, + CountReference, +) +from fontTools.ttLib.tables import otBase +from fontTools.feaLib.ast import STATNameStatement +from fontTools.otlLib.optimize.gpos import GPOS_COMPACT_MODE_DEFAULT, GPOS_COMPACT_MODE_ENV_KEY, 
compact_lookup +from fontTools.otlLib.error import OpenTypeLibError +from functools import reduce +import logging +import copy + + +log = logging.getLogger(__name__) + + +def buildCoverage(glyphs, glyphMap): + """Builds a coverage table. + + Coverage tables (as defined in the `OpenType spec `_) + are used in all OpenType Layout lookups apart from the Extension type, and + define the glyphs involved in a layout subtable. This allows shaping engines + to compare the glyph stream with the coverage table and quickly determine + whether a subtable should be involved in a shaping operation. + + This function takes a list of glyphs and a glyphname-to-ID map, and + returns a ``Coverage`` object representing the coverage table. + + Example:: + + glyphMap = font.getReverseGlyphMap() + glyphs = [ "A", "B", "C" ] + coverage = buildCoverage(glyphs, glyphMap) + + Args: + glyphs: a sequence of glyph names. + glyphMap: a glyph name to ID map, typically returned from + ``font.getReverseGlyphMap()``. + + Returns: + An ``otTables.Coverage`` object or ``None`` if there are no glyphs + supplied. + """ + + if not glyphs: + return None + self = ot.Coverage() + self.glyphs = sorted(set(glyphs), key=glyphMap.__getitem__) + return self + + +LOOKUP_FLAG_RIGHT_TO_LEFT = 0x0001 +LOOKUP_FLAG_IGNORE_BASE_GLYPHS = 0x0002 +LOOKUP_FLAG_IGNORE_LIGATURES = 0x0004 +LOOKUP_FLAG_IGNORE_MARKS = 0x0008 +LOOKUP_FLAG_USE_MARK_FILTERING_SET = 0x0010 + + +def buildLookup(subtables, flags=0, markFilterSet=None): + """Turns a collection of rules into a lookup. + + A Lookup (as defined in the `OpenType Spec `_) + wraps the individual rules in a layout operation (substitution or + positioning) in a data structure expressing their overall lookup type - + for example, single substitution, mark-to-base attachment, and so on - + as well as the lookup flags and any mark filtering sets. 
You may import + the following constants to express lookup flags: + + - ``LOOKUP_FLAG_RIGHT_TO_LEFT`` + - ``LOOKUP_FLAG_IGNORE_BASE_GLYPHS`` + - ``LOOKUP_FLAG_IGNORE_LIGATURES`` + - ``LOOKUP_FLAG_IGNORE_MARKS`` + - ``LOOKUP_FLAG_USE_MARK_FILTERING_SET`` + + Args: + subtables: A list of layout subtable objects (e.g. + ``MultipleSubst``, ``PairPos``, etc.) or ``None``. + flags (int): This lookup's flags. + markFilterSet: Either ``None`` if no mark filtering set is used, or + an integer representing the filtering set to be used for this + lookup. If a mark filtering set is provided, + `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's + flags. + + Returns: + An ``otTables.Lookup`` object or ``None`` if there are no subtables + supplied. + """ + if subtables is None: + return None + subtables = [st for st in subtables if st is not None] + if not subtables: + return None + assert all( + t.LookupType == subtables[0].LookupType for t in subtables + ), "all subtables must have the same LookupType; got %s" % repr( + [t.LookupType for t in subtables] + ) + self = ot.Lookup() + self.LookupType = subtables[0].LookupType + self.LookupFlag = flags + self.SubTable = subtables + self.SubTableCount = len(self.SubTable) + if markFilterSet is not None: + self.LookupFlag |= LOOKUP_FLAG_USE_MARK_FILTERING_SET + assert isinstance(markFilterSet, int), markFilterSet + self.MarkFilteringSet = markFilterSet + else: + assert (self.LookupFlag & LOOKUP_FLAG_USE_MARK_FILTERING_SET) == 0, ( + "if markFilterSet is None, flags must not set " + "LOOKUP_FLAG_USE_MARK_FILTERING_SET; flags=0x%04x" % flags + ) + return self + + +class LookupBuilder(object): + SUBTABLE_BREAK_ = "SUBTABLE_BREAK" + + def __init__(self, font, location, table, lookup_type): + self.font = font + self.glyphMap = font.getReverseGlyphMap() + self.location = location + self.table, self.lookup_type = table, lookup_type + self.lookupflag = 0 + self.markFilterSet = None + self.lookup_index = None # assigned when 
making final tables + assert table in ("GPOS", "GSUB") + + def equals(self, other): + return ( + isinstance(other, self.__class__) + and self.table == other.table + and self.lookupflag == other.lookupflag + and self.markFilterSet == other.markFilterSet + ) + + def inferGlyphClasses(self): + """Infers glyph glasses for the GDEF table, such as {"cedilla":3}.""" + return {} + + def getAlternateGlyphs(self): + """Helper for building 'aalt' features.""" + return {} + + def buildLookup_(self, subtables): + return buildLookup(subtables, self.lookupflag, self.markFilterSet) + + def buildMarkClasses_(self, marks): + """{"cedilla": ("BOTTOM", ast.Anchor), ...} --> {"BOTTOM":0, "TOP":1} + + Helper for MarkBasePostBuilder, MarkLigPosBuilder, and + MarkMarkPosBuilder. Seems to return the same numeric IDs + for mark classes as the AFDKO makeotf tool. + """ + ids = {} + for mark in sorted(marks.keys(), key=self.font.getGlyphID): + markClassName, _markAnchor = marks[mark] + if markClassName not in ids: + ids[markClassName] = len(ids) + return ids + + def setBacktrackCoverage_(self, prefix, subtable): + subtable.BacktrackGlyphCount = len(prefix) + subtable.BacktrackCoverage = [] + for p in reversed(prefix): + coverage = buildCoverage(p, self.glyphMap) + subtable.BacktrackCoverage.append(coverage) + + def setLookAheadCoverage_(self, suffix, subtable): + subtable.LookAheadGlyphCount = len(suffix) + subtable.LookAheadCoverage = [] + for s in suffix: + coverage = buildCoverage(s, self.glyphMap) + subtable.LookAheadCoverage.append(coverage) + + def setInputCoverage_(self, glyphs, subtable): + subtable.InputGlyphCount = len(glyphs) + subtable.InputCoverage = [] + for g in glyphs: + coverage = buildCoverage(g, self.glyphMap) + subtable.InputCoverage.append(coverage) + + def setCoverage_(self, glyphs, subtable): + subtable.GlyphCount = len(glyphs) + subtable.Coverage = [] + for g in glyphs: + coverage = buildCoverage(g, self.glyphMap) + subtable.Coverage.append(coverage) + + def 
build_subst_subtables(self, mapping, klass): + substitutions = [{}] + for key in mapping: + if key[0] == self.SUBTABLE_BREAK_: + substitutions.append({}) + else: + substitutions[-1][key] = mapping[key] + subtables = [klass(s) for s in substitutions] + return subtables + + def add_subtable_break(self, location): + """Add an explicit subtable break. + + Args: + location: A string or tuple representing the location in the + original source which produced this break, or ``None`` if + no location is provided. + """ + log.warning( + OpenTypeLibError( + 'unsupported "subtable" statement for lookup type', location + ) + ) + + +class AlternateSubstBuilder(LookupBuilder): + """Builds an Alternate Substitution (GSUB3) lookup. + + Users are expected to manually add alternate glyph substitutions to + the ``alternates`` attribute after the object has been initialized, + e.g.:: + + builder.alternates["A"] = ["A.alt1", "A.alt2"] + + Attributes: + font (``fontTools.TTLib.TTFont``): A font object. + location: A string or tuple representing the location in the original + source which produced this lookup. + alternates: An ordered dictionary of alternates, mapping glyph names + to a list of names of alternates. + lookupflag (int): The lookup's flag + markFilterSet: Either ``None`` if no mark filtering set is used, or + an integer representing the filtering set to be used for this + lookup. If a mark filtering set is provided, + `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's + flags. + """ + + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, "GSUB", 3) + self.alternates = OrderedDict() + + def equals(self, other): + return LookupBuilder.equals(self, other) and self.alternates == other.alternates + + def build(self): + """Build the lookup. + + Returns: + An ``otTables.Lookup`` object representing the alternate + substitution lookup. 
+ """ + subtables = self.build_subst_subtables( + self.alternates, buildAlternateSubstSubtable + ) + return self.buildLookup_(subtables) + + def getAlternateGlyphs(self): + return self.alternates + + def add_subtable_break(self, location): + self.alternates[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_ + + +class ChainContextualRule( + namedtuple("ChainContextualRule", ["prefix", "glyphs", "suffix", "lookups"]) +): + @property + def is_subtable_break(self): + return self.prefix == LookupBuilder.SUBTABLE_BREAK_ + + +class ChainContextualRuleset: + def __init__(self): + self.rules = [] + + def addRule(self, rule): + self.rules.append(rule) + + @property + def hasPrefixOrSuffix(self): + # Do we have any prefixes/suffixes? If this is False for all + # rulesets, we can express the whole lookup as GPOS5/GSUB7. + for rule in self.rules: + if len(rule.prefix) > 0 or len(rule.suffix) > 0: + return True + return False + + @property + def hasAnyGlyphClasses(self): + # Do we use glyph classes anywhere in the rules? If this is False + # we can express this subtable as a Format 1. 
+ for rule in self.rules: + for coverage in (rule.prefix, rule.glyphs, rule.suffix): + if any(len(x) > 1 for x in coverage): + return True + return False + + def format2ClassDefs(self): + PREFIX, GLYPHS, SUFFIX = 0, 1, 2 + classDefBuilders = [] + for ix in [PREFIX, GLYPHS, SUFFIX]: + context = [] + for r in self.rules: + context.append(r[ix]) + classes = self._classBuilderForContext(context) + if not classes: + return None + classDefBuilders.append(classes) + return classDefBuilders + + def _classBuilderForContext(self, context): + classdefbuilder = ClassDefBuilder(useClass0=False) + for position in context: + for glyphset in position: + glyphs = set(glyphset) + if not classdefbuilder.canAdd(glyphs): + return None + classdefbuilder.add(glyphs) + return classdefbuilder + + +class ChainContextualBuilder(LookupBuilder): + def equals(self, other): + return LookupBuilder.equals(self, other) and self.rules == other.rules + + def rulesets(self): + # Return a list of ChainContextRuleset objects, taking explicit + # subtable breaks into account + ruleset = [ChainContextualRuleset()] + for rule in self.rules: + if rule.is_subtable_break: + ruleset.append(ChainContextualRuleset()) + continue + ruleset[-1].addRule(rule) + # Squish any empty subtables + return [x for x in ruleset if len(x.rules) > 0] + + def getCompiledSize_(self, subtables): + size = 0 + for st in subtables: + w = OTTableWriter() + w["LookupType"] = CountReference( + {"LookupType": st.LookupType}, "LookupType" + ) + # We need to make a copy here because compiling + # modifies the subtable (finalizing formats etc.) + copy.deepcopy(st).compile(w, self.font) + size += len(w.getAllData()) + return size + + def build(self): + """Build the lookup. + + Returns: + An ``otTables.Lookup`` object representing the chained + contextual positioning lookup. 
+ """ + subtables = [] + chaining = False + rulesets = self.rulesets() + chaining = any(ruleset.hasPrefixOrSuffix for ruleset in rulesets) + for ruleset in rulesets: + # Determine format strategy. We try to build formats 1, 2 and 3 + # subtables and then work out which is best. candidates list holds + # the subtables in each format for this ruleset (including a dummy + # "format 0" to make the addressing match the format numbers). + + # We can always build a format 3 lookup by accumulating each of + # the rules into a list, so start with that. + candidates = [None, None, None, []] + for rule in ruleset.rules: + candidates[3].append(self.buildFormat3Subtable(rule, chaining)) + + # Can we express the whole ruleset as a format 2 subtable? + classdefs = ruleset.format2ClassDefs() + if classdefs: + candidates[2] = [ + self.buildFormat2Subtable(ruleset, classdefs, chaining) + ] + + if not ruleset.hasAnyGlyphClasses: + candidates[1] = [self.buildFormat1Subtable(ruleset, chaining)] + + for i in [1, 2, 3]: + if candidates[i]: + try: + self.getCompiledSize_(candidates[i]) + except Exception as e: + log.warning( + "Contextual format %i at %s overflowed (%s)" + % (i, str(self.location), e) + ) + candidates[i] = None + + candidates = [x for x in candidates if x is not None] + if not candidates: + raise OpenTypeLibError("All candidates overflowed", self.location) + + winner = min(candidates, key=self.getCompiledSize_) + subtables.extend(winner) + + # If we are not chaining, lookup type will be automatically fixed by + # buildLookup_ + return self.buildLookup_(subtables) + + def buildFormat1Subtable(self, ruleset, chaining=True): + st = self.newSubtable_(chaining=chaining) + st.Format = 1 + st.populateDefaults() + coverage = set() + rulesetsByFirstGlyph = {} + ruleAttr = self.ruleAttr_(format=1, chaining=chaining) + + for rule in ruleset.rules: + ruleAsSubtable = self.newRule_(format=1, chaining=chaining) + + if chaining: + ruleAsSubtable.BacktrackGlyphCount = len(rule.prefix) + 
ruleAsSubtable.LookAheadGlyphCount = len(rule.suffix) + ruleAsSubtable.Backtrack = [list(x)[0] for x in reversed(rule.prefix)] + ruleAsSubtable.LookAhead = [list(x)[0] for x in rule.suffix] + + ruleAsSubtable.InputGlyphCount = len(rule.glyphs) + else: + ruleAsSubtable.GlyphCount = len(rule.glyphs) + + ruleAsSubtable.Input = [list(x)[0] for x in rule.glyphs[1:]] + + self.buildLookupList(rule, ruleAsSubtable) + + firstGlyph = list(rule.glyphs[0])[0] + if firstGlyph not in rulesetsByFirstGlyph: + coverage.add(firstGlyph) + rulesetsByFirstGlyph[firstGlyph] = [] + rulesetsByFirstGlyph[firstGlyph].append(ruleAsSubtable) + + st.Coverage = buildCoverage(coverage, self.glyphMap) + ruleSets = [] + for g in st.Coverage.glyphs: + ruleSet = self.newRuleSet_(format=1, chaining=chaining) + setattr(ruleSet, ruleAttr, rulesetsByFirstGlyph[g]) + setattr(ruleSet, f"{ruleAttr}Count", len(rulesetsByFirstGlyph[g])) + ruleSets.append(ruleSet) + + setattr(st, self.ruleSetAttr_(format=1, chaining=chaining), ruleSets) + setattr( + st, self.ruleSetAttr_(format=1, chaining=chaining) + "Count", len(ruleSets) + ) + + return st + + def buildFormat2Subtable(self, ruleset, classdefs, chaining=True): + st = self.newSubtable_(chaining=chaining) + st.Format = 2 + st.populateDefaults() + + if chaining: + ( + st.BacktrackClassDef, + st.InputClassDef, + st.LookAheadClassDef, + ) = [c.build() for c in classdefs] + else: + st.ClassDef = classdefs[1].build() + + inClasses = classdefs[1].classes() + + classSets = [] + for _ in inClasses: + classSet = self.newRuleSet_(format=2, chaining=chaining) + classSets.append(classSet) + + coverage = set() + classRuleAttr = self.ruleAttr_(format=2, chaining=chaining) + + for rule in ruleset.rules: + ruleAsSubtable = self.newRule_(format=2, chaining=chaining) + if chaining: + ruleAsSubtable.BacktrackGlyphCount = len(rule.prefix) + ruleAsSubtable.LookAheadGlyphCount = len(rule.suffix) + # The glyphs in the rule may be list, tuple, odict_keys... 
+ # Order is not important anyway because they are guaranteed + # to be members of the same class. + ruleAsSubtable.Backtrack = [ + st.BacktrackClassDef.classDefs[list(x)[0]] + for x in reversed(rule.prefix) + ] + ruleAsSubtable.LookAhead = [ + st.LookAheadClassDef.classDefs[list(x)[0]] for x in rule.suffix + ] + + ruleAsSubtable.InputGlyphCount = len(rule.glyphs) + ruleAsSubtable.Input = [ + st.InputClassDef.classDefs[list(x)[0]] for x in rule.glyphs[1:] + ] + setForThisRule = classSets[ + st.InputClassDef.classDefs[list(rule.glyphs[0])[0]] + ] + else: + ruleAsSubtable.GlyphCount = len(rule.glyphs) + ruleAsSubtable.Class = [ # The spec calls this InputSequence + st.ClassDef.classDefs[list(x)[0]] for x in rule.glyphs[1:] + ] + setForThisRule = classSets[ + st.ClassDef.classDefs[list(rule.glyphs[0])[0]] + ] + + self.buildLookupList(rule, ruleAsSubtable) + coverage |= set(rule.glyphs[0]) + + getattr(setForThisRule, classRuleAttr).append(ruleAsSubtable) + setattr( + setForThisRule, + f"{classRuleAttr}Count", + getattr(setForThisRule, f"{classRuleAttr}Count") + 1, + ) + setattr(st, self.ruleSetAttr_(format=2, chaining=chaining), classSets) + setattr( + st, self.ruleSetAttr_(format=2, chaining=chaining) + "Count", len(classSets) + ) + st.Coverage = buildCoverage(coverage, self.glyphMap) + return st + + def buildFormat3Subtable(self, rule, chaining=True): + st = self.newSubtable_(chaining=chaining) + st.Format = 3 + if chaining: + self.setBacktrackCoverage_(rule.prefix, st) + self.setLookAheadCoverage_(rule.suffix, st) + self.setInputCoverage_(rule.glyphs, st) + else: + self.setCoverage_(rule.glyphs, st) + self.buildLookupList(rule, st) + return st + + def buildLookupList(self, rule, st): + for sequenceIndex, lookupList in enumerate(rule.lookups): + if lookupList is not None: + if not isinstance(lookupList, list): + # Can happen with synthesised lookups + lookupList = [lookupList] + for l in lookupList: + if l.lookup_index is None: + if isinstance(self, 
ChainContextPosBuilder): + other = "substitution" + else: + other = "positioning" + raise OpenTypeLibError( + "Missing index of the specified " + f"lookup, might be a {other} lookup", + self.location, + ) + rec = self.newLookupRecord_(st) + rec.SequenceIndex = sequenceIndex + rec.LookupListIndex = l.lookup_index + + def add_subtable_break(self, location): + self.rules.append( + ChainContextualRule( + self.SUBTABLE_BREAK_, + self.SUBTABLE_BREAK_, + self.SUBTABLE_BREAK_, + [self.SUBTABLE_BREAK_], + ) + ) + + def newSubtable_(self, chaining=True): + subtablename = f"Context{self.subtable_type}" + if chaining: + subtablename = "Chain" + subtablename + st = getattr(ot, subtablename)() # ot.ChainContextPos()/ot.ChainSubst()/etc. + setattr(st, f"{self.subtable_type}Count", 0) + setattr(st, f"{self.subtable_type}LookupRecord", []) + return st + + # Format 1 and format 2 GSUB5/GSUB6/GPOS7/GPOS8 rulesets and rules form a family: + # + # format 1 ruleset format 1 rule format 2 ruleset format 2 rule + # GSUB5 SubRuleSet SubRule SubClassSet SubClassRule + # GSUB6 ChainSubRuleSet ChainSubRule ChainSubClassSet ChainSubClassRule + # GPOS7 PosRuleSet PosRule PosClassSet PosClassRule + # GPOS8 ChainPosRuleSet ChainPosRule ChainPosClassSet ChainPosClassRule + # + # The following functions generate the attribute names and subtables according + # to this naming convention. + def ruleSetAttr_(self, format=1, chaining=True): + if format == 1: + formatType = "Rule" + elif format == 2: + formatType = "Class" + else: + raise AssertionError(formatType) + subtablename = f"{self.subtable_type[0:3]}{formatType}Set" # Sub, not Subst. + if chaining: + subtablename = "Chain" + subtablename + return subtablename + + def ruleAttr_(self, format=1, chaining=True): + if format == 1: + formatType = "" + elif format == 2: + formatType = "Class" + else: + raise AssertionError(formatType) + subtablename = f"{self.subtable_type[0:3]}{formatType}Rule" # Sub, not Subst. 
+ if chaining: + subtablename = "Chain" + subtablename + return subtablename + + def newRuleSet_(self, format=1, chaining=True): + st = getattr( + ot, self.ruleSetAttr_(format, chaining) + )() # ot.ChainPosRuleSet()/ot.SubRuleSet()/etc. + st.populateDefaults() + return st + + def newRule_(self, format=1, chaining=True): + st = getattr( + ot, self.ruleAttr_(format, chaining) + )() # ot.ChainPosClassRule()/ot.SubClassRule()/etc. + st.populateDefaults() + return st + + def attachSubtableWithCount_( + self, st, subtable_name, count_name, existing=None, index=None, chaining=False + ): + if chaining: + subtable_name = "Chain" + subtable_name + count_name = "Chain" + count_name + + if not hasattr(st, count_name): + setattr(st, count_name, 0) + setattr(st, subtable_name, []) + + if existing: + new_subtable = existing + else: + # Create a new, empty subtable from otTables + new_subtable = getattr(ot, subtable_name)() + + setattr(st, count_name, getattr(st, count_name) + 1) + + if index: + getattr(st, subtable_name).insert(index, new_subtable) + else: + getattr(st, subtable_name).append(new_subtable) + + return new_subtable + + def newLookupRecord_(self, st): + return self.attachSubtableWithCount_( + st, + f"{self.subtable_type}LookupRecord", + f"{self.subtable_type}Count", + chaining=False, + ) # Oddly, it isn't ChainSubstLookupRecord + + +class ChainContextPosBuilder(ChainContextualBuilder): + """Builds a Chained Contextual Positioning (GPOS8) lookup. + + Users are expected to manually add rules to the ``rules`` attribute after + the object has been initialized, e.g.:: + + # pos [A B] [C D] x' lookup lu1 y' z' lookup lu2 E; + + prefix = [ ["A", "B"], ["C", "D"] ] + suffix = [ ["E"] ] + glyphs = [ ["x"], ["y"], ["z"] ] + lookups = [ [lu1], None, [lu2] ] + builder.rules.append( (prefix, glyphs, suffix, lookups) ) + + Attributes: + font (``fontTools.TTLib.TTFont``): A font object. 
+ location: A string or tuple representing the location in the original + source which produced this lookup. + rules: A list of tuples representing the rules in this lookup. + lookupflag (int): The lookup's flag + markFilterSet: Either ``None`` if no mark filtering set is used, or + an integer representing the filtering set to be used for this + lookup. If a mark filtering set is provided, + `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's + flags. + """ + + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, "GPOS", 8) + self.rules = [] + self.subtable_type = "Pos" + + def find_chainable_single_pos(self, lookups, glyphs, value): + """Helper for add_single_pos_chained_()""" + res = None + for lookup in lookups[::-1]: + if lookup == self.SUBTABLE_BREAK_: + return res + if isinstance(lookup, SinglePosBuilder) and all( + lookup.can_add(glyph, value) for glyph in glyphs + ): + res = lookup + return res + + +class ChainContextSubstBuilder(ChainContextualBuilder): + """Builds a Chained Contextual Substitution (GSUB6) lookup. + + Users are expected to manually add rules to the ``rules`` attribute after + the object has been initialized, e.g.:: + + # sub [A B] [C D] x' lookup lu1 y' z' lookup lu2 E; + + prefix = [ ["A", "B"], ["C", "D"] ] + suffix = [ ["E"] ] + glyphs = [ ["x"], ["y"], ["z"] ] + lookups = [ [lu1], None, [lu2] ] + builder.rules.append( (prefix, glyphs, suffix, lookups) ) + + Attributes: + font (``fontTools.TTLib.TTFont``): A font object. + location: A string or tuple representing the location in the original + source which produced this lookup. + rules: A list of tuples representing the rules in this lookup. + lookupflag (int): The lookup's flag + markFilterSet: Either ``None`` if no mark filtering set is used, or + an integer representing the filtering set to be used for this + lookup. If a mark filtering set is provided, + `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's + flags. 
+ """ + + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, "GSUB", 6) + self.rules = [] # (prefix, input, suffix, lookups) + self.subtable_type = "Subst" + + def getAlternateGlyphs(self): + result = {} + for rule in self.rules: + if rule.is_subtable_break: + continue + for lookups in rule.lookups: + if not isinstance(lookups, list): + lookups = [lookups] + for lookup in lookups: + if lookup is not None: + alts = lookup.getAlternateGlyphs() + for glyph, replacements in alts.items(): + result.setdefault(glyph, set()).update(replacements) + return result + + def find_chainable_single_subst(self, glyphs): + """Helper for add_single_subst_chained_()""" + res = None + for rule in self.rules[::-1]: + if rule.is_subtable_break: + return res + for sub in rule.lookups: + if isinstance(sub, SingleSubstBuilder) and not any( + g in glyphs for g in sub.mapping.keys() + ): + res = sub + return res + + +class LigatureSubstBuilder(LookupBuilder): + """Builds a Ligature Substitution (GSUB4) lookup. + + Users are expected to manually add ligatures to the ``ligatures`` + attribute after the object has been initialized, e.g.:: + + # sub f i by f_i; + builder.ligatures[("f","f","i")] = "f_f_i" + + Attributes: + font (``fontTools.TTLib.TTFont``): A font object. + location: A string or tuple representing the location in the original + source which produced this lookup. + ligatures: An ordered dictionary mapping a tuple of glyph names to the + ligature glyphname. + lookupflag (int): The lookup's flag + markFilterSet: Either ``None`` if no mark filtering set is used, or + an integer representing the filtering set to be used for this + lookup. If a mark filtering set is provided, + `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's + flags. 
+ """ + + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, "GSUB", 4) + self.ligatures = OrderedDict() # {('f','f','i'): 'f_f_i'} + + def equals(self, other): + return LookupBuilder.equals(self, other) and self.ligatures == other.ligatures + + def build(self): + """Build the lookup. + + Returns: + An ``otTables.Lookup`` object representing the ligature + substitution lookup. + """ + subtables = self.build_subst_subtables( + self.ligatures, buildLigatureSubstSubtable + ) + return self.buildLookup_(subtables) + + def add_subtable_break(self, location): + self.ligatures[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_ + + +class MultipleSubstBuilder(LookupBuilder): + """Builds a Multiple Substitution (GSUB2) lookup. + + Users are expected to manually add substitutions to the ``mapping`` + attribute after the object has been initialized, e.g.:: + + # sub uni06C0 by uni06D5.fina hamza.above; + builder.mapping["uni06C0"] = [ "uni06D5.fina", "hamza.above"] + + Attributes: + font (``fontTools.TTLib.TTFont``): A font object. + location: A string or tuple representing the location in the original + source which produced this lookup. + mapping: An ordered dictionary mapping a glyph name to a list of + substituted glyph names. + lookupflag (int): The lookup's flag + markFilterSet: Either ``None`` if no mark filtering set is used, or + an integer representing the filtering set to be used for this + lookup. If a mark filtering set is provided, + `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's + flags. 
+ """ + + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, "GSUB", 2) + self.mapping = OrderedDict() + + def equals(self, other): + return LookupBuilder.equals(self, other) and self.mapping == other.mapping + + def build(self): + subtables = self.build_subst_subtables(self.mapping, buildMultipleSubstSubtable) + return self.buildLookup_(subtables) + + def add_subtable_break(self, location): + self.mapping[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_ + + +class CursivePosBuilder(LookupBuilder): + """Builds a Cursive Positioning (GPOS3) lookup. + + Attributes: + font (``fontTools.TTLib.TTFont``): A font object. + location: A string or tuple representing the location in the original + source which produced this lookup. + attachments: An ordered dictionary mapping a glyph name to a two-element + tuple of ``otTables.Anchor`` objects. + lookupflag (int): The lookup's flag + markFilterSet: Either ``None`` if no mark filtering set is used, or + an integer representing the filtering set to be used for this + lookup. If a mark filtering set is provided, + `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's + flags. + """ + + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, "GPOS", 3) + self.attachments = {} + + def equals(self, other): + return ( + LookupBuilder.equals(self, other) and self.attachments == other.attachments + ) + + def add_attachment(self, location, glyphs, entryAnchor, exitAnchor): + """Adds attachment information to the cursive positioning lookup. + + Args: + location: A string or tuple representing the location in the + original source which produced this lookup. (Unused.) + glyphs: A list of glyph names sharing these entry and exit + anchor locations. + entryAnchor: A ``otTables.Anchor`` object representing the + entry anchor, or ``None`` if no entry anchor is present. 
+ exitAnchor: A ``otTables.Anchor`` object representing the + exit anchor, or ``None`` if no exit anchor is present. + """ + for glyph in glyphs: + self.attachments[glyph] = (entryAnchor, exitAnchor) + + def build(self): + """Build the lookup. + + Returns: + An ``otTables.Lookup`` object representing the cursive + positioning lookup. + """ + st = buildCursivePosSubtable(self.attachments, self.glyphMap) + return self.buildLookup_([st]) + + +class MarkBasePosBuilder(LookupBuilder): + """Builds a Mark-To-Base Positioning (GPOS4) lookup. + + Users are expected to manually add marks and bases to the ``marks`` + and ``bases`` attributes after the object has been initialized, e.g.:: + + builder.marks["acute"] = (0, a1) + builder.marks["grave"] = (0, a1) + builder.marks["cedilla"] = (1, a2) + builder.bases["a"] = {0: a3, 1: a5} + builder.bases["b"] = {0: a4, 1: a5} + + Attributes: + font (``fontTools.TTLib.TTFont``): A font object. + location: A string or tuple representing the location in the original + source which produced this lookup. + marks: An dictionary mapping a glyph name to a two-element + tuple containing a mark class ID and ``otTables.Anchor`` object. + bases: An dictionary mapping a glyph name to a dictionary of + mark class IDs and ``otTables.Anchor`` object. + lookupflag (int): The lookup's flag + markFilterSet: Either ``None`` if no mark filtering set is used, or + an integer representing the filtering set to be used for this + lookup. If a mark filtering set is provided, + `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's + flags. 
+ """ + + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, "GPOS", 4) + self.marks = {} # glyphName -> (markClassName, anchor) + self.bases = {} # glyphName -> {markClassName: anchor} + + def equals(self, other): + return ( + LookupBuilder.equals(self, other) + and self.marks == other.marks + and self.bases == other.bases + ) + + def inferGlyphClasses(self): + result = {glyph: 1 for glyph in self.bases} + result.update({glyph: 3 for glyph in self.marks}) + return result + + def build(self): + """Build the lookup. + + Returns: + An ``otTables.Lookup`` object representing the mark-to-base + positioning lookup. + """ + markClasses = self.buildMarkClasses_(self.marks) + marks = {} + for mark, (mc, anchor) in self.marks.items(): + if mc not in markClasses: + raise ValueError("Mark class %s not found for mark glyph %s" % (mc, mark)) + marks[mark] = (markClasses[mc], anchor) + bases = {} + for glyph, anchors in self.bases.items(): + bases[glyph] = {} + for mc, anchor in anchors.items(): + if mc not in markClasses: + raise ValueError("Mark class %s not found for base glyph %s" % (mc, mark)) + bases[glyph][markClasses[mc]] = anchor + subtables = buildMarkBasePos(marks, bases, self.glyphMap) + return self.buildLookup_(subtables) + + +class MarkLigPosBuilder(LookupBuilder): + """Builds a Mark-To-Ligature Positioning (GPOS5) lookup. + + Users are expected to manually add marks and bases to the ``marks`` + and ``ligatures`` attributes after the object has been initialized, e.g.:: + + builder.marks["acute"] = (0, a1) + builder.marks["grave"] = (0, a1) + builder.marks["cedilla"] = (1, a2) + builder.ligatures["f_i"] = [ + { 0: a3, 1: a5 }, # f + { 0: a4, 1: a5 } # i + ] + + Attributes: + font (``fontTools.TTLib.TTFont``): A font object. + location: A string or tuple representing the location in the original + source which produced this lookup. 
+ marks: An dictionary mapping a glyph name to a two-element + tuple containing a mark class ID and ``otTables.Anchor`` object. + ligatures: An dictionary mapping a glyph name to an array with one + element for each ligature component. Each array element should be + a dictionary mapping mark class IDs to ``otTables.Anchor`` objects. + lookupflag (int): The lookup's flag + markFilterSet: Either ``None`` if no mark filtering set is used, or + an integer representing the filtering set to be used for this + lookup. If a mark filtering set is provided, + `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's + flags. + """ + + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, "GPOS", 5) + self.marks = {} # glyphName -> (markClassName, anchor) + self.ligatures = {} # glyphName -> [{markClassName: anchor}, ...] + + def equals(self, other): + return ( + LookupBuilder.equals(self, other) + and self.marks == other.marks + and self.ligatures == other.ligatures + ) + + def inferGlyphClasses(self): + result = {glyph: 2 for glyph in self.ligatures} + result.update({glyph: 3 for glyph in self.marks}) + return result + + def build(self): + """Build the lookup. + + Returns: + An ``otTables.Lookup`` object representing the mark-to-ligature + positioning lookup. + """ + markClasses = self.buildMarkClasses_(self.marks) + marks = { + mark: (markClasses[mc], anchor) for mark, (mc, anchor) in self.marks.items() + } + ligs = {} + for lig, components in self.ligatures.items(): + ligs[lig] = [] + for c in components: + ligs[lig].append({markClasses[mc]: a for mc, a in c.items()}) + subtables = buildMarkLigPos(marks, ligs, self.glyphMap) + return self.buildLookup_(subtables) + + +class MarkMarkPosBuilder(LookupBuilder): + """Builds a Mark-To-Mark Positioning (GPOS6) lookup. 
+ + Users are expected to manually add marks and bases to the ``marks`` + and ``baseMarks`` attributes after the object has been initialized, e.g.:: + + builder.marks["acute"] = (0, a1) + builder.marks["grave"] = (0, a1) + builder.marks["cedilla"] = (1, a2) + builder.baseMarks["acute"] = {0: a3} + + Attributes: + font (``fontTools.TTLib.TTFont``): A font object. + location: A string or tuple representing the location in the original + source which produced this lookup. + marks: An dictionary mapping a glyph name to a two-element + tuple containing a mark class ID and ``otTables.Anchor`` object. + baseMarks: An dictionary mapping a glyph name to a dictionary + containing one item: a mark class ID and a ``otTables.Anchor`` object. + lookupflag (int): The lookup's flag + markFilterSet: Either ``None`` if no mark filtering set is used, or + an integer representing the filtering set to be used for this + lookup. If a mark filtering set is provided, + `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's + flags. + """ + + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, "GPOS", 6) + self.marks = {} # glyphName -> (markClassName, anchor) + self.baseMarks = {} # glyphName -> {markClassName: anchor} + + def equals(self, other): + return ( + LookupBuilder.equals(self, other) + and self.marks == other.marks + and self.baseMarks == other.baseMarks + ) + + def inferGlyphClasses(self): + result = {glyph: 3 for glyph in self.baseMarks} + result.update({glyph: 3 for glyph in self.marks}) + return result + + def build(self): + """Build the lookup. + + Returns: + An ``otTables.Lookup`` object representing the mark-to-mark + positioning lookup. 
+ """ + markClasses = self.buildMarkClasses_(self.marks) + markClassList = sorted(markClasses.keys(), key=markClasses.get) + marks = { + mark: (markClasses[mc], anchor) for mark, (mc, anchor) in self.marks.items() + } + + st = ot.MarkMarkPos() + st.Format = 1 + st.ClassCount = len(markClasses) + st.Mark1Coverage = buildCoverage(marks, self.glyphMap) + st.Mark2Coverage = buildCoverage(self.baseMarks, self.glyphMap) + st.Mark1Array = buildMarkArray(marks, self.glyphMap) + st.Mark2Array = ot.Mark2Array() + st.Mark2Array.Mark2Count = len(st.Mark2Coverage.glyphs) + st.Mark2Array.Mark2Record = [] + for base in st.Mark2Coverage.glyphs: + anchors = [self.baseMarks[base].get(mc) for mc in markClassList] + st.Mark2Array.Mark2Record.append(buildMark2Record(anchors)) + return self.buildLookup_([st]) + + +class ReverseChainSingleSubstBuilder(LookupBuilder): + """Builds a Reverse Chaining Contextual Single Substitution (GSUB8) lookup. + + Users are expected to manually add substitutions to the ``substitutions`` + attribute after the object has been initialized, e.g.:: + + # reversesub [a e n] d' by d.alt; + prefix = [ ["a", "e", "n"] ] + suffix = [] + mapping = { "d": "d.alt" } + builder.substitutions.append( (prefix, suffix, mapping) ) + + Attributes: + font (``fontTools.TTLib.TTFont``): A font object. + location: A string or tuple representing the location in the original + source which produced this lookup. + substitutions: A three-element tuple consisting of a prefix sequence, + a suffix sequence, and a dictionary of single substitutions. + lookupflag (int): The lookup's flag + markFilterSet: Either ``None`` if no mark filtering set is used, or + an integer representing the filtering set to be used for this + lookup. If a mark filtering set is provided, + `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's + flags. 
+ """ + + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, "GSUB", 8) + self.rules = [] # (prefix, suffix, mapping) + + def equals(self, other): + return LookupBuilder.equals(self, other) and self.rules == other.rules + + def build(self): + """Build the lookup. + + Returns: + An ``otTables.Lookup`` object representing the chained + contextual substitution lookup. + """ + subtables = [] + for prefix, suffix, mapping in self.rules: + st = ot.ReverseChainSingleSubst() + st.Format = 1 + self.setBacktrackCoverage_(prefix, st) + self.setLookAheadCoverage_(suffix, st) + st.Coverage = buildCoverage(mapping.keys(), self.glyphMap) + st.GlyphCount = len(mapping) + st.Substitute = [mapping[g] for g in st.Coverage.glyphs] + subtables.append(st) + return self.buildLookup_(subtables) + + def add_subtable_break(self, location): + # Nothing to do here, each substitution is in its own subtable. + pass + + +class SingleSubstBuilder(LookupBuilder): + """Builds a Single Substitution (GSUB1) lookup. + + Users are expected to manually add substitutions to the ``mapping`` + attribute after the object has been initialized, e.g.:: + + # sub x by y; + builder.mapping["x"] = "y" + + Attributes: + font (``fontTools.TTLib.TTFont``): A font object. + location: A string or tuple representing the location in the original + source which produced this lookup. + mapping: A dictionary mapping a single glyph name to another glyph name. + lookupflag (int): The lookup's flag + markFilterSet: Either ``None`` if no mark filtering set is used, or + an integer representing the filtering set to be used for this + lookup. If a mark filtering set is provided, + `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's + flags. 
+ """ + + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, "GSUB", 1) + self.mapping = OrderedDict() + + def equals(self, other): + return LookupBuilder.equals(self, other) and self.mapping == other.mapping + + def build(self): + """Build the lookup. + + Returns: + An ``otTables.Lookup`` object representing the multiple + substitution lookup. + """ + subtables = self.build_subst_subtables(self.mapping, buildSingleSubstSubtable) + return self.buildLookup_(subtables) + + def getAlternateGlyphs(self): + return {glyph: set([repl]) for glyph, repl in self.mapping.items()} + + def add_subtable_break(self, location): + self.mapping[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_ + + +class ClassPairPosSubtableBuilder(object): + """Builds class-based Pair Positioning (GPOS2 format 2) subtables. + + Note that this does *not* build a GPOS2 ``otTables.Lookup`` directly, + but builds a list of ``otTables.PairPos`` subtables. It is used by the + :class:`PairPosBuilder` below. + + Attributes: + builder (PairPosBuilder): A pair positioning lookup builder. + """ + + def __init__(self, builder): + self.builder_ = builder + self.classDef1_, self.classDef2_ = None, None + self.values_ = {} # (glyphclass1, glyphclass2) --> (value1, value2) + self.forceSubtableBreak_ = False + self.subtables_ = [] + + def addPair(self, gc1, value1, gc2, value2): + """Add a pair positioning rule. + + Args: + gc1: A set of glyph names for the "left" glyph + value1: An ``otTables.ValueRecord`` object for the left glyph's + positioning. + gc2: A set of glyph names for the "right" glyph + value2: An ``otTables.ValueRecord`` object for the right glyph's + positioning. 
+ """ + mergeable = ( + not self.forceSubtableBreak_ + and self.classDef1_ is not None + and self.classDef1_.canAdd(gc1) + and self.classDef2_ is not None + and self.classDef2_.canAdd(gc2) + ) + if not mergeable: + self.flush_() + self.classDef1_ = ClassDefBuilder(useClass0=True) + self.classDef2_ = ClassDefBuilder(useClass0=False) + self.values_ = {} + self.classDef1_.add(gc1) + self.classDef2_.add(gc2) + self.values_[(gc1, gc2)] = (value1, value2) + + def addSubtableBreak(self): + """Add an explicit subtable break at this point.""" + self.forceSubtableBreak_ = True + + def subtables(self): + """Return the list of ``otTables.PairPos`` subtables constructed.""" + self.flush_() + return self.subtables_ + + def flush_(self): + if self.classDef1_ is None or self.classDef2_ is None: + return + st = buildPairPosClassesSubtable(self.values_, self.builder_.glyphMap) + if st.Coverage is None: + return + self.subtables_.append(st) + self.forceSubtableBreak_ = False + + +class PairPosBuilder(LookupBuilder): + """Builds a Pair Positioning (GPOS2) lookup. + + Attributes: + font (``fontTools.TTLib.TTFont``): A font object. + location: A string or tuple representing the location in the original + source which produced this lookup. + pairs: An array of class-based pair positioning tuples. Usually + manipulated with the :meth:`addClassPair` method below. + glyphPairs: A dictionary mapping a tuple of glyph names to a tuple + of ``otTables.ValueRecord`` objects. Usually manipulated with the + :meth:`addGlyphPair` method below. + lookupflag (int): The lookup's flag + markFilterSet: Either ``None`` if no mark filtering set is used, or + an integer representing the filtering set to be used for this + lookup. If a mark filtering set is provided, + `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's + flags. 
+ """ + + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, "GPOS", 2) + self.pairs = [] # [(gc1, value1, gc2, value2)*] + self.glyphPairs = {} # (glyph1, glyph2) --> (value1, value2) + self.locations = {} # (gc1, gc2) --> (filepath, line, column) + + def addClassPair(self, location, glyphclass1, value1, glyphclass2, value2): + """Add a class pair positioning rule to the current lookup. + + Args: + location: A string or tuple representing the location in the + original source which produced this rule. Unused. + glyphclass1: A set of glyph names for the "left" glyph in the pair. + value1: A ``otTables.ValueRecord`` for positioning the left glyph. + glyphclass2: A set of glyph names for the "right" glyph in the pair. + value2: A ``otTables.ValueRecord`` for positioning the right glyph. + """ + self.pairs.append((glyphclass1, value1, glyphclass2, value2)) + + def addGlyphPair(self, location, glyph1, value1, glyph2, value2): + """Add a glyph pair positioning rule to the current lookup. + + Args: + location: A string or tuple representing the location in the + original source which produced this rule. + glyph1: A glyph name for the "left" glyph in the pair. + value1: A ``otTables.ValueRecord`` for positioning the left glyph. + glyph2: A glyph name for the "right" glyph in the pair. + value2: A ``otTables.ValueRecord`` for positioning the right glyph. 
+ """ + key = (glyph1, glyph2) + oldValue = self.glyphPairs.get(key, None) + if oldValue is not None: + # the Feature File spec explicitly allows specific pairs generated + # by an 'enum' rule to be overridden by preceding single pairs + otherLoc = self.locations[key] + log.debug( + "Already defined position for pair %s %s at %s; " + "choosing the first value", + glyph1, + glyph2, + otherLoc, + ) + else: + self.glyphPairs[key] = (value1, value2) + self.locations[key] = location + + def add_subtable_break(self, location): + self.pairs.append( + ( + self.SUBTABLE_BREAK_, + self.SUBTABLE_BREAK_, + self.SUBTABLE_BREAK_, + self.SUBTABLE_BREAK_, + ) + ) + + def equals(self, other): + return ( + LookupBuilder.equals(self, other) + and self.glyphPairs == other.glyphPairs + and self.pairs == other.pairs + ) + + def build(self): + """Build the lookup. + + Returns: + An ``otTables.Lookup`` object representing the pair positioning + lookup. + """ + builders = {} + builder = None + for glyphclass1, value1, glyphclass2, value2 in self.pairs: + if glyphclass1 is self.SUBTABLE_BREAK_: + if builder is not None: + builder.addSubtableBreak() + continue + valFormat1, valFormat2 = 0, 0 + if value1: + valFormat1 = value1.getFormat() + if value2: + valFormat2 = value2.getFormat() + builder = builders.get((valFormat1, valFormat2)) + if builder is None: + builder = ClassPairPosSubtableBuilder(self) + builders[(valFormat1, valFormat2)] = builder + builder.addPair(glyphclass1, value1, glyphclass2, value2) + subtables = [] + if self.glyphPairs: + subtables.extend(buildPairPosGlyphs(self.glyphPairs, self.glyphMap)) + for key in sorted(builders.keys()): + subtables.extend(builders[key].subtables()) + lookup = self.buildLookup_(subtables) + + # Compact the lookup + # This is a good moment to do it because the compaction should create + # smaller subtables, which may prevent overflows from happening. 
+        mode = os.environ.get(GPOS_COMPACT_MODE_ENV_KEY, GPOS_COMPACT_MODE_DEFAULT)
+        if mode and mode != "0":
+            log.info("Compacting GPOS...")
+            compact_lookup(self.font, mode, lookup)
+
+        return lookup
+
+
+class SinglePosBuilder(LookupBuilder):
+    """Builds a Single Positioning (GPOS1) lookup.
+
+    Attributes:
+        font (``fontTools.TTLib.TTFont``): A font object.
+        location: A string or tuple representing the location in the original
+            source which produced this lookup.
+        mapping: A dictionary mapping a glyph name to an ``otTables.ValueRecord``
+            object. Usually manipulated with the :meth:`add_pos` method below.
+        lookupflag (int): The lookup's flag
+        markFilterSet: Either ``None`` if no mark filtering set is used, or
+            an integer representing the filtering set to be used for this
+            lookup. If a mark filtering set is provided,
+            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
+            flags.
+    """
+
+    def __init__(self, font, location):
+        LookupBuilder.__init__(self, font, location, "GPOS", 1)
+        self.locations = {}  # glyph -> (filename, line, column)
+        self.mapping = {}  # glyph -> ot.ValueRecord
+
+    def add_pos(self, location, glyph, otValueRecord):
+        """Add a single positioning rule.
+
+        Args:
+            location: A string or tuple representing the location in the
+                original source which produced this lookup.
+            glyph: A glyph name.
+            otValueRecord: A ``otTables.ValueRecord`` used to position the
+                glyph.
+ """ + if not self.can_add(glyph, otValueRecord): + otherLoc = self.locations[glyph] + raise OpenTypeLibError( + 'Already defined different position for glyph "%s" at %s' + % (glyph, otherLoc), + location, + ) + if otValueRecord: + self.mapping[glyph] = otValueRecord + self.locations[glyph] = location + + def can_add(self, glyph, value): + assert isinstance(value, ValueRecord) + curValue = self.mapping.get(glyph) + return curValue is None or curValue == value + + def equals(self, other): + return LookupBuilder.equals(self, other) and self.mapping == other.mapping + + def build(self): + """Build the lookup. + + Returns: + An ``otTables.Lookup`` object representing the single positioning + lookup. + """ + subtables = buildSinglePos(self.mapping, self.glyphMap) + return self.buildLookup_(subtables) + + +# GSUB + + +def buildSingleSubstSubtable(mapping): + """Builds a single substitution (GSUB1) subtable. + + Note that if you are implementing a layout compiler, you may find it more + flexible to use + :py:class:`fontTools.otlLib.lookupBuilders.SingleSubstBuilder` instead. + + Args: + mapping: A dictionary mapping input glyph names to output glyph names. + + Returns: + An ``otTables.SingleSubst`` object, or ``None`` if the mapping dictionary + is empty. + """ + if not mapping: + return None + self = ot.SingleSubst() + self.mapping = dict(mapping) + return self + + +def buildMultipleSubstSubtable(mapping): + """Builds a multiple substitution (GSUB2) subtable. + + Note that if you are implementing a layout compiler, you may find it more + flexible to use + :py:class:`fontTools.otlLib.lookupBuilders.MultipleSubstBuilder` instead. + + Example:: + + # sub uni06C0 by uni06D5.fina hamza.above + # sub uni06C2 by uni06C1.fina hamza.above; + + subtable = buildMultipleSubstSubtable({ + "uni06C0": [ "uni06D5.fina", "hamza.above"], + "uni06C2": [ "uni06D1.fina", "hamza.above"] + }) + + Args: + mapping: A dictionary mapping input glyph names to a list of output + glyph names. 
+ + Returns: + An ``otTables.MultipleSubst`` object or ``None`` if the mapping dictionary + is empty. + """ + if not mapping: + return None + self = ot.MultipleSubst() + self.mapping = dict(mapping) + return self + + +def buildAlternateSubstSubtable(mapping): + """Builds an alternate substitution (GSUB3) subtable. + + Note that if you are implementing a layout compiler, you may find it more + flexible to use + :py:class:`fontTools.otlLib.lookupBuilders.AlternateSubstBuilder` instead. + + Args: + mapping: A dictionary mapping input glyph names to a list of output + glyph names. + + Returns: + An ``otTables.AlternateSubst`` object or ``None`` if the mapping dictionary + is empty. + """ + if not mapping: + return None + self = ot.AlternateSubst() + self.alternates = dict(mapping) + return self + + +def _getLigatureKey(components): + # Computes a key for ordering ligatures in a GSUB Type-4 lookup. + + # When building the OpenType lookup, we need to make sure that + # the longest sequence of components is listed first, so we + # use the negative length as the primary key for sorting. + # To make buildLigatureSubstSubtable() deterministic, we use the + # component sequence as the secondary key. + + # For example, this will sort (f,f,f) < (f,f,i) < (f,f) < (f,i) < (f,l). + return (-len(components), components) + + +def buildLigatureSubstSubtable(mapping): + """Builds a ligature substitution (GSUB4) subtable. + + Note that if you are implementing a layout compiler, you may find it more + flexible to use + :py:class:`fontTools.otlLib.lookupBuilders.LigatureSubstBuilder` instead. + + Example:: + + # sub f f i by f_f_i; + # sub f i by f_i; + + subtable = buildLigatureSubstSubtable({ + ("f", "f", "i"): "f_f_i", + ("f", "i"): "f_i", + }) + + Args: + mapping: A dictionary mapping tuples of glyph names to output + glyph names. + + Returns: + An ``otTables.LigatureSubst`` object or ``None`` if the mapping dictionary + is empty. 
+ """ + + if not mapping: + return None + self = ot.LigatureSubst() + # The following single line can replace the rest of this function + # with fontTools >= 3.1: + # self.ligatures = dict(mapping) + self.ligatures = {} + for components in sorted(mapping.keys(), key=_getLigatureKey): + ligature = ot.Ligature() + ligature.Component = components[1:] + ligature.CompCount = len(ligature.Component) + 1 + ligature.LigGlyph = mapping[components] + firstGlyph = components[0] + self.ligatures.setdefault(firstGlyph, []).append(ligature) + return self + + +# GPOS + + +def buildAnchor(x, y, point=None, deviceX=None, deviceY=None): + """Builds an Anchor table. + + This determines the appropriate anchor format based on the passed parameters. + + Args: + x (int): X coordinate. + y (int): Y coordinate. + point (int): Index of glyph contour point, if provided. + deviceX (``otTables.Device``): X coordinate device table, if provided. + deviceY (``otTables.Device``): Y coordinate device table, if provided. + + Returns: + An ``otTables.Anchor`` object. + """ + self = ot.Anchor() + self.XCoordinate, self.YCoordinate = x, y + self.Format = 1 + if point is not None: + self.AnchorPoint = point + self.Format = 2 + if deviceX is not None or deviceY is not None: + assert ( + self.Format == 1 + ), "Either point, or both of deviceX/deviceY, must be None." + self.XDeviceTable = deviceX + self.YDeviceTable = deviceY + self.Format = 3 + return self + + +def buildBaseArray(bases, numMarkClasses, glyphMap): + """Builds a base array record. + + As part of building mark-to-base positioning rules, you will need to define + a ``BaseArray`` record, which "defines for each base glyph an array of + anchors, one for each mark class." This function builds the base array + subtable. 
+ + Example:: + + bases = {"a": {0: a3, 1: a5}, "b": {0: a4, 1: a5}} + basearray = buildBaseArray(bases, 2, font.getReverseGlyphMap()) + + Args: + bases (dict): A dictionary mapping anchors to glyphs; the keys being + glyph names, and the values being dictionaries mapping mark class ID + to the appropriate ``otTables.Anchor`` object used for attaching marks + of that class. + numMarkClasses (int): The total number of mark classes for which anchors + are defined. + glyphMap: a glyph name to ID map, typically returned from + ``font.getReverseGlyphMap()``. + + Returns: + An ``otTables.BaseArray`` object. + """ + self = ot.BaseArray() + self.BaseRecord = [] + for base in sorted(bases, key=glyphMap.__getitem__): + b = bases[base] + anchors = [b.get(markClass) for markClass in range(numMarkClasses)] + self.BaseRecord.append(buildBaseRecord(anchors)) + self.BaseCount = len(self.BaseRecord) + return self + + +def buildBaseRecord(anchors): + # [otTables.Anchor, otTables.Anchor, ...] --> otTables.BaseRecord + self = ot.BaseRecord() + self.BaseAnchor = anchors + return self + + +def buildComponentRecord(anchors): + """Builds a component record. + + As part of building mark-to-ligature positioning rules, you will need to + define ``ComponentRecord`` objects, which contain "an array of offsets... + to the Anchor tables that define all the attachment points used to attach + marks to the component." This function builds the component record. + + Args: + anchors: A list of ``otTables.Anchor`` objects or ``None``. + + Returns: + A ``otTables.ComponentRecord`` object or ``None`` if no anchors are + supplied. + """ + if not anchors: + return None + self = ot.ComponentRecord() + self.LigatureAnchor = anchors + return self + + +def buildCursivePosSubtable(attach, glyphMap): + """Builds a cursive positioning (GPOS3) subtable. + + Cursive positioning lookups are made up of a coverage table of glyphs, + and a set of ``EntryExitRecord`` records containing the anchors for + each glyph. 
This function builds the cursive positioning subtable. + + Example:: + + subtable = buildCursivePosSubtable({ + "AlifIni": (None, buildAnchor(0, 50)), + "BehMed": (buildAnchor(500,250), buildAnchor(0,50)), + # ... + }, font.getReverseGlyphMap()) + + Args: + attach (dict): A mapping between glyph names and a tuple of two + ``otTables.Anchor`` objects representing entry and exit anchors. + glyphMap: a glyph name to ID map, typically returned from + ``font.getReverseGlyphMap()``. + + Returns: + An ``otTables.CursivePos`` object, or ``None`` if the attachment + dictionary was empty. + """ + if not attach: + return None + self = ot.CursivePos() + self.Format = 1 + self.Coverage = buildCoverage(attach.keys(), glyphMap) + self.EntryExitRecord = [] + for glyph in self.Coverage.glyphs: + entryAnchor, exitAnchor = attach[glyph] + rec = ot.EntryExitRecord() + rec.EntryAnchor = entryAnchor + rec.ExitAnchor = exitAnchor + self.EntryExitRecord.append(rec) + self.EntryExitCount = len(self.EntryExitRecord) + return self + + +def buildDevice(deltas): + """Builds a Device record as part of a ValueRecord or Anchor. + + Device tables specify size-specific adjustments to value records + and anchors to reflect changes based on the resolution of the output. + For example, one could specify that an anchor's Y position should be + increased by 1 pixel when displayed at 8 pixels per em. This routine + builds device records. + + Args: + deltas: A dictionary mapping pixels-per-em sizes to the delta + adjustment in pixels when the font is displayed at that size. + + Returns: + An ``otTables.Device`` object if any deltas were supplied, or + ``None`` otherwise. 
+ """ + if not deltas: + return None + self = ot.Device() + keys = deltas.keys() + self.StartSize = startSize = min(keys) + self.EndSize = endSize = max(keys) + assert 0 <= startSize <= endSize + self.DeltaValue = deltaValues = [ + deltas.get(size, 0) for size in range(startSize, endSize + 1) + ] + maxDelta = max(deltaValues) + minDelta = min(deltaValues) + assert minDelta > -129 and maxDelta < 128 + if minDelta > -3 and maxDelta < 2: + self.DeltaFormat = 1 + elif minDelta > -9 and maxDelta < 8: + self.DeltaFormat = 2 + else: + self.DeltaFormat = 3 + return self + + +def buildLigatureArray(ligs, numMarkClasses, glyphMap): + """Builds a LigatureArray subtable. + + As part of building a mark-to-ligature lookup, you will need to define + the set of anchors (for each mark class) on each component of the ligature + where marks can be attached. For example, for an Arabic divine name ligature + (lam lam heh), you may want to specify mark attachment positioning for + superior marks (fatha, etc.) and inferior marks (kasra, etc.) on each glyph + of the ligature. This routine builds the ligature array record. + + Example:: + + buildLigatureArray({ + "lam-lam-heh": [ + { 0: superiorAnchor1, 1: inferiorAnchor1 }, # attach points for lam1 + { 0: superiorAnchor2, 1: inferiorAnchor2 }, # attach points for lam2 + { 0: superiorAnchor3, 1: inferiorAnchor3 }, # attach points for heh + ] + }, 2, font.getReverseGlyphMap()) + + Args: + ligs (dict): A mapping of ligature names to an array of dictionaries: + for each component glyph in the ligature, an dictionary mapping + mark class IDs to anchors. + numMarkClasses (int): The number of mark classes. + glyphMap: a glyph name to ID map, typically returned from + ``font.getReverseGlyphMap()``. + + Returns: + An ``otTables.LigatureArray`` object if deltas were supplied. 
+ """ + self = ot.LigatureArray() + self.LigatureAttach = [] + for lig in sorted(ligs, key=glyphMap.__getitem__): + anchors = [] + for component in ligs[lig]: + anchors.append([component.get(mc) for mc in range(numMarkClasses)]) + self.LigatureAttach.append(buildLigatureAttach(anchors)) + self.LigatureCount = len(self.LigatureAttach) + return self + + +def buildLigatureAttach(components): + # [[Anchor, Anchor], [Anchor, Anchor, Anchor]] --> LigatureAttach + self = ot.LigatureAttach() + self.ComponentRecord = [buildComponentRecord(c) for c in components] + self.ComponentCount = len(self.ComponentRecord) + return self + + +def buildMarkArray(marks, glyphMap): + """Builds a mark array subtable. + + As part of building mark-to-* positioning rules, you will need to define + a MarkArray subtable, which "defines the class and the anchor point + for a mark glyph." This function builds the mark array subtable. + + Example:: + + mark = { + "acute": (0, buildAnchor(300,712)), + # ... + } + markarray = buildMarkArray(marks, font.getReverseGlyphMap()) + + Args: + marks (dict): A dictionary mapping anchors to glyphs; the keys being + glyph names, and the values being a tuple of mark class number and + an ``otTables.Anchor`` object representing the mark's attachment + point. + glyphMap: a glyph name to ID map, typically returned from + ``font.getReverseGlyphMap()``. + + Returns: + An ``otTables.MarkArray`` object. + """ + self = ot.MarkArray() + self.MarkRecord = [] + for mark in sorted(marks.keys(), key=glyphMap.__getitem__): + markClass, anchor = marks[mark] + markrec = buildMarkRecord(markClass, anchor) + self.MarkRecord.append(markrec) + self.MarkCount = len(self.MarkRecord) + return self + + +def buildMarkBasePos(marks, bases, glyphMap): + """Build a list of MarkBasePos (GPOS4) subtables. + + This routine turns a set of marks and bases into a list of mark-to-base + positioning subtables. 
Currently the list will contain a single subtable + containing all marks and bases, although at a later date it may return the + optimal list of subtables subsetting the marks and bases into groups which + save space. See :func:`buildMarkBasePosSubtable` below. + + Note that if you are implementing a layout compiler, you may find it more + flexible to use + :py:class:`fontTools.otlLib.lookupBuilders.MarkBasePosBuilder` instead. + + Example:: + + # a1, a2, a3, a4, a5 = buildAnchor(500, 100), ... + + marks = {"acute": (0, a1), "grave": (0, a1), "cedilla": (1, a2)} + bases = {"a": {0: a3, 1: a5}, "b": {0: a4, 1: a5}} + markbaseposes = buildMarkBasePos(marks, bases, font.getReverseGlyphMap()) + + Args: + marks (dict): A dictionary mapping anchors to glyphs; the keys being + glyph names, and the values being a tuple of mark class number and + an ``otTables.Anchor`` object representing the mark's attachment + point. (See :func:`buildMarkArray`.) + bases (dict): A dictionary mapping anchors to glyphs; the keys being + glyph names, and the values being dictionaries mapping mark class ID + to the appropriate ``otTables.Anchor`` object used for attaching marks + of that class. (See :func:`buildBaseArray`.) + glyphMap: a glyph name to ID map, typically returned from + ``font.getReverseGlyphMap()``. + + Returns: + A list of ``otTables.MarkBasePos`` objects. + """ + # TODO: Consider emitting multiple subtables to save space. + # Partition the marks and bases into disjoint subsets, so that + # MarkBasePos rules would only access glyphs from a single + # subset. This would likely lead to smaller mark/base + # matrices, so we might be able to omit many of the empty + # anchor tables that we currently produce. Of course, this + # would only work if the MarkBasePos rules of real-world fonts + # allow partitioning into multiple subsets. We should find out + # whether this is the case; if so, implement the optimization. 
+ # On the other hand, a very large number of subtables could + # slow down layout engines; so this would need profiling. + return [buildMarkBasePosSubtable(marks, bases, glyphMap)] + + +def buildMarkBasePosSubtable(marks, bases, glyphMap): + """Build a single MarkBasePos (GPOS4) subtable. + + This builds a mark-to-base lookup subtable containing all of the referenced + marks and bases. See :func:`buildMarkBasePos`. + + Args: + marks (dict): A dictionary mapping anchors to glyphs; the keys being + glyph names, and the values being a tuple of mark class number and + an ``otTables.Anchor`` object representing the mark's attachment + point. (See :func:`buildMarkArray`.) + bases (dict): A dictionary mapping anchors to glyphs; the keys being + glyph names, and the values being dictionaries mapping mark class ID + to the appropriate ``otTables.Anchor`` object used for attaching marks + of that class. (See :func:`buildBaseArray`.) + glyphMap: a glyph name to ID map, typically returned from + ``font.getReverseGlyphMap()``. + + Returns: + A ``otTables.MarkBasePos`` object. + """ + self = ot.MarkBasePos() + self.Format = 1 + self.MarkCoverage = buildCoverage(marks, glyphMap) + self.MarkArray = buildMarkArray(marks, glyphMap) + self.ClassCount = max([mc for mc, _ in marks.values()]) + 1 + self.BaseCoverage = buildCoverage(bases, glyphMap) + self.BaseArray = buildBaseArray(bases, self.ClassCount, glyphMap) + return self + + +def buildMarkLigPos(marks, ligs, glyphMap): + """Build a list of MarkLigPos (GPOS5) subtables. + + This routine turns a set of marks and ligatures into a list of mark-to-ligature + positioning subtables. Currently the list will contain a single subtable + containing all marks and ligatures, although at a later date it may return + the optimal list of subtables subsetting the marks and ligatures into groups + which save space. See :func:`buildMarkLigPosSubtable` below. 
+ + Note that if you are implementing a layout compiler, you may find it more + flexible to use + :py:class:`fontTools.otlLib.lookupBuilders.MarkLigPosBuilder` instead. + + Example:: + + # a1, a2, a3, a4, a5 = buildAnchor(500, 100), ... + marks = { + "acute": (0, a1), + "grave": (0, a1), + "cedilla": (1, a2) + } + ligs = { + "f_i": [ + { 0: a3, 1: a5 }, # f + { 0: a4, 1: a5 } # i + ], + # "c_t": [{...}, {...}] + } + markligposes = buildMarkLigPos(marks, ligs, + font.getReverseGlyphMap()) + + Args: + marks (dict): A dictionary mapping anchors to glyphs; the keys being + glyph names, and the values being a tuple of mark class number and + an ``otTables.Anchor`` object representing the mark's attachment + point. (See :func:`buildMarkArray`.) + ligs (dict): A mapping of ligature names to an array of dictionaries: + for each component glyph in the ligature, an dictionary mapping + mark class IDs to anchors. (See :func:`buildLigatureArray`.) + glyphMap: a glyph name to ID map, typically returned from + ``font.getReverseGlyphMap()``. + + Returns: + A list of ``otTables.MarkLigPos`` objects. + + """ + # TODO: Consider splitting into multiple subtables to save space, + # as with MarkBasePos, this would be a trade-off that would need + # profiling. And, depending on how typical fonts are structured, + # it might not be worth doing at all. + return [buildMarkLigPosSubtable(marks, ligs, glyphMap)] + + +def buildMarkLigPosSubtable(marks, ligs, glyphMap): + """Build a single MarkLigPos (GPOS5) subtable. + + This builds a mark-to-base lookup subtable containing all of the referenced + marks and bases. See :func:`buildMarkLigPos`. + + Args: + marks (dict): A dictionary mapping anchors to glyphs; the keys being + glyph names, and the values being a tuple of mark class number and + an ``otTables.Anchor`` object representing the mark's attachment + point. (See :func:`buildMarkArray`.) 
+ ligs (dict): A mapping of ligature names to an array of dictionaries: + for each component glyph in the ligature, an dictionary mapping + mark class IDs to anchors. (See :func:`buildLigatureArray`.) + glyphMap: a glyph name to ID map, typically returned from + ``font.getReverseGlyphMap()``. + + Returns: + A ``otTables.MarkLigPos`` object. + """ + self = ot.MarkLigPos() + self.Format = 1 + self.MarkCoverage = buildCoverage(marks, glyphMap) + self.MarkArray = buildMarkArray(marks, glyphMap) + self.ClassCount = max([mc for mc, _ in marks.values()]) + 1 + self.LigatureCoverage = buildCoverage(ligs, glyphMap) + self.LigatureArray = buildLigatureArray(ligs, self.ClassCount, glyphMap) + return self + + +def buildMarkRecord(classID, anchor): + assert isinstance(classID, int) + assert isinstance(anchor, ot.Anchor) + self = ot.MarkRecord() + self.Class = classID + self.MarkAnchor = anchor + return self + + +def buildMark2Record(anchors): + # [otTables.Anchor, otTables.Anchor, ...] --> otTables.Mark2Record + self = ot.Mark2Record() + self.Mark2Anchor = anchors + return self + + +def _getValueFormat(f, values, i): + # Helper for buildPairPos{Glyphs|Classes}Subtable. + if f is not None: + return f + mask = 0 + for value in values: + if value is not None and value[i] is not None: + mask |= value[i].getFormat() + return mask + + +def buildPairPosClassesSubtable(pairs, glyphMap, valueFormat1=None, valueFormat2=None): + """Builds a class pair adjustment (GPOS2 format 2) subtable. + + Kerning tables are generally expressed as pair positioning tables using + class-based pair adjustments. This routine builds format 2 PairPos + subtables. 
+ + Note that if you are implementing a layout compiler, you may find it more + flexible to use + :py:class:`fontTools.otlLib.lookupBuilders.ClassPairPosSubtableBuilder` + instead, as this takes care of ensuring that the supplied pairs can be + formed into non-overlapping classes and emitting individual subtables + whenever the non-overlapping requirement means that a new subtable is + required. + + Example:: + + pairs = {} + + pairs[( + [ "K", "X" ], + [ "W", "V" ] + )] = ( buildValue(xAdvance=+5), buildValue() ) + # pairs[(... , ...)] = (..., ...) + + pairpos = buildPairPosClassesSubtable(pairs, font.getReverseGlyphMap()) + + Args: + pairs (dict): Pair positioning data; the keys being a two-element + tuple of lists of glyphnames, and the values being a two-element + tuple of ``otTables.ValueRecord`` objects. + glyphMap: a glyph name to ID map, typically returned from + ``font.getReverseGlyphMap()``. + valueFormat1: Force the "left" value records to the given format. + valueFormat2: Force the "right" value records to the given format. + + Returns: + A ``otTables.PairPos`` object. 
+ """ + coverage = set() + classDef1 = ClassDefBuilder(useClass0=True) + classDef2 = ClassDefBuilder(useClass0=False) + for gc1, gc2 in sorted(pairs): + coverage.update(gc1) + classDef1.add(gc1) + classDef2.add(gc2) + self = ot.PairPos() + self.Format = 2 + valueFormat1 = self.ValueFormat1 = _getValueFormat(valueFormat1, pairs.values(), 0) + valueFormat2 = self.ValueFormat2 = _getValueFormat(valueFormat2, pairs.values(), 1) + self.Coverage = buildCoverage(coverage, glyphMap) + self.ClassDef1 = classDef1.build() + self.ClassDef2 = classDef2.build() + classes1 = classDef1.classes() + classes2 = classDef2.classes() + self.Class1Record = [] + for c1 in classes1: + rec1 = ot.Class1Record() + rec1.Class2Record = [] + self.Class1Record.append(rec1) + for c2 in classes2: + rec2 = ot.Class2Record() + val1, val2 = pairs.get((c1, c2), (None, None)) + rec2.Value1 = ValueRecord(src=val1, valueFormat=valueFormat1) if valueFormat1 else None + rec2.Value2 = ValueRecord(src=val2, valueFormat=valueFormat2) if valueFormat2 else None + rec1.Class2Record.append(rec2) + self.Class1Count = len(self.Class1Record) + self.Class2Count = len(classes2) + return self + + +def buildPairPosGlyphs(pairs, glyphMap): + """Builds a list of glyph-based pair adjustment (GPOS2 format 1) subtables. + + This organises a list of pair positioning adjustments into subtables based + on common value record formats. + + Note that if you are implementing a layout compiler, you may find it more + flexible to use + :py:class:`fontTools.otlLib.lookupBuilders.PairPosBuilder` + instead. + + Example:: + + pairs = { + ("K", "W"): ( buildValue(xAdvance=+5), buildValue() ), + ("K", "V"): ( buildValue(xAdvance=+5), buildValue() ), + # ... + } + + subtables = buildPairPosGlyphs(pairs, font.getReverseGlyphMap()) + + Args: + pairs (dict): Pair positioning data; the keys being a two-element + tuple of glyphnames, and the values being a two-element + tuple of ``otTables.ValueRecord`` objects. 
+ glyphMap: a glyph name to ID map, typically returned from + ``font.getReverseGlyphMap()``. + + Returns: + A list of ``otTables.PairPos`` objects. + """ + + p = {} # (formatA, formatB) --> {(glyphA, glyphB): (valA, valB)} + for (glyphA, glyphB), (valA, valB) in pairs.items(): + formatA = valA.getFormat() if valA is not None else 0 + formatB = valB.getFormat() if valB is not None else 0 + pos = p.setdefault((formatA, formatB), {}) + pos[(glyphA, glyphB)] = (valA, valB) + return [ + buildPairPosGlyphsSubtable(pos, glyphMap, formatA, formatB) + for ((formatA, formatB), pos) in sorted(p.items()) + ] + + +def buildPairPosGlyphsSubtable(pairs, glyphMap, valueFormat1=None, valueFormat2=None): + """Builds a single glyph-based pair adjustment (GPOS2 format 1) subtable. + + This builds a PairPos subtable from a dictionary of glyph pairs and + their positioning adjustments. See also :func:`buildPairPosGlyphs`. + + Note that if you are implementing a layout compiler, you may find it more + flexible to use + :py:class:`fontTools.otlLib.lookupBuilders.PairPosBuilder` instead. + + Example:: + + pairs = { + ("K", "W"): ( buildValue(xAdvance=+5), buildValue() ), + ("K", "V"): ( buildValue(xAdvance=+5), buildValue() ), + # ... + } + + pairpos = buildPairPosGlyphsSubtable(pairs, font.getReverseGlyphMap()) + + Args: + pairs (dict): Pair positioning data; the keys being a two-element + tuple of glyphnames, and the values being a two-element + tuple of ``otTables.ValueRecord`` objects. + glyphMap: a glyph name to ID map, typically returned from + ``font.getReverseGlyphMap()``. + valueFormat1: Force the "left" value records to the given format. + valueFormat2: Force the "right" value records to the given format. + + Returns: + A ``otTables.PairPos`` object. 
+ """ + self = ot.PairPos() + self.Format = 1 + valueFormat1 = self.ValueFormat1 = _getValueFormat(valueFormat1, pairs.values(), 0) + valueFormat2 = self.ValueFormat2 = _getValueFormat(valueFormat2, pairs.values(), 1) + p = {} + for (glyphA, glyphB), (valA, valB) in pairs.items(): + p.setdefault(glyphA, []).append((glyphB, valA, valB)) + self.Coverage = buildCoverage({g for g, _ in pairs.keys()}, glyphMap) + self.PairSet = [] + for glyph in self.Coverage.glyphs: + ps = ot.PairSet() + ps.PairValueRecord = [] + self.PairSet.append(ps) + for glyph2, val1, val2 in sorted(p[glyph], key=lambda x: glyphMap[x[0]]): + pvr = ot.PairValueRecord() + pvr.SecondGlyph = glyph2 + pvr.Value1 = ValueRecord(src=val1, valueFormat=valueFormat1) if valueFormat1 else None + pvr.Value2 = ValueRecord(src=val2, valueFormat=valueFormat2) if valueFormat2 else None + ps.PairValueRecord.append(pvr) + ps.PairValueCount = len(ps.PairValueRecord) + self.PairSetCount = len(self.PairSet) + return self + + +def buildSinglePos(mapping, glyphMap): + """Builds a list of single adjustment (GPOS1) subtables. + + This builds a list of SinglePos subtables from a dictionary of glyph + names and their positioning adjustments. The format of the subtables are + determined to optimize the size of the resulting subtables. + See also :func:`buildSinglePosSubtable`. + + Note that if you are implementing a layout compiler, you may find it more + flexible to use + :py:class:`fontTools.otlLib.lookupBuilders.SinglePosBuilder` instead. + + Example:: + + mapping = { + "V": buildValue({ "xAdvance" : +5 }), + # ... + } + + subtables = buildSinglePos(pairs, font.getReverseGlyphMap()) + + Args: + mapping (dict): A mapping between glyphnames and + ``otTables.ValueRecord`` objects. + glyphMap: a glyph name to ID map, typically returned from + ``font.getReverseGlyphMap()``. + + Returns: + A list of ``otTables.SinglePos`` objects. 
+ """ + result, handled = [], set() + # In SinglePos format 1, the covered glyphs all share the same ValueRecord. + # In format 2, each glyph has its own ValueRecord, but these records + # all have the same properties (eg., all have an X but no Y placement). + coverages, masks, values = {}, {}, {} + for glyph, value in mapping.items(): + key = _getSinglePosValueKey(value) + coverages.setdefault(key, []).append(glyph) + masks.setdefault(key[0], []).append(key) + values[key] = value + + # If a ValueRecord is shared between multiple glyphs, we generate + # a SinglePos format 1 subtable; that is the most compact form. + for key, glyphs in coverages.items(): + # 5 ushorts is the length of introducing another sublookup + if len(glyphs) * _getSinglePosValueSize(key) > 5: + format1Mapping = {g: values[key] for g in glyphs} + result.append(buildSinglePosSubtable(format1Mapping, glyphMap)) + handled.add(key) + + # In the remaining ValueRecords, look for those whose valueFormat + # (the set of used properties) is shared between multiple records. + # These will get encoded in format 2. + for valueFormat, keys in masks.items(): + f2 = [k for k in keys if k not in handled] + if len(f2) > 1: + format2Mapping = {} + for k in f2: + format2Mapping.update((g, values[k]) for g in coverages[k]) + result.append(buildSinglePosSubtable(format2Mapping, glyphMap)) + handled.update(f2) + + # The remaining ValueRecords are only used by a few glyphs, normally + # one. We encode these in format 1 again. + for key, glyphs in coverages.items(): + if key not in handled: + for g in glyphs: + st = buildSinglePosSubtable({g: values[key]}, glyphMap) + result.append(st) + + # When the OpenType layout engine traverses the subtables, it will + # stop after the first matching subtable. Therefore, we sort the + # resulting subtables by decreasing coverage size; this increases + # the chance that the layout engine can do an early exit. 
(Of course, + # this would only be true if all glyphs were equally frequent, which + # is not really the case; but we do not know their distribution). + # If two subtables cover the same number of glyphs, we sort them + # by glyph ID so that our output is deterministic. + result.sort(key=lambda t: _getSinglePosTableKey(t, glyphMap)) + return result + + +def buildSinglePosSubtable(values, glyphMap): + """Builds a single adjustment (GPOS1) subtable. + + This builds a list of SinglePos subtables from a dictionary of glyph + names and their positioning adjustments. The format of the subtable is + determined to optimize the size of the output. + See also :func:`buildSinglePos`. + + Note that if you are implementing a layout compiler, you may find it more + flexible to use + :py:class:`fontTools.otlLib.lookupBuilders.SinglePosBuilder` instead. + + Example:: + + mapping = { + "V": buildValue({ "xAdvance" : +5 }), + # ... + } + + subtable = buildSinglePos(pairs, font.getReverseGlyphMap()) + + Args: + mapping (dict): A mapping between glyphnames and + ``otTables.ValueRecord`` objects. + glyphMap: a glyph name to ID map, typically returned from + ``font.getReverseGlyphMap()``. + + Returns: + A ``otTables.SinglePos`` object. 
+ """ + self = ot.SinglePos() + self.Coverage = buildCoverage(values.keys(), glyphMap) + valueFormat = self.ValueFormat = reduce(int.__or__, [v.getFormat() for v in values.values()], 0) + valueRecords = [ValueRecord(src=values[g], valueFormat=valueFormat) for g in self.Coverage.glyphs] + if all(v == valueRecords[0] for v in valueRecords): + self.Format = 1 + if self.ValueFormat != 0: + self.Value = valueRecords[0] + else: + self.Value = None + else: + self.Format = 2 + self.Value = valueRecords + self.ValueCount = len(self.Value) + return self + + +def _getSinglePosTableKey(subtable, glyphMap): + assert isinstance(subtable, ot.SinglePos), subtable + glyphs = subtable.Coverage.glyphs + return (-len(glyphs), glyphMap[glyphs[0]]) + + +def _getSinglePosValueKey(valueRecord): + # otBase.ValueRecord --> (2, ("YPlacement": 12)) + assert isinstance(valueRecord, ValueRecord), valueRecord + valueFormat, result = 0, [] + for name, value in valueRecord.__dict__.items(): + if isinstance(value, ot.Device): + result.append((name, _makeDeviceTuple(value))) + else: + result.append((name, value)) + valueFormat |= valueRecordFormatDict[name][0] + result.sort() + result.insert(0, valueFormat) + return tuple(result) + + +_DeviceTuple = namedtuple("_DeviceTuple", "DeltaFormat StartSize EndSize DeltaValue") + + +def _makeDeviceTuple(device): + # otTables.Device --> tuple, for making device tables unique + return _DeviceTuple( + device.DeltaFormat, + device.StartSize, + device.EndSize, + () if device.DeltaFormat & 0x8000 else tuple(device.DeltaValue), + ) + + +def _getSinglePosValueSize(valueKey): + # Returns how many ushorts this valueKey (short form of ValueRecord) takes up + count = 0 + for _, v in valueKey[1:]: + if isinstance(v, _DeviceTuple): + count += len(v.DeltaValue) + 3 + else: + count += 1 + return count + + +def buildValue(value): + """Builds a positioning value record. + + Value records are used to specify coordinates and adjustments for + positioning and attaching glyphs. 
Many of the positioning functions + in this library take ``otTables.ValueRecord`` objects as arguments. + This function builds value records from dictionaries. + + Args: + value (dict): A dictionary with zero or more of the following keys: + - ``xPlacement`` + - ``yPlacement`` + - ``xAdvance`` + - ``yAdvance`` + - ``xPlaDevice`` + - ``yPlaDevice`` + - ``xAdvDevice`` + - ``yAdvDevice`` + + Returns: + An ``otTables.ValueRecord`` object. + """ + self = ValueRecord() + for k, v in value.items(): + setattr(self, k, v) + return self + + +# GDEF + + +def buildAttachList(attachPoints, glyphMap): + """Builds an AttachList subtable. + + A GDEF table may contain an Attachment Point List table (AttachList) + which stores the contour indices of attachment points for glyphs with + attachment points. This routine builds AttachList subtables. + + Args: + attachPoints (dict): A mapping between glyph names and a list of + contour indices. + + Returns: + An ``otTables.AttachList`` object if attachment points are supplied, + or ``None`` otherwise. + """ + if not attachPoints: + return None + self = ot.AttachList() + self.Coverage = buildCoverage(attachPoints.keys(), glyphMap) + self.AttachPoint = [buildAttachPoint(attachPoints[g]) for g in self.Coverage.glyphs] + self.GlyphCount = len(self.AttachPoint) + return self + + +def buildAttachPoint(points): + # [4, 23, 41] --> otTables.AttachPoint + # Only used by above. + if not points: + return None + self = ot.AttachPoint() + self.PointIndex = sorted(set(points)) + self.PointCount = len(self.PointIndex) + return self + + +def buildCaretValueForCoord(coord): + # 500 --> otTables.CaretValue, format 1 + self = ot.CaretValue() + self.Format = 1 + self.Coordinate = coord + return self + + +def buildCaretValueForPoint(point): + # 4 --> otTables.CaretValue, format 2 + self = ot.CaretValue() + self.Format = 2 + self.CaretValuePoint = point + return self + + +def buildLigCaretList(coords, points, glyphMap): + """Builds a ligature caret list table. 
+ + Ligatures appear as a single glyph representing multiple characters; however + when, for example, editing text containing a ``f_i`` ligature, the user may + want to place the cursor between the ``f`` and the ``i``. The ligature caret + list in the GDEF table specifies the position to display the "caret" (the + character insertion indicator, typically a flashing vertical bar) "inside" + the ligature to represent an insertion point. The insertion positions may + be specified either by coordinate or by contour point. + + Example:: + + coords = { + "f_f_i": [300, 600] # f|fi cursor at 300 units, ff|i cursor at 600. + } + points = { + "c_t": [28] # c|t cursor appears at coordinate of contour point 28. + } + ligcaretlist = buildLigCaretList(coords, points, font.getReverseGlyphMap()) + + Args: + coords: A mapping between glyph names and a list of coordinates for + the insertion point of each ligature component after the first one. + points: A mapping between glyph names and a list of contour points for + the insertion point of each ligature component after the first one. + glyphMap: a glyph name to ID map, typically returned from + ``font.getReverseGlyphMap()``. 
+ + Returns: + A ``otTables.LigCaretList`` object if any carets are present, or + ``None`` otherwise.""" + glyphs = set(coords.keys()) if coords else set() + if points: + glyphs.update(points.keys()) + carets = {g: buildLigGlyph(coords.get(g), points.get(g)) for g in glyphs} + carets = {g: c for g, c in carets.items() if c is not None} + if not carets: + return None + self = ot.LigCaretList() + self.Coverage = buildCoverage(carets.keys(), glyphMap) + self.LigGlyph = [carets[g] for g in self.Coverage.glyphs] + self.LigGlyphCount = len(self.LigGlyph) + return self + + +def buildLigGlyph(coords, points): + # ([500], [4]) --> otTables.LigGlyph; None for empty coords/points + carets = [] + if coords: + carets.extend([buildCaretValueForCoord(c) for c in sorted(coords)]) + if points: + carets.extend([buildCaretValueForPoint(p) for p in sorted(points)]) + if not carets: + return None + self = ot.LigGlyph() + self.CaretValue = carets + self.CaretCount = len(self.CaretValue) + return self + + +def buildMarkGlyphSetsDef(markSets, glyphMap): + """Builds a mark glyph sets definition table. + + OpenType Layout lookups may choose to use mark filtering sets to consider + or ignore particular combinations of marks. These sets are specified by + setting a flag on the lookup, but the mark filtering sets are defined in + the ``GDEF`` table. This routine builds the subtable containing the mark + glyph set definitions. + + Example:: + + set0 = set("acute", "grave") + set1 = set("caron", "grave") + + markglyphsets = buildMarkGlyphSetsDef([set0, set1], font.getReverseGlyphMap()) + + Args: + + markSets: A list of sets of glyphnames. + glyphMap: a glyph name to ID map, typically returned from + ``font.getReverseGlyphMap()``. + + Returns + An ``otTables.MarkGlyphSetsDef`` object. 
+ """ + if not markSets: + return None + self = ot.MarkGlyphSetsDef() + self.MarkSetTableFormat = 1 + self.Coverage = [buildCoverage(m, glyphMap) for m in markSets] + self.MarkSetCount = len(self.Coverage) + return self + + +class ClassDefBuilder(object): + """Helper for building ClassDef tables.""" + + def __init__(self, useClass0): + self.classes_ = set() + self.glyphs_ = {} + self.useClass0_ = useClass0 + + def canAdd(self, glyphs): + if isinstance(glyphs, (set, frozenset)): + glyphs = sorted(glyphs) + glyphs = tuple(glyphs) + if glyphs in self.classes_: + return True + for glyph in glyphs: + if glyph in self.glyphs_: + return False + return True + + def add(self, glyphs): + if isinstance(glyphs, (set, frozenset)): + glyphs = sorted(glyphs) + glyphs = tuple(glyphs) + if glyphs in self.classes_: + return + self.classes_.add(glyphs) + for glyph in glyphs: + if glyph in self.glyphs_: + raise OpenTypeLibError( + f"Glyph {glyph} is already present in class.", None + ) + self.glyphs_[glyph] = glyphs + + def classes(self): + # In ClassDef1 tables, class id #0 does not need to be encoded + # because zero is the default. Therefore, we use id #0 for the + # glyph class that has the largest number of members. However, + # in other tables than ClassDef1, 0 means "every other glyph" + # so we should not use that ID for any real glyph classes; + # we implement this by inserting an empty set at position 0. + # + # TODO: Instead of counting the number of glyphs in each class, + # we should determine the encoded size. If the glyphs in a large + # class form a contiguous range, the encoding is actually quite + # compact, whereas a non-contiguous set might need a lot of bytes + # in the output file. We don't get this right with the key below. 
+ result = sorted(self.classes_, key=lambda s: (len(s), s), reverse=True) + if not self.useClass0_: + result.insert(0, frozenset()) + return result + + def build(self): + glyphClasses = {} + for classID, glyphs in enumerate(self.classes()): + if classID == 0: + continue + for glyph in glyphs: + glyphClasses[glyph] = classID + classDef = ot.ClassDef() + classDef.classDefs = glyphClasses + return classDef + + +AXIS_VALUE_NEGATIVE_INFINITY = fixedToFloat(-0x80000000, 16) +AXIS_VALUE_POSITIVE_INFINITY = fixedToFloat(0x7FFFFFFF, 16) + + +def buildStatTable(ttFont, axes, locations=None, elidedFallbackName=2): + """Add a 'STAT' table to 'ttFont'. + + 'axes' is a list of dictionaries describing axes and their + values. + + Example:: + + axes = [ + dict( + tag="wght", + name="Weight", + ordering=0, # optional + values=[ + dict(value=100, name='Thin'), + dict(value=300, name='Light'), + dict(value=400, name='Regular', flags=0x2), + dict(value=900, name='Black'), + ], + ) + ] + + Each axis dict must have 'tag' and 'name' items. 'tag' maps + to the 'AxisTag' field. 'name' can be a name ID (int), a string, + or a dictionary containing multilingual names (see the + addMultilingualName() name table method), and will translate to + the AxisNameID field. + + An axis dict may contain an 'ordering' item that maps to the + AxisOrdering field. If omitted, the order of the axes list is + used to calculate AxisOrdering fields. + + The axis dict may contain a 'values' item, which is a list of + dictionaries describing AxisValue records belonging to this axis. + + Each value dict must have a 'name' item, which can be a name ID + (int), a string, or a dictionary containing multilingual names, + like the axis name. It translates to the ValueNameID field. + + Optionally the value dict can contain a 'flags' item. It maps to + the AxisValue Flags field, and will be 0 when omitted. 
+ + The format of the AxisValue is determined by the remaining contents + of the value dictionary: + + If the value dict contains a 'value' item, an AxisValue record + Format 1 is created. If in addition to the 'value' item it contains + a 'linkedValue' item, an AxisValue record Format 3 is built. + + If the value dict contains a 'nominalValue' item, an AxisValue + record Format 2 is built. Optionally it may contain 'rangeMinValue' + and 'rangeMaxValue' items. These map to -Infinity and +Infinity + respectively if omitted. + + You cannot specify Format 4 AxisValue tables this way, as they are + not tied to a single axis, and specify a name for a location that + is defined by multiple axes values. Instead, you need to supply the + 'locations' argument. + + The optional 'locations' argument specifies AxisValue Format 4 + tables. It should be a list of dicts, where each dict has a 'name' + item, which works just like the value dicts above, an optional + 'flags' item (defaulting to 0x0), and a 'location' dict. A + location dict key is an axis tag, and the associated value is the + location on the specified axis. They map to the AxisIndex and Value + fields of the AxisValueRecord. + + Example:: + + locations = [ + dict(name='Regular ABCD', location=dict(wght=300, ABCD=100)), + dict(name='Bold ABCD XYZ', location=dict(wght=600, ABCD=200)), + ] + + The optional 'elidedFallbackName' argument can be a name ID (int), + a string, a dictionary containing multilingual names, or a list of + STATNameStatements. It translates to the ElidedFallbackNameID field. + + The 'ttFont' argument must be a TTFont instance that already has a + 'name' table. If a 'STAT' table already exists, it will be + overwritten by the newly created one. 
+ """ + ttFont["STAT"] = ttLib.newTable("STAT") + statTable = ttFont["STAT"].table = ot.STAT() + nameTable = ttFont["name"] + statTable.ElidedFallbackNameID = _addName(nameTable, elidedFallbackName) + + # 'locations' contains data for AxisValue Format 4 + axisRecords, axisValues = _buildAxisRecords(axes, nameTable) + if not locations: + statTable.Version = 0x00010001 + else: + # We'll be adding Format 4 AxisValue records, which + # requires a higher table version + statTable.Version = 0x00010002 + multiAxisValues = _buildAxisValuesFormat4(locations, axes, nameTable) + axisValues = multiAxisValues + axisValues + + # Store AxisRecords + axisRecordArray = ot.AxisRecordArray() + axisRecordArray.Axis = axisRecords + # XXX these should not be hard-coded but computed automatically + statTable.DesignAxisRecordSize = 8 + statTable.DesignAxisRecord = axisRecordArray + statTable.DesignAxisCount = len(axisRecords) + + if axisValues: + # Store AxisValueRecords + axisValueArray = ot.AxisValueArray() + axisValueArray.AxisValue = axisValues + statTable.AxisValueArray = axisValueArray + statTable.AxisValueCount = len(axisValues) + + +def _buildAxisRecords(axes, nameTable): + axisRecords = [] + axisValues = [] + for axisRecordIndex, axisDict in enumerate(axes): + axis = ot.AxisRecord() + axis.AxisTag = axisDict["tag"] + axis.AxisNameID = _addName(nameTable, axisDict["name"], 256) + axis.AxisOrdering = axisDict.get("ordering", axisRecordIndex) + axisRecords.append(axis) + + for axisVal in axisDict.get("values", ()): + axisValRec = ot.AxisValue() + axisValRec.AxisIndex = axisRecordIndex + axisValRec.Flags = axisVal.get("flags", 0) + axisValRec.ValueNameID = _addName(nameTable, axisVal["name"]) + + if "value" in axisVal: + axisValRec.Value = axisVal["value"] + if "linkedValue" in axisVal: + axisValRec.Format = 3 + axisValRec.LinkedValue = axisVal["linkedValue"] + else: + axisValRec.Format = 1 + elif "nominalValue" in axisVal: + axisValRec.Format = 2 + axisValRec.NominalValue = 
axisVal["nominalValue"] + axisValRec.RangeMinValue = axisVal.get( + "rangeMinValue", AXIS_VALUE_NEGATIVE_INFINITY + ) + axisValRec.RangeMaxValue = axisVal.get( + "rangeMaxValue", AXIS_VALUE_POSITIVE_INFINITY + ) + else: + raise ValueError("Can't determine format for AxisValue") + + axisValues.append(axisValRec) + return axisRecords, axisValues + + +def _buildAxisValuesFormat4(locations, axes, nameTable): + axisTagToIndex = {} + for axisRecordIndex, axisDict in enumerate(axes): + axisTagToIndex[axisDict["tag"]] = axisRecordIndex + + axisValues = [] + for axisLocationDict in locations: + axisValRec = ot.AxisValue() + axisValRec.Format = 4 + axisValRec.ValueNameID = _addName(nameTable, axisLocationDict["name"]) + axisValRec.Flags = axisLocationDict.get("flags", 0) + axisValueRecords = [] + for tag, value in axisLocationDict["location"].items(): + avr = ot.AxisValueRecord() + avr.AxisIndex = axisTagToIndex[tag] + avr.Value = value + axisValueRecords.append(avr) + axisValueRecords.sort(key=lambda avr: avr.AxisIndex) + axisValRec.AxisCount = len(axisValueRecords) + axisValRec.AxisValueRecord = axisValueRecords + axisValues.append(axisValRec) + return axisValues + + +def _addName(nameTable, value, minNameID=0): + if isinstance(value, int): + # Already a nameID + return value + if isinstance(value, str): + names = dict(en=value) + elif isinstance(value, dict): + names = value + elif isinstance(value, list): + nameID = nameTable._findUnusedNameID() + for nameRecord in value: + if isinstance(nameRecord, STATNameStatement): + nameTable.setName( + nameRecord.string, + nameID, + nameRecord.platformID, + nameRecord.platEncID, + nameRecord.langID, + ) + else: + raise TypeError("value must be a list of STATNameStatements") + return nameID + else: + raise TypeError("value must be int, str, dict or list") + return nameTable.addMultilingualName(names, minNameID=minNameID) diff --git a/.venv/lib/python3.9/site-packages/fontTools/otlLib/error.py 
b/.venv/lib/python3.9/site-packages/fontTools/otlLib/error.py new file mode 100644 index 00000000..1cbef578 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/otlLib/error.py @@ -0,0 +1,11 @@ +class OpenTypeLibError(Exception): + def __init__(self, message, location): + Exception.__init__(self, message) + self.location = location + + def __str__(self): + message = Exception.__str__(self) + if self.location: + return f"{self.location}: {message}" + else: + return message diff --git a/.venv/lib/python3.9/site-packages/fontTools/otlLib/maxContextCalc.py b/.venv/lib/python3.9/site-packages/fontTools/otlLib/maxContextCalc.py new file mode 100644 index 00000000..03e7561b --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/otlLib/maxContextCalc.py @@ -0,0 +1,96 @@ +__all__ = ["maxCtxFont"] + + +def maxCtxFont(font): + """Calculate the usMaxContext value for an entire font.""" + + maxCtx = 0 + for tag in ("GSUB", "GPOS"): + if tag not in font: + continue + table = font[tag].table + if not table.LookupList: + continue + for lookup in table.LookupList.Lookup: + for st in lookup.SubTable: + maxCtx = maxCtxSubtable(maxCtx, tag, lookup.LookupType, st) + return maxCtx + + +def maxCtxSubtable(maxCtx, tag, lookupType, st): + """Calculate usMaxContext based on a single lookup table (and an existing + max value). 
+ """ + + # single positioning, single / multiple substitution + if (tag == "GPOS" and lookupType == 1) or ( + tag == "GSUB" and lookupType in (1, 2, 3) + ): + maxCtx = max(maxCtx, 1) + + # pair positioning + elif tag == "GPOS" and lookupType == 2: + maxCtx = max(maxCtx, 2) + + # ligatures + elif tag == "GSUB" and lookupType == 4: + for ligatures in st.ligatures.values(): + for ligature in ligatures: + maxCtx = max(maxCtx, ligature.CompCount) + + # context + elif (tag == "GPOS" and lookupType == 7) or (tag == "GSUB" and lookupType == 5): + maxCtx = maxCtxContextualSubtable(maxCtx, st, "Pos" if tag == "GPOS" else "Sub") + + # chained context + elif (tag == "GPOS" and lookupType == 8) or (tag == "GSUB" and lookupType == 6): + maxCtx = maxCtxContextualSubtable( + maxCtx, st, "Pos" if tag == "GPOS" else "Sub", "Chain" + ) + + # extensions + elif (tag == "GPOS" and lookupType == 9) or (tag == "GSUB" and lookupType == 7): + maxCtx = maxCtxSubtable(maxCtx, tag, st.ExtensionLookupType, st.ExtSubTable) + + # reverse-chained context + elif tag == "GSUB" and lookupType == 8: + maxCtx = maxCtxContextualRule(maxCtx, st, "Reverse") + + return maxCtx + + +def maxCtxContextualSubtable(maxCtx, st, ruleType, chain=""): + """Calculate usMaxContext based on a contextual feature subtable.""" + + if st.Format == 1: + for ruleset in getattr(st, "%s%sRuleSet" % (chain, ruleType)): + if ruleset is None: + continue + for rule in getattr(ruleset, "%s%sRule" % (chain, ruleType)): + if rule is None: + continue + maxCtx = maxCtxContextualRule(maxCtx, rule, chain) + + elif st.Format == 2: + for ruleset in getattr(st, "%s%sClassSet" % (chain, ruleType)): + if ruleset is None: + continue + for rule in getattr(ruleset, "%s%sClassRule" % (chain, ruleType)): + if rule is None: + continue + maxCtx = maxCtxContextualRule(maxCtx, rule, chain) + + elif st.Format == 3: + maxCtx = maxCtxContextualRule(maxCtx, st, chain) + + return maxCtx + + +def maxCtxContextualRule(maxCtx, st, chain): + """Calculate 
usMaxContext based on a contextual feature rule.""" + + if not chain: + return max(maxCtx, st.GlyphCount) + elif chain == "Reverse": + return max(maxCtx, st.GlyphCount + st.LookAheadGlyphCount) + return max(maxCtx, st.InputGlyphCount + st.LookAheadGlyphCount) diff --git a/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/__init__.py b/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/__init__.py new file mode 100644 index 00000000..5c007e89 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/__init__.py @@ -0,0 +1,68 @@ +from argparse import RawTextHelpFormatter +from textwrap import dedent + +from fontTools.ttLib import TTFont +from fontTools.otlLib.optimize.gpos import compact, GPOS_COMPACT_MODE_DEFAULT + +def main(args=None): + """Optimize the layout tables of an existing font.""" + from argparse import ArgumentParser + from fontTools import configLogger + + parser = ArgumentParser(prog="otlLib.optimize", description=main.__doc__, formatter_class=RawTextHelpFormatter) + parser.add_argument("font") + parser.add_argument( + "-o", metavar="OUTPUTFILE", dest="outfile", default=None, help="output file" + ) + parser.add_argument( + "--gpos-compact-mode", + help=dedent( + f"""\ + GPOS Lookup type 2 (PairPos) compaction mode: + 0 = do not attempt to compact PairPos lookups; + 1 to 8 = create at most 1 to 8 new subtables for each existing + subtable, provided that it would yield a 50%% file size saving; + 9 = create as many new subtables as needed to yield a file size saving. + Default: {GPOS_COMPACT_MODE_DEFAULT}. + + This compaction aims to save file size, by splitting large class + kerning subtables (Format 2) that contain many zero values into + smaller and denser subtables. It's a trade-off between the overhead + of several subtables versus the sparseness of one big subtable. 
+ + See the pull request: https://github.com/fonttools/fonttools/pull/2326 + """ + ), + default=int(GPOS_COMPACT_MODE_DEFAULT), + choices=list(range(10)), + type=int, + ) + logging_group = parser.add_mutually_exclusive_group(required=False) + logging_group.add_argument( + "-v", "--verbose", action="store_true", help="Run more verbosely." + ) + logging_group.add_argument( + "-q", "--quiet", action="store_true", help="Turn verbosity off." + ) + options = parser.parse_args(args) + + configLogger( + level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO") + ) + + font = TTFont(options.font) + # TODO: switch everything to have type(mode) = int when using the Config class + compact(font, str(options.gpos_compact_mode)) + font.save(options.outfile or options.font) + + + +if __name__ == "__main__": + import sys + + if len(sys.argv) > 1: + sys.exit(main()) + import doctest + + sys.exit(doctest.testmod().failed) + diff --git a/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/__main__.py b/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/__main__.py new file mode 100644 index 00000000..03027ecd --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/__main__.py @@ -0,0 +1,6 @@ +import sys +from fontTools.otlLib.optimize import main + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/__pycache__/__init__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..85937bd7 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/__pycache__/__main__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/__pycache__/__main__.cpython-39.pyc new file mode 100644 index 00000000..623303a4 
Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/__pycache__/__main__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/__pycache__/gpos.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/__pycache__/gpos.cpython-39.pyc new file mode 100644 index 00000000..7d741d7d Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/__pycache__/gpos.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/gpos.py b/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/gpos.py new file mode 100644 index 00000000..79873fad --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/otlLib/optimize/gpos.py @@ -0,0 +1,439 @@ +import logging +from collections import defaultdict, namedtuple +from functools import reduce +from itertools import chain +from math import log2 +from typing import DefaultDict, Dict, Iterable, List, Sequence, Tuple + +from fontTools.misc.intTools import bit_count, bit_indices +from fontTools.ttLib import TTFont +from fontTools.ttLib.tables import otBase, otTables + +# NOTE: activating this optimization via the environment variable is +# experimental and may not be supported once an alternative mechanism +# is in place. See: https://github.com/fonttools/fonttools/issues/2349 +GPOS_COMPACT_MODE_ENV_KEY = "FONTTOOLS_GPOS_COMPACT_MODE" +GPOS_COMPACT_MODE_DEFAULT = "0" + +log = logging.getLogger("fontTools.otlLib.optimize.gpos") + + +def compact(font: TTFont, mode: str) -> TTFont: + # Ideal plan: + # 1. Find lookups of Lookup Type 2: Pair Adjustment Positioning Subtable + # https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#lookup-type-2-pair-adjustment-positioning-subtable + # 2. Extract glyph-glyph kerning and class-kerning from all present subtables + # 3. Regroup into different subtable arrangements + # 4. 
Put back into the lookup + # + # Actual implementation: + # 2. Only class kerning is optimized currently + # 3. If the input kerning is already in several subtables, the subtables + # are not grouped together first; instead each subtable is treated + # independently, so currently this step is: + # Split existing subtables into more smaller subtables + gpos = font["GPOS"] + for lookup in gpos.table.LookupList.Lookup: + if lookup.LookupType == 2: + compact_lookup(font, mode, lookup) + elif lookup.LookupType == 9 and lookup.SubTable[0].ExtensionLookupType == 2: + compact_ext_lookup(font, mode, lookup) + return font + + +def compact_lookup(font: TTFont, mode: str, lookup: otTables.Lookup) -> None: + new_subtables = compact_pair_pos(font, mode, lookup.SubTable) + lookup.SubTable = new_subtables + lookup.SubTableCount = len(new_subtables) + + +def compact_ext_lookup(font: TTFont, mode: str, lookup: otTables.Lookup) -> None: + new_subtables = compact_pair_pos( + font, mode, [ext_subtable.ExtSubTable for ext_subtable in lookup.SubTable] + ) + new_ext_subtables = [] + for subtable in new_subtables: + ext_subtable = otTables.ExtensionPos() + ext_subtable.Format = 1 + ext_subtable.ExtSubTable = subtable + new_ext_subtables.append(ext_subtable) + lookup.SubTable = new_ext_subtables + lookup.SubTableCount = len(new_ext_subtables) + + +def compact_pair_pos( + font: TTFont, mode: str, subtables: Sequence[otTables.PairPos] +) -> Sequence[otTables.PairPos]: + new_subtables = [] + for subtable in subtables: + if subtable.Format == 1: + # Not doing anything to Format 1 (yet?) 
+ new_subtables.append(subtable) + elif subtable.Format == 2: + new_subtables.extend(compact_class_pairs(font, mode, subtable)) + return new_subtables + + +def compact_class_pairs( + font: TTFont, mode: str, subtable: otTables.PairPos +) -> List[otTables.PairPos]: + from fontTools.otlLib.builder import buildPairPosClassesSubtable + + subtables = [] + classes1: DefaultDict[int, List[str]] = defaultdict(list) + for g in subtable.Coverage.glyphs: + classes1[subtable.ClassDef1.classDefs.get(g, 0)].append(g) + classes2: DefaultDict[int, List[str]] = defaultdict(list) + for g, i in subtable.ClassDef2.classDefs.items(): + classes2[i].append(g) + all_pairs = {} + for i, class1 in enumerate(subtable.Class1Record): + for j, class2 in enumerate(class1.Class2Record): + if is_really_zero(class2): + continue + all_pairs[(tuple(sorted(classes1[i])), tuple(sorted(classes2[j])))] = ( + getattr(class2, "Value1", None), + getattr(class2, "Value2", None), + ) + + if len(mode) == 1 and mode in "123456789": + grouped_pairs = cluster_pairs_by_class2_coverage_custom_cost( + font, all_pairs, int(mode) + ) + for pairs in grouped_pairs: + subtables.append( + buildPairPosClassesSubtable(pairs, font.getReverseGlyphMap()) + ) + else: + raise ValueError(f"Bad {GPOS_COMPACT_MODE_ENV_KEY}={mode}") + return subtables + + +def is_really_zero(class2: otTables.Class2Record) -> bool: + v1 = getattr(class2, "Value1", None) + v2 = getattr(class2, "Value2", None) + return (v1 is None or v1.getEffectiveFormat() == 0) and ( + v2 is None or v2.getEffectiveFormat() == 0 + ) + + +Pairs = Dict[ + Tuple[Tuple[str, ...], Tuple[str, ...]], + Tuple[otBase.ValueRecord, otBase.ValueRecord], +] + +# Adapted from https://github.com/fonttools/fonttools/blob/f64f0b42f2d1163b2d85194e0979def539f5dca3/Lib/fontTools/ttLib/tables/otTables.py#L935-L958 +def _getClassRanges(glyphIDs: Iterable[int]): + glyphIDs = sorted(glyphIDs) + last = glyphIDs[0] + ranges = [[last]] + for glyphID in glyphIDs[1:]: + if glyphID != last + 1: + 
ranges[-1].append(last) + ranges.append([glyphID]) + last = glyphID + ranges[-1].append(last) + return ranges, glyphIDs[0], glyphIDs[-1] + + +# Adapted from https://github.com/fonttools/fonttools/blob/f64f0b42f2d1163b2d85194e0979def539f5dca3/Lib/fontTools/ttLib/tables/otTables.py#L960-L989 +def _classDef_bytes( + class_data: List[Tuple[List[Tuple[int, int]], int, int]], + class_ids: List[int], + coverage=False, +): + if not class_ids: + return 0 + first_ranges, min_glyph_id, max_glyph_id = class_data[class_ids[0]] + range_count = len(first_ranges) + for i in class_ids[1:]: + data = class_data[i] + range_count += len(data[0]) + min_glyph_id = min(min_glyph_id, data[1]) + max_glyph_id = max(max_glyph_id, data[2]) + glyphCount = max_glyph_id - min_glyph_id + 1 + # https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-1 + format1_bytes = 6 + glyphCount * 2 + # https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-2 + format2_bytes = 4 + range_count * 6 + return min(format1_bytes, format2_bytes) + + +ClusteringContext = namedtuple( + "ClusteringContext", + [ + "lines", + "all_class1", + "all_class1_data", + "all_class2_data", + "valueFormat1_bytes", + "valueFormat2_bytes", + ], +) + + +class Cluster: + # TODO(Python 3.7): Turn this into a dataclass + # ctx: ClusteringContext + # indices: int + # Caches + # TODO(Python 3.8): use functools.cached_property instead of the + # manually cached properties, and remove the cache fields listed below. 
+ # _indices: Optional[List[int]] = None + # _column_indices: Optional[List[int]] = None + # _cost: Optional[int] = None + + __slots__ = "ctx", "indices_bitmask", "_indices", "_column_indices", "_cost" + + def __init__(self, ctx: ClusteringContext, indices_bitmask: int): + self.ctx = ctx + self.indices_bitmask = indices_bitmask + self._indices = None + self._column_indices = None + self._cost = None + + @property + def indices(self): + if self._indices is None: + self._indices = bit_indices(self.indices_bitmask) + return self._indices + + @property + def column_indices(self): + if self._column_indices is None: + # Indices of columns that have a 1 in at least 1 line + # => binary OR all the lines + bitmask = reduce(int.__or__, (self.ctx.lines[i] for i in self.indices)) + self._column_indices = bit_indices(bitmask) + return self._column_indices + + @property + def width(self): + # Add 1 because Class2=0 cannot be used but needs to be encoded. + return len(self.column_indices) + 1 + + @property + def cost(self): + if self._cost is None: + self._cost = ( + # 2 bytes to store the offset to this subtable in the Lookup table above + 2 + # Contents of the subtable + # From: https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#pair-adjustment-positioning-format-2-class-pair-adjustment + # uint16 posFormat Format identifier: format = 2 + + 2 + # Offset16 coverageOffset Offset to Coverage table, from beginning of PairPos subtable. + + 2 + + self.coverage_bytes + # uint16 valueFormat1 ValueRecord definition — for the first glyph of the pair (may be zero). + + 2 + # uint16 valueFormat2 ValueRecord definition — for the second glyph of the pair (may be zero). + + 2 + # Offset16 classDef1Offset Offset to ClassDef table, from beginning of PairPos subtable — for the first glyph of the pair. + + 2 + + self.classDef1_bytes + # Offset16 classDef2Offset Offset to ClassDef table, from beginning of PairPos subtable — for the second glyph of the pair. 
+ + 2 + + self.classDef2_bytes + # uint16 class1Count Number of classes in classDef1 table — includes Class 0. + + 2 + # uint16 class2Count Number of classes in classDef2 table — includes Class 0. + + 2 + # Class1Record class1Records[class1Count] Array of Class1 records, ordered by classes in classDef1. + + (self.ctx.valueFormat1_bytes + self.ctx.valueFormat2_bytes) + * len(self.indices) + * self.width + ) + return self._cost + + @property + def coverage_bytes(self): + format1_bytes = ( + # From https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#coverage-format-1 + # uint16 coverageFormat Format identifier — format = 1 + # uint16 glyphCount Number of glyphs in the glyph array + 4 + # uint16 glyphArray[glyphCount] Array of glyph IDs — in numerical order + + sum(len(self.ctx.all_class1[i]) for i in self.indices) * 2 + ) + ranges = sorted( + chain.from_iterable(self.ctx.all_class1_data[i][0] for i in self.indices) + ) + merged_range_count = 0 + last = None + for (start, end) in ranges: + if last is not None and start != last + 1: + merged_range_count += 1 + last = end + format2_bytes = ( + # From https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#coverage-format-2 + # uint16 coverageFormat Format identifier — format = 2 + # uint16 rangeCount Number of RangeRecords + 4 + # RangeRecord rangeRecords[rangeCount] Array of glyph ranges — ordered by startGlyphID. + # uint16 startGlyphID First glyph ID in the range + # uint16 endGlyphID Last glyph ID in the range + # uint16 startCoverageIndex Coverage Index of first glyph ID in range + + merged_range_count * 6 + ) + return min(format1_bytes, format2_bytes) + + @property + def classDef1_bytes(self): + # We can skip encoding one of the Class1 definitions, and use + # Class1=0 to represent it instead, because Class1 is gated by the + # Coverage definition. Use Class1=0 for the highest byte savings. 
+ # Going through all options takes too long, pick the biggest class + # = what happens in otlLib.builder.ClassDefBuilder.classes() + biggest_index = max(self.indices, key=lambda i: len(self.ctx.all_class1[i])) + return _classDef_bytes( + self.ctx.all_class1_data, [i for i in self.indices if i != biggest_index] + ) + + @property + def classDef2_bytes(self): + # All Class2 need to be encoded because we can't use Class2=0 + return _classDef_bytes(self.ctx.all_class2_data, self.column_indices) + + +def cluster_pairs_by_class2_coverage_custom_cost( + font: TTFont, + pairs: Pairs, + compression: int = 5, +) -> List[Pairs]: + if not pairs: + # The subtable was actually empty? + return [pairs] + + # Sorted for reproducibility/determinism + all_class1 = sorted(set(pair[0] for pair in pairs)) + all_class2 = sorted(set(pair[1] for pair in pairs)) + + # Use Python's big ints for binary vectors representing each line + lines = [ + sum( + 1 << i if (class1, class2) in pairs else 0 + for i, class2 in enumerate(all_class2) + ) + for class1 in all_class1 + ] + + # Map glyph names to ids and work with ints throughout for ClassDef formats + name_to_id = font.getReverseGlyphMap() + # Each entry in the arrays below is (range_count, min_glyph_id, max_glyph_id) + all_class1_data = [ + _getClassRanges(name_to_id[name] for name in cls) for cls in all_class1 + ] + all_class2_data = [ + _getClassRanges(name_to_id[name] for name in cls) for cls in all_class2 + ] + + format1 = 0 + format2 = 0 + for pair, value in pairs.items(): + format1 |= value[0].getEffectiveFormat() if value[0] else 0 + format2 |= value[1].getEffectiveFormat() if value[1] else 0 + valueFormat1_bytes = bit_count(format1) * 2 + valueFormat2_bytes = bit_count(format2) * 2 + + ctx = ClusteringContext( + lines, + all_class1, + all_class1_data, + all_class2_data, + valueFormat1_bytes, + valueFormat2_bytes, + ) + + cluster_cache: Dict[int, Cluster] = {} + + def make_cluster(indices: int) -> Cluster: + cluster = 
cluster_cache.get(indices, None) + if cluster is not None: + return cluster + cluster = Cluster(ctx, indices) + cluster_cache[indices] = cluster + return cluster + + def merge(cluster: Cluster, other: Cluster) -> Cluster: + return make_cluster(cluster.indices_bitmask | other.indices_bitmask) + + # Agglomerative clustering by hand, checking the cost gain of the new + # cluster against the previously separate clusters + # Start with 1 cluster per line + # cluster = set of lines = new subtable + clusters = [make_cluster(1 << i) for i in range(len(lines))] + + # Cost of 1 cluster with everything + # `(1 << len) - 1` gives a bitmask full of 1's of length `len` + cost_before_splitting = make_cluster((1 << len(lines)) - 1).cost + log.debug(f" len(clusters) = {len(clusters)}") + + while len(clusters) > 1: + lowest_cost_change = None + best_cluster_index = None + best_other_index = None + best_merged = None + for i, cluster in enumerate(clusters): + for j, other in enumerate(clusters[i + 1 :]): + merged = merge(cluster, other) + cost_change = merged.cost - cluster.cost - other.cost + if lowest_cost_change is None or cost_change < lowest_cost_change: + lowest_cost_change = cost_change + best_cluster_index = i + best_other_index = i + 1 + j + best_merged = merged + assert lowest_cost_change is not None + assert best_cluster_index is not None + assert best_other_index is not None + assert best_merged is not None + + # If the best merge we found is still taking down the file size, then + # there's no question: we must do it, because it's beneficial in both + # ways (lower file size and lower number of subtables). However, if the + # best merge we found is not reducing file size anymore, then we need to + # look at the other stop criteria = the compression factor. + if lowest_cost_change > 0: + # Stop critera: check whether we should keep merging. 
+ # Compute size reduction brought by splitting + cost_after_splitting = sum(c.cost for c in clusters) + # size_reduction so that after = before * (1 - size_reduction) + # E.g. before = 1000, after = 800, 1 - 800/1000 = 0.2 + size_reduction = 1 - cost_after_splitting / cost_before_splitting + + # Force more merging by taking into account the compression number. + # Target behaviour: compression number = 1 to 9, default 5 like gzip + # - 1 = accept to add 1 subtable to reduce size by 50% + # - 5 = accept to add 5 subtables to reduce size by 50% + # See https://github.com/harfbuzz/packtab/blob/master/Lib/packTab/__init__.py#L690-L691 + # Given the size reduction we have achieved so far, compute how many + # new subtables are acceptable. + max_new_subtables = -log2(1 - size_reduction) * compression + log.debug( + f" len(clusters) = {len(clusters):3d} size_reduction={size_reduction:5.2f} max_new_subtables={max_new_subtables}", + ) + if compression == 9: + # Override level 9 to mean: create any number of subtables + max_new_subtables = len(clusters) + + # If we have managed to take the number of new subtables below the + # threshold, then we can stop. + if len(clusters) <= max_new_subtables + 1: + break + + # No reason to stop yet, do the merge and move on to the next. 
+ del clusters[best_other_index] + clusters[best_cluster_index] = best_merged + + # All clusters are final; turn bitmasks back into the "Pairs" format + pairs_by_class1: Dict[Tuple[str, ...], Pairs] = defaultdict(dict) + for pair, values in pairs.items(): + pairs_by_class1[pair[0]][pair] = values + pairs_groups: List[Pairs] = [] + for cluster in clusters: + pairs_group: Pairs = dict() + for i in cluster.indices: + class1 = all_class1[i] + pairs_group.update(pairs_by_class1[class1]) + pairs_groups.append(pairs_group) + return pairs_groups diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__init__.py b/.venv/lib/python3.9/site-packages/fontTools/pens/__init__.py new file mode 100644 index 00000000..156cb232 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/__init__.py @@ -0,0 +1 @@ +"""Empty __init__.py file to signal Python this directory is a package.""" diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/__init__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..b8b462dd Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/areaPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/areaPen.cpython-39.pyc new file mode 100644 index 00000000..d5878beb Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/areaPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/basePen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/basePen.cpython-39.pyc new file mode 100644 index 00000000..9082aae7 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/basePen.cpython-39.pyc differ diff --git 
a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/boundsPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/boundsPen.cpython-39.pyc new file mode 100644 index 00000000..1a9dd830 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/boundsPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/cocoaPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/cocoaPen.cpython-39.pyc new file mode 100644 index 00000000..bf2f4be3 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/cocoaPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/cu2quPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/cu2quPen.cpython-39.pyc new file mode 100644 index 00000000..95766e78 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/cu2quPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/filterPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/filterPen.cpython-39.pyc new file mode 100644 index 00000000..a93979bc Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/filterPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/hashPointPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/hashPointPen.cpython-39.pyc new file mode 100644 index 00000000..9e73ada5 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/hashPointPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/momentsPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/momentsPen.cpython-39.pyc new file mode 100644 index 
00000000..7ca6f0bd Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/momentsPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/perimeterPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/perimeterPen.cpython-39.pyc new file mode 100644 index 00000000..53fd99e5 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/perimeterPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/pointInsidePen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/pointInsidePen.cpython-39.pyc new file mode 100644 index 00000000..a7ca8243 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/pointInsidePen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/pointPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/pointPen.cpython-39.pyc new file mode 100644 index 00000000..50ac7562 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/pointPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/qtPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/qtPen.cpython-39.pyc new file mode 100644 index 00000000..3e906674 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/qtPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/quartzPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/quartzPen.cpython-39.pyc new file mode 100644 index 00000000..ac0a7909 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/quartzPen.cpython-39.pyc differ diff --git 
a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/recordingPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/recordingPen.cpython-39.pyc new file mode 100644 index 00000000..96807a0e Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/recordingPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/reportLabPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/reportLabPen.cpython-39.pyc new file mode 100644 index 00000000..930204bb Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/reportLabPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/reverseContourPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/reverseContourPen.cpython-39.pyc new file mode 100644 index 00000000..77de35c7 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/reverseContourPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/roundingPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/roundingPen.cpython-39.pyc new file mode 100644 index 00000000..27bd74c3 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/roundingPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/statisticsPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/statisticsPen.cpython-39.pyc new file mode 100644 index 00000000..96a5cb95 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/statisticsPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/svgPathPen.cpython-39.pyc 
b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/svgPathPen.cpython-39.pyc new file mode 100644 index 00000000..c9d6cb51 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/svgPathPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/t2CharStringPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/t2CharStringPen.cpython-39.pyc new file mode 100644 index 00000000..009f02ab Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/t2CharStringPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/teePen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/teePen.cpython-39.pyc new file mode 100644 index 00000000..0d8b4fcc Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/teePen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/transformPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/transformPen.cpython-39.pyc new file mode 100644 index 00000000..315c3829 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/transformPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/ttGlyphPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/ttGlyphPen.cpython-39.pyc new file mode 100644 index 00000000..bd9b578b Binary files /dev/null and b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/ttGlyphPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/wxPen.cpython-39.pyc b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/wxPen.cpython-39.pyc new file mode 100644 index 00000000..e28b555d Binary files /dev/null and 
b/.venv/lib/python3.9/site-packages/fontTools/pens/__pycache__/wxPen.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/areaPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/areaPen.py new file mode 100644 index 00000000..403afe7b --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/areaPen.py @@ -0,0 +1,57 @@ +"""Calculate the area of a glyph.""" + +from fontTools.pens.basePen import BasePen + + +__all__ = ["AreaPen"] + + +class AreaPen(BasePen): + + def __init__(self, glyphset=None): + BasePen.__init__(self, glyphset) + self.value = 0 + + def _moveTo(self, p0): + self._p0 = self._startPoint = p0 + + def _lineTo(self, p1): + x0, y0 = self._p0 + x1, y1 = p1 + self.value -= (x1 - x0) * (y1 + y0) * .5 + self._p0 = p1 + + def _qCurveToOne(self, p1, p2): + # https://github.com/Pomax/bezierinfo/issues/44 + p0 = self._p0 + x0, y0 = p0[0], p0[1] + x1, y1 = p1[0] - x0, p1[1] - y0 + x2, y2 = p2[0] - x0, p2[1] - y0 + self.value -= (x2 * y1 - x1 * y2) / 3 + self._lineTo(p2) + self._p0 = p2 + + def _curveToOne(self, p1, p2, p3): + # https://github.com/Pomax/bezierinfo/issues/44 + p0 = self._p0 + x0, y0 = p0[0], p0[1] + x1, y1 = p1[0] - x0, p1[1] - y0 + x2, y2 = p2[0] - x0, p2[1] - y0 + x3, y3 = p3[0] - x0, p3[1] - y0 + self.value -= ( + x1 * ( - y2 - y3) + + x2 * (y1 - 2*y3) + + x3 * (y1 + 2*y2 ) + ) * 0.15 + self._lineTo(p3) + self._p0 = p3 + + def _closePath(self): + self._lineTo(self._startPoint) + del self._p0, self._startPoint + + def _endPath(self): + if self._p0 != self._startPoint: + # Area is not defined for open contours. 
+ raise NotImplementedError + del self._p0, self._startPoint diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/basePen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/basePen.py new file mode 100644 index 00000000..72e3918b --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/basePen.py @@ -0,0 +1,408 @@ +"""fontTools.pens.basePen.py -- Tools and base classes to build pen objects. + +The Pen Protocol + +A Pen is a kind of object that standardizes the way how to "draw" outlines: +it is a middle man between an outline and a drawing. In other words: +it is an abstraction for drawing outlines, making sure that outline objects +don't need to know the details about how and where they're being drawn, and +that drawings don't need to know the details of how outlines are stored. + +The most basic pattern is this: + + outline.draw(pen) # 'outline' draws itself onto 'pen' + +Pens can be used to render outlines to the screen, but also to construct +new outlines. Eg. an outline object can be both a drawable object (it has a +draw() method) as well as a pen itself: you *build* an outline using pen +methods. + +The AbstractPen class defines the Pen protocol. It implements almost +nothing (only no-op closePath() and endPath() methods), but is useful +for documentation purposes. Subclassing it basically tells the reader: +"this class implements the Pen protocol.". An examples of an AbstractPen +subclass is fontTools.pens.transformPen.TransformPen. + +The BasePen class is a base implementation useful for pens that actually +draw (for example a pen renders outlines using a native graphics engine). +BasePen contains a lot of base functionality, making it very easy to build +a pen that fully conforms to the pen protocol. Note that if you subclass +BasePen, you _don't_ override moveTo(), lineTo(), etc., but _moveTo(), +_lineTo(), etc. See the BasePen doc string for details. 
Examples of +BasePen subclasses are fontTools.pens.boundsPen.BoundsPen and +fontTools.pens.cocoaPen.CocoaPen. + +Coordinates are usually expressed as (x, y) tuples, but generally any +sequence of length 2 will do. +""" + +from typing import Tuple + +from fontTools.misc.loggingTools import LogMixin + +__all__ = ["AbstractPen", "NullPen", "BasePen", "PenError", + "decomposeSuperBezierSegment", "decomposeQuadraticSegment"] + + +class PenError(Exception): + """Represents an error during penning.""" + + +class AbstractPen: + + def moveTo(self, pt: Tuple[float, float]) -> None: + """Begin a new sub path, set the current point to 'pt'. You must + end each sub path with a call to pen.closePath() or pen.endPath(). + """ + raise NotImplementedError + + def lineTo(self, pt: Tuple[float, float]) -> None: + """Draw a straight line from the current point to 'pt'.""" + raise NotImplementedError + + def curveTo(self, *points: Tuple[float, float]) -> None: + """Draw a cubic bezier with an arbitrary number of control points. + + The last point specified is on-curve, all others are off-curve + (control) points. If the number of control points is > 2, the + segment is split into multiple bezier segments. This works + like this: + + Let n be the number of control points (which is the number of + arguments to this call minus 1). If n==2, a plain vanilla cubic + bezier is drawn. If n==1, we fall back to a quadratic segment and + if n==0 we draw a straight line. It gets interesting when n>2: + n-1 PostScript-style cubic segments will be drawn as if it were + one curve. See decomposeSuperBezierSegment(). + + The conversion algorithm used for n>2 is inspired by NURB + splines, and is conceptually equivalent to the TrueType "implied + points" principle. See also decomposeQuadraticSegment(). + """ + raise NotImplementedError + + def qCurveTo(self, *points: Tuple[float, float]) -> None: + """Draw a whole string of quadratic curve segments. 
+ + The last point specified is on-curve, all others are off-curve + points. + + This method implements TrueType-style curves, breaking up curves + using 'implied points': between each two consequtive off-curve points, + there is one implied point exactly in the middle between them. See + also decomposeQuadraticSegment(). + + The last argument (normally the on-curve point) may be None. + This is to support contours that have NO on-curve points (a rarely + seen feature of TrueType outlines). + """ + raise NotImplementedError + + def closePath(self) -> None: + """Close the current sub path. You must call either pen.closePath() + or pen.endPath() after each sub path. + """ + pass + + def endPath(self) -> None: + """End the current sub path, but don't close it. You must call + either pen.closePath() or pen.endPath() after each sub path. + """ + pass + + def addComponent( + self, + glyphName: str, + transformation: Tuple[float, float, float, float, float, float] + ) -> None: + """Add a sub glyph. The 'transformation' argument must be a 6-tuple + containing an affine transformation, or a Transform object from the + fontTools.misc.transform module. More precisely: it should be a + sequence containing 6 numbers. + """ + raise NotImplementedError + + +class NullPen(AbstractPen): + + """A pen that does nothing. + """ + + def moveTo(self, pt): + pass + + def lineTo(self, pt): + pass + + def curveTo(self, *points): + pass + + def qCurveTo(self, *points): + pass + + def closePath(self): + pass + + def endPath(self): + pass + + def addComponent(self, glyphName, transformation): + pass + + +class LoggingPen(LogMixin, AbstractPen): + """A pen with a `log` property (see fontTools.misc.loggingTools.LogMixin) + """ + pass + + +class MissingComponentError(KeyError): + """Indicates a component pointing to a non-existent glyph in the glyphset.""" + + +class DecomposingPen(LoggingPen): + + """ Implements a 'addComponent' method that decomposes components + (i.e. 
draws them onto self as simple contours). + It can also be used as a mixin class (e.g. see ContourRecordingPen). + + You must override moveTo, lineTo, curveTo and qCurveTo. You may + additionally override closePath, endPath and addComponent. + + By default a warning message is logged when a base glyph is missing; + set the class variable ``skipMissingComponents`` to False if you want + to raise a :class:`MissingComponentError` exception. + """ + + skipMissingComponents = True + + def __init__(self, glyphSet): + """ Takes a single 'glyphSet' argument (dict), in which the glyphs + that are referenced as components are looked up by their name. + """ + super(DecomposingPen, self).__init__() + self.glyphSet = glyphSet + + def addComponent(self, glyphName, transformation): + """ Transform the points of the base glyph and draw it onto self. + """ + from fontTools.pens.transformPen import TransformPen + try: + glyph = self.glyphSet[glyphName] + except KeyError: + if not self.skipMissingComponents: + raise MissingComponentError(glyphName) + self.log.warning( + "glyph '%s' is missing from glyphSet; skipped" % glyphName) + else: + tPen = TransformPen(self, transformation) + glyph.draw(tPen) + + +class BasePen(DecomposingPen): + + """Base class for drawing pens. You must override _moveTo, _lineTo and + _curveToOne. You may additionally override _closePath, _endPath, + addComponent and/or _qCurveToOne. You should not override any other + methods. + """ + + def __init__(self, glyphSet=None): + super(BasePen, self).__init__(glyphSet) + self.__currentPoint = None + + # must override + + def _moveTo(self, pt): + raise NotImplementedError + + def _lineTo(self, pt): + raise NotImplementedError + + def _curveToOne(self, pt1, pt2, pt3): + raise NotImplementedError + + # may override + + def _closePath(self): + pass + + def _endPath(self): + pass + + def _qCurveToOne(self, pt1, pt2): + """This method implements the basic quadratic curve type. 
The + default implementation delegates the work to the cubic curve + function. Optionally override with a native implementation. + """ + pt0x, pt0y = self.__currentPoint + pt1x, pt1y = pt1 + pt2x, pt2y = pt2 + mid1x = pt0x + 0.66666666666666667 * (pt1x - pt0x) + mid1y = pt0y + 0.66666666666666667 * (pt1y - pt0y) + mid2x = pt2x + 0.66666666666666667 * (pt1x - pt2x) + mid2y = pt2y + 0.66666666666666667 * (pt1y - pt2y) + self._curveToOne((mid1x, mid1y), (mid2x, mid2y), pt2) + + # don't override + + def _getCurrentPoint(self): + """Return the current point. This is not part of the public + interface, yet is useful for subclasses. + """ + return self.__currentPoint + + def closePath(self): + self._closePath() + self.__currentPoint = None + + def endPath(self): + self._endPath() + self.__currentPoint = None + + def moveTo(self, pt): + self._moveTo(pt) + self.__currentPoint = pt + + def lineTo(self, pt): + self._lineTo(pt) + self.__currentPoint = pt + + def curveTo(self, *points): + n = len(points) - 1 # 'n' is the number of control points + assert n >= 0 + if n == 2: + # The common case, we have exactly two BCP's, so this is a standard + # cubic bezier. Even though decomposeSuperBezierSegment() handles + # this case just fine, we special-case it anyway since it's so + # common. + self._curveToOne(*points) + self.__currentPoint = points[-1] + elif n > 2: + # n is the number of control points; split curve into n-1 cubic + # bezier segments. The algorithm used here is inspired by NURB + # splines and the TrueType "implied point" principle, and ensures + # the smoothest possible connection between two curve segments, + # with no disruption in the curvature. It is practical since it + # allows one to construct multiple bezier segments with a much + # smaller amount of points. 
+ _curveToOne = self._curveToOne + for pt1, pt2, pt3 in decomposeSuperBezierSegment(points): + _curveToOne(pt1, pt2, pt3) + self.__currentPoint = pt3 + elif n == 1: + self.qCurveTo(*points) + elif n == 0: + self.lineTo(points[0]) + else: + raise AssertionError("can't get there from here") + + def qCurveTo(self, *points): + n = len(points) - 1 # 'n' is the number of control points + assert n >= 0 + if points[-1] is None: + # Special case for TrueType quadratics: it is possible to + # define a contour with NO on-curve points. BasePen supports + # this by allowing the final argument (the expected on-curve + # point) to be None. We simulate the feature by making the implied + # on-curve point between the last and the first off-curve points + # explicit. + x, y = points[-2] # last off-curve point + nx, ny = points[0] # first off-curve point + impliedStartPoint = (0.5 * (x + nx), 0.5 * (y + ny)) + self.__currentPoint = impliedStartPoint + self._moveTo(impliedStartPoint) + points = points[:-1] + (impliedStartPoint,) + if n > 0: + # Split the string of points into discrete quadratic curve + # segments. Between any two consecutive off-curve points + # there's an implied on-curve point exactly in the middle. + # This is where the segment splits. + _qCurveToOne = self._qCurveToOne + for pt1, pt2 in decomposeQuadraticSegment(points): + _qCurveToOne(pt1, pt2) + self.__currentPoint = pt2 + else: + self.lineTo(points[0]) + + +def decomposeSuperBezierSegment(points): + """Split the SuperBezier described by 'points' into a list of regular + bezier segments. The 'points' argument must be a sequence with length + 3 or greater, containing (x, y) coordinates. The last point is the + destination on-curve point, the rest of the points are off-curve points. + The start point should not be supplied. + + This function returns a list of (pt1, pt2, pt3) tuples, which each + specify a regular curveto-style bezier segment. 
+ """ + n = len(points) - 1 + assert n > 1 + bezierSegments = [] + pt1, pt2, pt3 = points[0], None, None + for i in range(2, n+1): + # calculate points in between control points. + nDivisions = min(i, 3, n-i+2) + for j in range(1, nDivisions): + factor = j / nDivisions + temp1 = points[i-1] + temp2 = points[i-2] + temp = (temp2[0] + factor * (temp1[0] - temp2[0]), + temp2[1] + factor * (temp1[1] - temp2[1])) + if pt2 is None: + pt2 = temp + else: + pt3 = (0.5 * (pt2[0] + temp[0]), + 0.5 * (pt2[1] + temp[1])) + bezierSegments.append((pt1, pt2, pt3)) + pt1, pt2, pt3 = temp, None, None + bezierSegments.append((pt1, points[-2], points[-1])) + return bezierSegments + + +def decomposeQuadraticSegment(points): + """Split the quadratic curve segment described by 'points' into a list + of "atomic" quadratic segments. The 'points' argument must be a sequence + with length 2 or greater, containing (x, y) coordinates. The last point + is the destination on-curve point, the rest of the points are off-curve + points. The start point should not be supplied. + + This function returns a list of (pt1, pt2) tuples, which each specify a + plain quadratic bezier segment. 
+ """ + n = len(points) - 1 + assert n > 0 + quadSegments = [] + for i in range(n - 1): + x, y = points[i] + nx, ny = points[i+1] + impliedPt = (0.5 * (x + nx), 0.5 * (y + ny)) + quadSegments.append((points[i], impliedPt)) + quadSegments.append((points[-2], points[-1])) + return quadSegments + + +class _TestPen(BasePen): + """Test class that prints PostScript to stdout.""" + def _moveTo(self, pt): + print("%s %s moveto" % (pt[0], pt[1])) + def _lineTo(self, pt): + print("%s %s lineto" % (pt[0], pt[1])) + def _curveToOne(self, bcp1, bcp2, pt): + print("%s %s %s %s %s %s curveto" % (bcp1[0], bcp1[1], + bcp2[0], bcp2[1], pt[0], pt[1])) + def _closePath(self): + print("closepath") + + +if __name__ == "__main__": + pen = _TestPen(None) + pen.moveTo((0, 0)) + pen.lineTo((0, 100)) + pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0)) + pen.closePath() + + pen = _TestPen(None) + # testing the "no on-curve point" scenario + pen.qCurveTo((0, 0), (0, 100), (100, 100), (100, 0), None) + pen.closePath() diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/boundsPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/boundsPen.py new file mode 100644 index 00000000..810715ca --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/boundsPen.py @@ -0,0 +1,96 @@ +from fontTools.misc.arrayTools import updateBounds, pointInRect, unionRect +from fontTools.misc.bezierTools import calcCubicBounds, calcQuadraticBounds +from fontTools.pens.basePen import BasePen + + +__all__ = ["BoundsPen", "ControlBoundsPen"] + + +class ControlBoundsPen(BasePen): + + """Pen to calculate the "control bounds" of a shape. This is the + bounding box of all control points, so may be larger than the + actual bounding box if there are curves that don't have points + on their extremes. + + When the shape has been drawn, the bounds are available as the + 'bounds' attribute of the pen object. It's a 4-tuple: + (xMin, yMin, xMax, yMax). 
+ + If 'ignoreSinglePoints' is True, single points are ignored. + """ + + def __init__(self, glyphSet, ignoreSinglePoints=False): + BasePen.__init__(self, glyphSet) + self.ignoreSinglePoints = ignoreSinglePoints + self.init() + + def init(self): + self.bounds = None + self._start = None + + def _moveTo(self, pt): + self._start = pt + if not self.ignoreSinglePoints: + self._addMoveTo() + + def _addMoveTo(self): + if self._start is None: + return + bounds = self.bounds + if bounds: + self.bounds = updateBounds(bounds, self._start) + else: + x, y = self._start + self.bounds = (x, y, x, y) + self._start = None + + def _lineTo(self, pt): + self._addMoveTo() + self.bounds = updateBounds(self.bounds, pt) + + def _curveToOne(self, bcp1, bcp2, pt): + self._addMoveTo() + bounds = self.bounds + bounds = updateBounds(bounds, bcp1) + bounds = updateBounds(bounds, bcp2) + bounds = updateBounds(bounds, pt) + self.bounds = bounds + + def _qCurveToOne(self, bcp, pt): + self._addMoveTo() + bounds = self.bounds + bounds = updateBounds(bounds, bcp) + bounds = updateBounds(bounds, pt) + self.bounds = bounds + + +class BoundsPen(ControlBoundsPen): + + """Pen to calculate the bounds of a shape. It calculates the + correct bounds even when the shape contains curves that don't + have points on their extremes. This is somewhat slower to compute + than the "control bounds". + + When the shape has been drawn, the bounds are available as the + 'bounds' attribute of the pen object. 
It's a 4-tuple: + (xMin, yMin, xMax, yMax) + """ + + def _curveToOne(self, bcp1, bcp2, pt): + self._addMoveTo() + bounds = self.bounds + bounds = updateBounds(bounds, pt) + if not pointInRect(bcp1, bounds) or not pointInRect(bcp2, bounds): + bounds = unionRect(bounds, calcCubicBounds( + self._getCurrentPoint(), bcp1, bcp2, pt)) + self.bounds = bounds + + def _qCurveToOne(self, bcp, pt): + self._addMoveTo() + bounds = self.bounds + bounds = updateBounds(bounds, pt) + if not pointInRect(bcp, bounds): + bounds = unionRect(bounds, calcQuadraticBounds( + self._getCurrentPoint(), bcp, pt)) + self.bounds = bounds diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/cocoaPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/cocoaPen.py new file mode 100644 index 00000000..67482b4d --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/cocoaPen.py @@ -0,0 +1,26 @@ +from fontTools.pens.basePen import BasePen + + +__all__ = ["CocoaPen"] + + +class CocoaPen(BasePen): + + def __init__(self, glyphSet, path=None): + BasePen.__init__(self, glyphSet) + if path is None: + from AppKit import NSBezierPath + path = NSBezierPath.bezierPath() + self.path = path + + def _moveTo(self, p): + self.path.moveToPoint_(p) + + def _lineTo(self, p): + self.path.lineToPoint_(p) + + def _curveToOne(self, p1, p2, p3): + self.path.curveToPoint_controlPoint1_controlPoint2_(p3, p1, p2) + + def _closePath(self): + self.path.closePath() diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/cu2quPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/cu2quPen.py new file mode 100644 index 00000000..497585bc --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/cu2quPen.py @@ -0,0 +1,257 @@ +# Copyright 2016 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from fontTools.cu2qu import curve_to_quadratic +from fontTools.pens.basePen import AbstractPen, decomposeSuperBezierSegment +from fontTools.pens.reverseContourPen import ReverseContourPen +from fontTools.pens.pointPen import BasePointToSegmentPen +from fontTools.pens.pointPen import ReverseContourPointPen + + +class Cu2QuPen(AbstractPen): + """ A filter pen to convert cubic bezier curves to quadratic b-splines + using the FontTools SegmentPen protocol. + + other_pen: another SegmentPen used to draw the transformed outline. + max_err: maximum approximation error in font units. For optimal results, + if you know the UPEM of the font, we recommend setting this to a + value equal, or close to UPEM / 1000. + reverse_direction: flip the contours' direction but keep starting point. + stats: a dictionary counting the point numbers of quadratic segments. + ignore_single_points: don't emit contours containing only a single point + + NOTE: The "ignore_single_points" argument is deprecated since v1.3.0, + which dropped Robofab subpport. It's no longer needed to special-case + UFO2-style anchors (aka "named points") when using ufoLib >= 2.0, + as these are no longer drawn onto pens as single-point contours, + but are handled separately as anchors. 
+ """ + + def __init__(self, other_pen, max_err, reverse_direction=False, + stats=None, ignore_single_points=False): + if reverse_direction: + self.pen = ReverseContourPen(other_pen) + else: + self.pen = other_pen + self.max_err = max_err + self.stats = stats + if ignore_single_points: + import warnings + warnings.warn("ignore_single_points is deprecated and " + "will be removed in future versions", + UserWarning, stacklevel=2) + self.ignore_single_points = ignore_single_points + self.start_pt = None + self.current_pt = None + + def _check_contour_is_open(self): + if self.current_pt is None: + raise AssertionError("moveTo is required") + + def _check_contour_is_closed(self): + if self.current_pt is not None: + raise AssertionError("closePath or endPath is required") + + def _add_moveTo(self): + if self.start_pt is not None: + self.pen.moveTo(self.start_pt) + self.start_pt = None + + def moveTo(self, pt): + self._check_contour_is_closed() + self.start_pt = self.current_pt = pt + if not self.ignore_single_points: + self._add_moveTo() + + def lineTo(self, pt): + self._check_contour_is_open() + self._add_moveTo() + self.pen.lineTo(pt) + self.current_pt = pt + + def qCurveTo(self, *points): + self._check_contour_is_open() + n = len(points) + if n == 1: + self.lineTo(points[0]) + elif n > 1: + self._add_moveTo() + self.pen.qCurveTo(*points) + self.current_pt = points[-1] + else: + raise AssertionError("illegal qcurve segment point count: %d" % n) + + def _curve_to_quadratic(self, pt1, pt2, pt3): + curve = (self.current_pt, pt1, pt2, pt3) + quadratic = curve_to_quadratic(curve, self.max_err) + if self.stats is not None: + n = str(len(quadratic) - 2) + self.stats[n] = self.stats.get(n, 0) + 1 + self.qCurveTo(*quadratic[1:]) + + def curveTo(self, *points): + self._check_contour_is_open() + n = len(points) + if n == 3: + # this is the most common case, so we special-case it + self._curve_to_quadratic(*points) + elif n > 3: + for segment in 
decomposeSuperBezierSegment(points): + self._curve_to_quadratic(*segment) + elif n == 2: + self.qCurveTo(*points) + elif n == 1: + self.lineTo(points[0]) + else: + raise AssertionError("illegal curve segment point count: %d" % n) + + def closePath(self): + self._check_contour_is_open() + if self.start_pt is None: + # if 'start_pt' is _not_ None, we are ignoring single-point paths + self.pen.closePath() + self.current_pt = self.start_pt = None + + def endPath(self): + self._check_contour_is_open() + if self.start_pt is None: + self.pen.endPath() + self.current_pt = self.start_pt = None + + def addComponent(self, glyphName, transformation): + self._check_contour_is_closed() + self.pen.addComponent(glyphName, transformation) + + +class Cu2QuPointPen(BasePointToSegmentPen): + """ A filter pen to convert cubic bezier curves to quadratic b-splines + using the RoboFab PointPen protocol. + + other_point_pen: another PointPen used to draw the transformed outline. + max_err: maximum approximation error in font units. For optimal results, + if you know the UPEM of the font, we recommend setting this to a + value equal, or close to UPEM / 1000. + reverse_direction: reverse the winding direction of all contours. + stats: a dictionary counting the point numbers of quadratic segments. 
+ """ + + def __init__(self, other_point_pen, max_err, reverse_direction=False, + stats=None): + BasePointToSegmentPen.__init__(self) + if reverse_direction: + self.pen = ReverseContourPointPen(other_point_pen) + else: + self.pen = other_point_pen + self.max_err = max_err + self.stats = stats + + def _flushContour(self, segments): + assert len(segments) >= 1 + closed = segments[0][0] != "move" + new_segments = [] + prev_points = segments[-1][1] + prev_on_curve = prev_points[-1][0] + for segment_type, points in segments: + if segment_type == 'curve': + for sub_points in self._split_super_bezier_segments(points): + on_curve, smooth, name, kwargs = sub_points[-1] + bcp1, bcp2 = sub_points[0][0], sub_points[1][0] + cubic = [prev_on_curve, bcp1, bcp2, on_curve] + quad = curve_to_quadratic(cubic, self.max_err) + if self.stats is not None: + n = str(len(quad) - 2) + self.stats[n] = self.stats.get(n, 0) + 1 + new_points = [(pt, False, None, {}) for pt in quad[1:-1]] + new_points.append((on_curve, smooth, name, kwargs)) + new_segments.append(["qcurve", new_points]) + prev_on_curve = sub_points[-1][0] + else: + new_segments.append([segment_type, points]) + prev_on_curve = points[-1][0] + if closed: + # the BasePointToSegmentPen.endPath method that calls _flushContour + # rotates the point list of closed contours so that they end with + # the first on-curve point. We restore the original starting point. 
+ new_segments = new_segments[-1:] + new_segments[:-1] + self._drawPoints(new_segments) + + def _split_super_bezier_segments(self, points): + sub_segments = [] + # n is the number of control points + n = len(points) - 1 + if n == 2: + # a simple bezier curve segment + sub_segments.append(points) + elif n > 2: + # a "super" bezier; decompose it + on_curve, smooth, name, kwargs = points[-1] + num_sub_segments = n - 1 + for i, sub_points in enumerate(decomposeSuperBezierSegment([ + pt for pt, _, _, _ in points])): + new_segment = [] + for point in sub_points[:-1]: + new_segment.append((point, False, None, {})) + if i == (num_sub_segments - 1): + # the last on-curve keeps its original attributes + new_segment.append((on_curve, smooth, name, kwargs)) + else: + # on-curves of sub-segments are always "smooth" + new_segment.append((sub_points[-1], True, None, {})) + sub_segments.append(new_segment) + else: + raise AssertionError( + "expected 2 control points, found: %d" % n) + return sub_segments + + def _drawPoints(self, segments): + pen = self.pen + pen.beginPath() + last_offcurves = [] + for i, (segment_type, points) in enumerate(segments): + if segment_type in ("move", "line"): + assert len(points) == 1, ( + "illegal line segment point count: %d" % len(points)) + pt, smooth, name, kwargs = points[0] + pen.addPoint(pt, segment_type, smooth, name, **kwargs) + elif segment_type == "qcurve": + assert len(points) >= 2, ( + "illegal qcurve segment point count: %d" % len(points)) + offcurves = points[:-1] + if offcurves: + if i == 0: + # any off-curve points preceding the first on-curve + # will be appended at the end of the contour + last_offcurves = offcurves + else: + for (pt, smooth, name, kwargs) in offcurves: + pen.addPoint(pt, None, smooth, name, **kwargs) + pt, smooth, name, kwargs = points[-1] + if pt is None: + # special quadratic contour with no on-curve points: + # we need to skip the "None" point. 
See also the Pen + # protocol's qCurveTo() method and fontTools.pens.basePen + pass + else: + pen.addPoint(pt, segment_type, smooth, name, **kwargs) + else: + # 'curve' segments must have been converted to 'qcurve' by now + raise AssertionError( + "unexpected segment type: %r" % segment_type) + for (pt, smooth, name, kwargs) in last_offcurves: + pen.addPoint(pt, None, smooth, name, **kwargs) + pen.endPath() + + def addComponent(self, baseGlyphName, transformation): + assert self.currentPath is None + self.pen.addComponent(baseGlyphName, transformation) diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/filterPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/filterPen.py new file mode 100644 index 00000000..4355ba41 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/filterPen.py @@ -0,0 +1,158 @@ +from fontTools.pens.basePen import AbstractPen +from fontTools.pens.pointPen import AbstractPointPen +from fontTools.pens.recordingPen import RecordingPen + + +class _PassThruComponentsMixin(object): + + def addComponent(self, glyphName, transformation, **kwargs): + self._outPen.addComponent(glyphName, transformation, **kwargs) + + +class FilterPen(_PassThruComponentsMixin, AbstractPen): + + """ Base class for pens that apply some transformation to the coordinates + they receive and pass them to another pen. + + You can override any of its methods. The default implementation does + nothing, but passes the commands unmodified to the other pen. 
+ + >>> from fontTools.pens.recordingPen import RecordingPen + >>> rec = RecordingPen() + >>> pen = FilterPen(rec) + >>> v = iter(rec.value) + + >>> pen.moveTo((0, 0)) + >>> next(v) + ('moveTo', ((0, 0),)) + + >>> pen.lineTo((1, 1)) + >>> next(v) + ('lineTo', ((1, 1),)) + + >>> pen.curveTo((2, 2), (3, 3), (4, 4)) + >>> next(v) + ('curveTo', ((2, 2), (3, 3), (4, 4))) + + >>> pen.qCurveTo((5, 5), (6, 6), (7, 7), (8, 8)) + >>> next(v) + ('qCurveTo', ((5, 5), (6, 6), (7, 7), (8, 8))) + + >>> pen.closePath() + >>> next(v) + ('closePath', ()) + + >>> pen.moveTo((9, 9)) + >>> next(v) + ('moveTo', ((9, 9),)) + + >>> pen.endPath() + >>> next(v) + ('endPath', ()) + + >>> pen.addComponent('foo', (1, 0, 0, 1, 0, 0)) + >>> next(v) + ('addComponent', ('foo', (1, 0, 0, 1, 0, 0))) + """ + + def __init__(self, outPen): + self._outPen = outPen + + def moveTo(self, pt): + self._outPen.moveTo(pt) + + def lineTo(self, pt): + self._outPen.lineTo(pt) + + def curveTo(self, *points): + self._outPen.curveTo(*points) + + def qCurveTo(self, *points): + self._outPen.qCurveTo(*points) + + def closePath(self): + self._outPen.closePath() + + def endPath(self): + self._outPen.endPath() + + +class ContourFilterPen(_PassThruComponentsMixin, RecordingPen): + """A "buffered" filter pen that accumulates contour data, passes + it through a ``filterContour`` method when the contour is closed or ended, + and finally draws the result with the output pen. + + Components are passed through unchanged. 
+ """ + + def __init__(self, outPen): + super(ContourFilterPen, self).__init__() + self._outPen = outPen + + def closePath(self): + super(ContourFilterPen, self).closePath() + self._flushContour() + + def endPath(self): + super(ContourFilterPen, self).endPath() + self._flushContour() + + def _flushContour(self): + result = self.filterContour(self.value) + if result is not None: + self.value = result + self.replay(self._outPen) + self.value = [] + + def filterContour(self, contour): + """Subclasses must override this to perform the filtering. + + The contour is a list of pen (operator, operands) tuples. + Operators are strings corresponding to the AbstractPen methods: + "moveTo", "lineTo", "curveTo", "qCurveTo", "closePath" and + "endPath". The operands are the positional arguments that are + passed to each method. + + If the method doesn't return a value (i.e. returns None), it's + assumed that the argument was modified in-place. + Otherwise, the return value is drawn with the output pen. + """ + return # or return contour + + +class FilterPointPen(_PassThruComponentsMixin, AbstractPointPen): + """ Baseclass for point pens that apply some transformation to the + coordinates they receive and pass them to another point pen. + + You can override any of its methods. The default implementation does + nothing, but passes the commands unmodified to the other pen. 
+ + >>> from fontTools.pens.recordingPen import RecordingPointPen + >>> rec = RecordingPointPen() + >>> pen = FilterPointPen(rec) + >>> v = iter(rec.value) + >>> pen.beginPath(identifier="abc") + >>> next(v) + ('beginPath', (), {'identifier': 'abc'}) + >>> pen.addPoint((1, 2), "line", False) + >>> next(v) + ('addPoint', ((1, 2), 'line', False, None), {}) + >>> pen.addComponent("a", (2, 0, 0, 2, 10, -10), identifier="0001") + >>> next(v) + ('addComponent', ('a', (2, 0, 0, 2, 10, -10)), {'identifier': '0001'}) + >>> pen.endPath() + >>> next(v) + ('endPath', (), {}) + """ + + def __init__(self, outPointPen): + self._outPen = outPointPen + + def beginPath(self, **kwargs): + self._outPen.beginPath(**kwargs) + + def endPath(self): + self._outPen.endPath() + + def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs): + self._outPen.addPoint(pt, segmentType, smooth, name, **kwargs) diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/hashPointPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/hashPointPen.py new file mode 100644 index 00000000..9aef5d87 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/hashPointPen.py @@ -0,0 +1,77 @@ +# Modified from https://github.com/adobe-type-tools/psautohint/blob/08b346865710ed3c172f1eb581d6ef243b203f99/python/psautohint/ufoFont.py#L800-L838 +import hashlib + +from fontTools.pens.basePen import MissingComponentError +from fontTools.pens.pointPen import AbstractPointPen + + +class HashPointPen(AbstractPointPen): + """ + This pen can be used to check if a glyph's contents (outlines plus + components) have changed. + + Components are added as the original outline plus each composite's + transformation. + + Example: You have some TrueType hinting code for a glyph which you want to + compile. The hinting code specifies a hash value computed with HashPointPen + that was valid for the glyph's outlines at the time the hinting code was + written. 
Now you can calculate the hash for the glyph's current outlines to + check if the outlines have changed, which would probably make the hinting + code invalid. + + > glyph = ufo[name] + > hash_pen = HashPointPen(glyph.width, ufo) + > glyph.drawPoints(hash_pen) + > ttdata = glyph.lib.get("public.truetype.instructions", None) + > stored_hash = ttdata.get("id", None) # The hash is stored in the "id" key + > if stored_hash is None or stored_hash != hash_pen.hash: + > logger.error(f"Glyph hash mismatch, glyph '{name}' will have no instructions in font.") + > else: + > # The hash values are identical, the outline has not changed. + > # Compile the hinting code ... + > pass + """ + + def __init__(self, glyphWidth=0, glyphSet=None): + self.glyphset = glyphSet + self.data = ["w%s" % round(glyphWidth, 9)] + + @property + def hash(self): + data = "".join(self.data) + if len(data) >= 128: + data = hashlib.sha512(data.encode("ascii")).hexdigest() + return data + + def beginPath(self, identifier=None, **kwargs): + pass + + def endPath(self): + self.data.append("|") + + def addPoint( + self, + pt, + segmentType=None, + smooth=False, + name=None, + identifier=None, + **kwargs, + ): + if segmentType is None: + pt_type = "o" # offcurve + else: + pt_type = segmentType[0] + self.data.append(f"{pt_type}{pt[0]:g}{pt[1]:+g}") + + def addComponent( + self, baseGlyphName, transformation, identifier=None, **kwargs + ): + tr = "".join([f"{t:+}" for t in transformation]) + self.data.append("[") + try: + self.glyphset[baseGlyphName].drawPoints(self) + except KeyError: + raise MissingComponentError(baseGlyphName) + self.data.append(f"({tr})]") diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/momentsPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/momentsPen.py new file mode 100644 index 00000000..8c90f70a --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/momentsPen.py @@ -0,0 +1,297 @@ +"""Pen calculating 0th, 1st, and 2nd moments of area of glyph shapes. 
+This is low-level, autogenerated pen. Use statisticsPen instead.""" +from fontTools.pens.basePen import BasePen + + +__all__ = ["MomentsPen"] + + +class OpenContourError(NotImplementedError): + pass + + +class MomentsPen(BasePen): + + def __init__(self, glyphset=None): + BasePen.__init__(self, glyphset) + + self.area = 0 + self.momentX = 0 + self.momentY = 0 + self.momentXX = 0 + self.momentXY = 0 + self.momentYY = 0 + + def _moveTo(self, p0): + self.__startPoint = p0 + + def _closePath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + self._lineTo(self.__startPoint) + + def _endPath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + raise OpenContourError( + "Green theorem is not defined on open contours." + ) + + def _lineTo(self, p1): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 + + r0 = x1*y0 + r1 = x1*y1 + r2 = x1**2 + r3 = x0**2 + r4 = 2*y0 + r5 = y0 - y1 + r6 = r5*x0 + r7 = y0**2 + r8 = y1**2 + r9 = x1**3 + r10 = r4*y1 + r11 = y0**3 + r12 = y1**3 + + self.area += -r0/2 - r1/2 + x0*(y0 + y1)/2 + self.momentX += -r2*y0/6 - r2*y1/3 + r3*(r4 + y1)/6 - r6*x1/6 + self.momentY += -r0*y1/6 - r7*x1/6 - r8*x1/6 + x0*(r7 + r8 + y0*y1)/6 + self.momentXX += -r2*r6/12 - r3*r5*x1/12 - r9*y0/12 - r9*y1/4 + x0**3*(3*y0 + y1)/12 + self.momentXY += -r10*r2/24 - r2*r7/24 - r2*r8/8 + r3*(r10 + 3*r7 + r8)/24 - x0*x1*(r7 - r8)/12 + self.momentYY += -r0*r8/12 - r1*r7/12 - r11*x1/12 - r12*x1/12 + x0*(r11 + r12 + r7*y1 + r8*y0)/12 + + def _qCurveToOne(self, p1, p2): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 + x2,y2 = p2 + + r0 = 2*x1 + r1 = r0*y2 + r2 = 2*y1 + r3 = r2*x2 + r4 = 3*y2 + r5 = r4*x2 + r6 = 3*y0 + r7 = x1**2 + r8 = 2*y2 + r9 = x2**2 + r10 = 4*y1 + r11 = 10*y2 + r12 = r0*x2 + r13 = x0**2 + r14 = 10*y0 + r15 = x2*y2 + r16 = r0*y1 + r15 + r17 = 4*x1 + r18 = x2*y0 + r19 = r10*r15 + r20 = y1**2 + r21 = 2*r20 + r22 = y2**2 + r23 = r22*x2 + r24 = 5*r23 + r25 = y0**2 + r26 = y0*y2 + r27 = 5*r25 + r28 = 8*x1**3 + r29 = x2**3 + r30 = 
30*y1 + r31 = 6*y1 + r32 = 10*r9*x1 + r33 = 4*r7 + r34 = 5*y2 + r35 = 12*r7 + r36 = r5 + 20*x1*y1 + r37 = 30*x1 + r38 = 12*x1 + r39 = 20*r7 + r40 = 8*r7*y1 + r41 = r34*r9 + r42 = 60*y1 + r43 = 20*r20 + r44 = 4*r20 + r45 = 15*r22 + r46 = r38*x2 + r47 = y1*y2 + r48 = 8*r20*x1 + r24 + r49 = 6*x1 + r50 = 8*y1**3 + r51 = y2**3 + r52 = y0**3 + r53 = 10*y1 + r54 = 12*y1 + r55 = 12*r20 + + self.area += r1/6 - r3/6 - r5/6 + x0*(r2 + r6 + y2)/6 - y0*(r0 + x2)/6 + self.momentX += -r10*r9/30 - r11*r9/30 - r12*(-r8 + y1)/30 + r13*(r10 + r14 + y2)/30 + r7*r8/30 + x0*(r1 + r16 - r17*y0 - r18)/30 - y0*(r12 + 2*r7 + r9)/30 + self.momentY += r1*(r8 + y1)/30 - r19/30 - r21*x2/30 - r24/30 - r25*(r17 + x2)/30 + x0*(r10*y0 + r2*y2 + r21 + r22 + r26 + r27)/30 - y0*(r16 + r3)/30 + self.momentXX += r13*(r11*x1 - 5*r18 + r3 + r36 - r37*y0)/420 + r28*y2/420 - r29*r30/420 - r29*y2/4 - r32*(r2 - r4)/420 - r33*x2*(r2 - r34)/420 + x0**3*(r31 + 21*y0 + y2)/84 - x0*(-r15*r38 + r18*r38 + r2*r9 - r35*y2 + r39*y0 - r40 - r41 + r6*r9)/420 - y0*(r28 + 5*r29 + r32 + r35*x2)/420 + self.momentXY += r13*(r14*y2 + 3*r22 + 105*r25 + r42*y0 + r43 + 12*r47)/840 - r17*x2*(r44 - r45)/840 - r22*r9/8 - r25*(r39 + r46 + 3*r9)/840 + r33*y2*(r10 + r34)/840 - r42*r9*y2/840 - r43*r9/840 + x0*(-r10*r18 + r17*r26 + r19 + r22*r49 - r25*r37 - r27*x2 + r38*r47 + r48)/420 - y0*(r15*r17 + r31*r9 + r40 + r41 + r46*y1)/420 + self.momentYY += r1*(r11*y1 + r44 + r45)/420 - r15*r43/420 - r23*r30/420 - r25*(r1 + r36 + r53*x2)/420 - r50*x2/420 - r51*x2/12 - r52*(r49 + x2)/84 + x0*(r22*r53 + r22*r6 + r25*r30 + r25*r34 + r26*r54 + r43*y0 + r50 + 5*r51 + 35*r52 + r55*y2)/420 - y0*(-r0*r22 + r15*r54 + r48 + r55*x2)/420 + + def _curveToOne(self, p1, p2, p3): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 + x2,y2 = p2 + x3,y3 = p3 + + r0 = 6*x2 + r1 = r0*y3 + r2 = 6*y2 + r3 = 10*y3 + r4 = r3*x3 + r5 = 3*x1 + r6 = 3*y1 + r7 = 6*x1 + r8 = 3*x2 + r9 = 6*y1 + r10 = 3*y2 + r11 = x2**2 + r12 = r11*y3 + r13 = 45*r12 + r14 = x3**2 + r15 = r14*y2 
+ r16 = r14*y3 + r17 = x2*x3 + r18 = 15*r17 + r19 = 7*y3 + r20 = x1**2 + r21 = 9*r20 + r22 = x0**2 + r23 = 21*y1 + r24 = 9*r11 + r25 = 9*x2 + r26 = x2*y3 + r27 = 15*r26 + r28 = -r25*y1 + r27 + r29 = r25*y2 + r30 = r9*x3 + r31 = 45*x1 + r32 = x1*x3 + r33 = 45*r20 + r34 = 5*r14 + r35 = x2*y2 + r36 = 18*r35 + r37 = 5*x3 + r38 = r37*y3 + r39 = r31*y1 + r36 + r38 + r40 = x1*y0 + r41 = x1*y3 + r42 = x2*y0 + r43 = x3*y1 + r44 = r10*x3 + r45 = x3*y2*y3 + r46 = y2**2 + r47 = 45*r46 + r48 = r47*x3 + r49 = y3**2 + r50 = r49*x3 + r51 = y1**2 + r52 = 9*r51 + r53 = y0**2 + r54 = 21*x1 + r55 = x3*y2 + r56 = 15*r55 + r57 = 9*y2 + r58 = y2*y3 + r59 = 15*r58 + r60 = 9*r46 + r61 = 3*y3 + r62 = 45*y1 + r63 = r8*y3 + r64 = y0*y1 + r65 = y0*y2 + r66 = 30*r65 + r67 = 5*y3 + r68 = y1*y3 + r69 = 45*r51 + r70 = 5*r49 + r71 = x2**3 + r72 = x3**3 + r73 = 126*x3 + r74 = x1**3 + r75 = r14*x2 + r76 = 63*r11 + r77 = r76*x3 + r78 = 15*r35 + r79 = r19*x3 + r80 = x1*y1 + r81 = 63*r35 + r82 = r38 + 378*r80 + r81 + r83 = x1*y2 + r84 = x2*y1 + r85 = x3*y0 + r86 = x2*x3*y1 + r87 = x2*x3*y3 + r88 = r11*y2 + r89 = 27*r88 + r90 = 42*y3 + r91 = r14*r90 + r92 = 90*x1*x2 + r93 = 189*x2 + r94 = 30*x1*x3 + r95 = 14*r16 + 126*r20*y1 + 45*r88 + r94*y2 + r96 = x1*x2 + r97 = 252*r96 + r98 = x1*x2*y2 + r99 = 42*r32 + r100 = x1*x3*y1 + r101 = 30*r17 + r102 = 18*r17 + r103 = 378*r20 + r104 = 189*y2 + r105 = r20*y3 + r106 = r11*y1 + r107 = r14*y1 + r108 = 378*r46 + r109 = 252*y2 + r110 = y1*y2 + r111 = x2*x3*y2 + r112 = y0*y3 + r113 = 378*r51 + r114 = 63*r46 + r115 = 27*x2 + r116 = r115*r46 + 42*r50 + r117 = x2*y1*y3 + r118 = x3*y1*y2 + r119 = r49*x2 + r120 = r51*x3 + r121 = x3*y3 + r122 = 14*x3 + r123 = 30*r117 + r122*r49 + r47*x2 + 126*r51*x1 + r124 = x1*y1*y3 + r125 = x1*y2*y3 + r126 = x2*y1*y2 + r127 = 54*y3 + r128 = 21*r55 + r129 = 630*r53 + r130 = r46*x1 + r131 = r49*x1 + r132 = 126*r53 + r133 = y2**3 + r134 = y3**3 + r135 = 630*r49 + r136 = y1**3 + r137 = y0**3 + r138 = r114*y3 + r23*r49 + r139 = r49*y2 + + 
self.area += r1/20 - r2*x3/20 - r4/20 + r5*(y2 + y3)/20 - r6*(x2 + x3)/20 + x0*(r10 + r9 + 10*y0 + y3)/20 - y0*(r7 + r8 + x3)/20 + self.momentX += r13/840 - r15/8 - r16/3 - r18*(r10 - r19)/840 + r21*(r10 + 2*y3)/840 + r22*(r2 + r23 + 56*y0 + y3)/168 + r5*(r28 + r29 - r30 + r4)/840 - r6*(10*r14 + r18 + r24)/840 + x0*(12*r26 + r31*y2 - r37*y0 + r39 - 105*r40 + 15*r41 - 30*r42 - 3*r43 + r44)/840 - y0*(18*r11 + r18 + r31*x2 + 12*r32 + r33 + r34)/840 + self.momentY += r27*(r10 + r19)/840 - r45/8 - r48/840 + r5*(10*r49 + r57*y1 + r59 + r60 + r9*y3)/840 - r50/6 - r52*(r8 + 2*x3)/840 - r53*(r0 + r54 + x3)/168 - r6*(r29 + r4 + r56)/840 + x0*(18*r46 + 140*r53 + r59 + r62*y2 + 105*r64 + r66 + r67*y0 + 12*r68 + r69 + r70)/840 - y0*(r39 + 15*r43 + 12*r55 - r61*x1 + r62*x2 + r63)/840 + self.momentXX += -r11*r73*(-r61 + y2)/9240 + r21*(r28 - r37*y1 + r44 + r78 + r79)/9240 + r22*(21*r26 - 630*r40 + 42*r41 - 126*r42 + r57*x3 + r82 + 210*r83 + 42*r84 - 14*r85)/9240 - r5*(r11*r62 + r14*r23 + 14*r15 - r76*y3 + 54*r86 - 84*r87 - r89 - r91)/9240 - r6*(27*r71 + 42*r72 + 70*r75 + r77)/9240 + 3*r71*y3/220 - 3*r72*y2/44 - r72*y3/4 + 3*r74*(r57 + r67)/3080 - r75*(378*y2 - 630*y3)/9240 + x0**3*(r57 + r62 + 165*y0 + y3)/660 + x0*(-18*r100 - r101*y0 - r101*y1 + r102*y2 - r103*y0 + r104*r20 + 63*r105 - 27*r106 - 9*r107 + r13 - r34*y0 - r76*y0 + 42*r87 + r92*y3 + r94*y3 + r95 - r97*y0 + 162*r98 - r99*y0)/9240 - y0*(135*r11*x1 + r14*r54 + r20*r93 + r33*x3 + 45*r71 + 14*r72 + 126*r74 + 42*r75 + r77 + r92*x3)/9240 + self.momentXY += -r108*r14/18480 + r12*(r109 + 378*y3)/18480 - r14*r49/8 - 3*r14*r58/44 - r17*(252*r46 - 1260*r49)/18480 + r21*(18*r110 + r3*y1 + 15*r46 + 7*r49 + 18*r58)/18480 + r22*(252*r110 + 28*r112 + r113 + r114 + 2310*r53 + 30*r58 + 1260*r64 + 252*r65 + 42*r68 + r70)/18480 - r52*(r102 + 15*r11 + 7*r14)/18480 - r53*(r101 + r103 + r34 + r76 + r97 + r99)/18480 + r7*(-r115*r51 + r116 + 18*r117 - 18*r118 + 42*r119 - 15*r120 + 28*r45 + r81*y3)/18480 - r9*(63*r111 + 42*r15 + 28*r87 + r89 
+ r91)/18480 + x0*(r1*y0 + r104*r80 + r112*r54 + 21*r119 - 9*r120 - r122*r53 + r123 + 54*r124 + 60*r125 + 54*r126 + r127*r35 + r128*y3 - r129*x1 + 81*r130 + 15*r131 - r132*x2 - r2*r85 - r23*r85 + r30*y3 + 84*r40*y2 - 84*r42*y1 + r60*x3)/9240 - y0*(54*r100 - 9*r105 + 81*r106 + 15*r107 + 54*r111 + r121*r7 + 21*r15 + r24*y3 + 60*r86 + 21*r87 + r95 + 189*r96*y1 + 54*r98)/9240 + self.momentYY += -r108*r121/9240 - r133*r73/9240 - r134*x3/12 - r135*r55/9240 - 3*r136*(r25 + r37)/3080 - r137*(r25 + r31 + x3)/660 + r26*(r135 + 126*r46 + 378*y2*y3)/9240 + r5*(r110*r127 + 27*r133 + 42*r134 + r138 + 70*r139 + r46*r62 + 27*r51*y2 + 15*r51*y3)/9240 - r52*(r56 + r63 + r78 + r79)/9240 - r53*(r128 + r25*y3 + 42*r43 + r82 + 42*r83 + 210*r84)/9240 - r6*(r114*x3 + r116 - 14*r119 + 84*r45)/9240 + x0*(r104*r51 + r109*r64 + 90*r110*y3 + r113*y0 + r114*y0 + r129*y1 + r132*y2 + 45*r133 + 14*r134 + 126*r136 + 770*r137 + r138 + 42*r139 + 135*r46*y1 + 14*r53*y3 + r64*r90 + r66*y3 + r69*y3 + r70*y0)/9240 - y0*(90*r118 + 63*r120 + r123 - 18*r124 - 30*r125 + 162*r126 - 27*r130 - 9*r131 + r36*y3 + 30*r43*y3 + 42*r45 + r48 + r51*r93)/9240 + +if __name__ == '__main__': + from fontTools.misc.symfont import x, y, printGreenPen + printGreenPen('MomentsPen', [ + ('area', 1), + ('momentX', x), + ('momentY', y), + ('momentXX', x**2), + ('momentXY', x*y), + ('momentYY', y**2), + ]) diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/perimeterPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/perimeterPen.py new file mode 100644 index 00000000..9a09cb8f --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/perimeterPen.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +"""Calculate the perimeter of a glyph.""" + +from fontTools.pens.basePen import BasePen +from fontTools.misc.bezierTools import approximateQuadraticArcLengthC, calcQuadraticArcLengthC, approximateCubicArcLengthC, calcCubicArcLengthC +import math + + +__all__ = ["PerimeterPen"] + + +def _distance(p0, p1): + return 
math.hypot(p0[0] - p1[0], p0[1] - p1[1]) + +class PerimeterPen(BasePen): + + def __init__(self, glyphset=None, tolerance=0.005): + BasePen.__init__(self, glyphset) + self.value = 0 + self.tolerance = tolerance + + # Choose which algorithm to use for quadratic and for cubic. + # Quadrature is faster but has fixed error characteristic with no strong + # error bound. The cutoff points are derived empirically. + self._addCubic = self._addCubicQuadrature if tolerance >= 0.0015 else self._addCubicRecursive + self._addQuadratic = self._addQuadraticQuadrature if tolerance >= 0.00075 else self._addQuadraticExact + + def _moveTo(self, p0): + self.__startPoint = p0 + + def _closePath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + self._lineTo(self.__startPoint) + + def _lineTo(self, p1): + p0 = self._getCurrentPoint() + self.value += _distance(p0, p1) + + def _addQuadraticExact(self, c0, c1, c2): + self.value += calcQuadraticArcLengthC(c0, c1, c2) + + def _addQuadraticQuadrature(self, c0, c1, c2): + self.value += approximateQuadraticArcLengthC(c0, c1, c2) + + def _qCurveToOne(self, p1, p2): + p0 = self._getCurrentPoint() + self._addQuadratic(complex(*p0), complex(*p1), complex(*p2)) + + def _addCubicRecursive(self, c0, c1, c2, c3): + self.value += calcCubicArcLengthC(c0, c1, c2, c3, self.tolerance) + + def _addCubicQuadrature(self, c0, c1, c2, c3): + self.value += approximateCubicArcLengthC(c0, c1, c2, c3) + + def _curveToOne(self, p1, p2, p3): + p0 = self._getCurrentPoint() + self._addCubic(complex(*p0), complex(*p1), complex(*p2), complex(*p3)) diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/pointInsidePen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/pointInsidePen.py new file mode 100644 index 00000000..34597f40 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/pointInsidePen.py @@ -0,0 +1,190 @@ +"""fontTools.pens.pointInsidePen -- Pen implementing "point inside" testing +for shapes. 
+""" + +from fontTools.pens.basePen import BasePen +from fontTools.misc.bezierTools import solveQuadratic, solveCubic + + +__all__ = ["PointInsidePen"] + + +class PointInsidePen(BasePen): + + """This pen implements "point inside" testing: to test whether + a given point lies inside the shape (black) or outside (white). + Instances of this class can be recycled, as long as the + setTestPoint() method is used to set the new point to test. + + Typical usage: + + pen = PointInsidePen(glyphSet, (100, 200)) + outline.draw(pen) + isInside = pen.getResult() + + Both the even-odd algorithm and the non-zero-winding-rule + algorithm are implemented. The latter is the default, specify + True for the evenOdd argument of __init__ or setTestPoint + to use the even-odd algorithm. + """ + + # This class implements the classical "shoot a ray from the test point + # to infinity and count how many times it intersects the outline" (as well + # as the non-zero variant, where the counter is incremented if the outline + # intersects the ray in one direction and decremented if it intersects in + # the other direction). + # I found an amazingly clear explanation of the subtleties involved in + # implementing this correctly for polygons here: + # http://graphics.cs.ucdavis.edu/~okreylos/TAship/Spring2000/PointInPolygon.html + # I extended the principles outlined on that page to curves. + + def __init__(self, glyphSet, testPoint, evenOdd=False): + BasePen.__init__(self, glyphSet) + self.setTestPoint(testPoint, evenOdd) + + def setTestPoint(self, testPoint, evenOdd=False): + """Set the point to test. Call this _before_ the outline gets drawn.""" + self.testPoint = testPoint + self.evenOdd = evenOdd + self.firstPoint = None + self.intersectionCount = 0 + + def getWinding(self): + if self.firstPoint is not None: + # always make sure the sub paths are closed; the algorithm only works + # for closed paths. 
+ self.closePath() + return self.intersectionCount + + def getResult(self): + """After the shape has been drawn, getResult() returns True if the test + point lies within the (black) shape, and False if it doesn't. + """ + winding = self.getWinding() + if self.evenOdd: + result = winding % 2 + else: # non-zero + result = self.intersectionCount != 0 + return not not result + + def _addIntersection(self, goingUp): + if self.evenOdd or goingUp: + self.intersectionCount += 1 + else: + self.intersectionCount -= 1 + + def _moveTo(self, point): + if self.firstPoint is not None: + # always make sure the sub paths are closed; the algorithm only works + # for closed paths. + self.closePath() + self.firstPoint = point + + def _lineTo(self, point): + x, y = self.testPoint + x1, y1 = self._getCurrentPoint() + x2, y2 = point + + if x1 < x and x2 < x: + return + if y1 < y and y2 < y: + return + if y1 >= y and y2 >= y: + return + + dx = x2 - x1 + dy = y2 - y1 + t = (y - y1) / dy + ix = dx * t + x1 + if ix < x: + return + self._addIntersection(y2 > y1) + + def _curveToOne(self, bcp1, bcp2, point): + x, y = self.testPoint + x1, y1 = self._getCurrentPoint() + x2, y2 = bcp1 + x3, y3 = bcp2 + x4, y4 = point + + if x1 < x and x2 < x and x3 < x and x4 < x: + return + if y1 < y and y2 < y and y3 < y and y4 < y: + return + if y1 >= y and y2 >= y and y3 >= y and y4 >= y: + return + + dy = y1 + cy = (y2 - dy) * 3.0 + by = (y3 - y2) * 3.0 - cy + ay = y4 - dy - cy - by + solutions = sorted(solveCubic(ay, by, cy, dy - y)) + solutions = [t for t in solutions if -0. <= t <= 1.] 
+ if not solutions: + return + + dx = x1 + cx = (x2 - dx) * 3.0 + bx = (x3 - x2) * 3.0 - cx + ax = x4 - dx - cx - bx + + above = y1 >= y + lastT = None + for t in solutions: + if t == lastT: + continue + lastT = t + t2 = t * t + t3 = t2 * t + + direction = 3*ay*t2 + 2*by*t + cy + incomingGoingUp = outgoingGoingUp = direction > 0.0 + if direction == 0.0: + direction = 6*ay*t + 2*by + outgoingGoingUp = direction > 0.0 + incomingGoingUp = not outgoingGoingUp + if direction == 0.0: + direction = ay + incomingGoingUp = outgoingGoingUp = direction > 0.0 + + xt = ax*t3 + bx*t2 + cx*t + dx + if xt < x: + continue + + if t in (0.0, -0.0): + if not outgoingGoingUp: + self._addIntersection(outgoingGoingUp) + elif t == 1.0: + if incomingGoingUp: + self._addIntersection(incomingGoingUp) + else: + if incomingGoingUp == outgoingGoingUp: + self._addIntersection(outgoingGoingUp) + #else: + # we're not really intersecting, merely touching + + def _qCurveToOne_unfinished(self, bcp, point): + # XXX need to finish this, for now doing it through a cubic + # (BasePen implements _qCurveTo in terms of a cubic) will + # have to do. 
+ x, y = self.testPoint + x1, y1 = self._getCurrentPoint() + x2, y2 = bcp + x3, y3 = point + c = y1 + b = (y2 - c) * 2.0 + a = y3 - c - b + solutions = sorted(solveQuadratic(a, b, c - y)) + solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON] + if not solutions: + return + # XXX + + def _closePath(self): + if self._getCurrentPoint() != self.firstPoint: + self.lineTo(self.firstPoint) + self.firstPoint = None + + def _endPath(self): + """Insideness is not defined for open contours.""" + raise NotImplementedError diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/pointPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/pointPen.py new file mode 100644 index 00000000..4c3148bf --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/pointPen.py @@ -0,0 +1,493 @@ +""" +========= +PointPens +========= + +Where **SegmentPens** have an intuitive approach to drawing +(if you're familiar with postscript anyway), the **PointPen** +is geared towards accessing all the data in the contours of +the glyph. A PointPen has a very simple interface, it just +steps through all the points in a call from glyph.drawPoints(). +This allows the caller to provide more data for each point. +For instance, whether or not a point is smooth, and its name. 
+""" + +import math +from typing import Any, Optional, Tuple + +from fontTools.pens.basePen import AbstractPen, PenError + +__all__ = [ + "AbstractPointPen", + "BasePointToSegmentPen", + "PointToSegmentPen", + "SegmentToPointPen", + "GuessSmoothPointPen", + "ReverseContourPointPen", +] + + +class AbstractPointPen: + """Baseclass for all PointPens.""" + + def beginPath(self, identifier: Optional[str] = None, **kwargs: Any) -> None: + """Start a new sub path.""" + raise NotImplementedError + + def endPath(self) -> None: + """End the current sub path.""" + raise NotImplementedError + + def addPoint( + self, + pt: Tuple[float, float], + segmentType: Optional[str] = None, + smooth: bool = False, + name: Optional[str] = None, + identifier: Optional[str] = None, + **kwargs: Any + ) -> None: + """Add a point to the current sub path.""" + raise NotImplementedError + + def addComponent( + self, + baseGlyphName: str, + transformation: Tuple[float, float, float, float, float, float], + identifier: Optional[str] = None, + **kwargs: Any + ) -> None: + """Add a sub glyph.""" + raise NotImplementedError + + +class BasePointToSegmentPen(AbstractPointPen): + """ + Base class for retrieving the outline in a segment-oriented + way. The PointPen protocol is simple yet also a little tricky, + so when you need an outline presented as segments but you have + as points, do use this base implementation as it properly takes + care of all the edge cases. + """ + + def __init__(self): + self.currentPath = None + + def beginPath(self, identifier=None, **kwargs): + if self.currentPath is not None: + raise PenError("Path already begun.") + self.currentPath = [] + + def _flushContour(self, segments): + """Override this method. + + It will be called for each non-empty sub path with a list + of segments: the 'segments' argument. + + The segments list contains tuples of length 2: + (segmentType, points) + + segmentType is one of "move", "line", "curve" or "qcurve". 
+ "move" may only occur as the first segment, and it signifies + an OPEN path. A CLOSED path does NOT start with a "move", in + fact it will not contain a "move" at ALL. + + The 'points' field in the 2-tuple is a list of point info + tuples. The list has 1 or more items, a point tuple has + four items: + (point, smooth, name, kwargs) + 'point' is an (x, y) coordinate pair. + + For a closed path, the initial moveTo point is defined as + the last point of the last segment. + + The 'points' list of "move" and "line" segments always contains + exactly one point tuple. + """ + raise NotImplementedError + + def endPath(self): + if self.currentPath is None: + raise PenError("Path not begun.") + points = self.currentPath + self.currentPath = None + if not points: + return + if len(points) == 1: + # Not much more we can do than output a single move segment. + pt, segmentType, smooth, name, kwargs = points[0] + segments = [("move", [(pt, smooth, name, kwargs)])] + self._flushContour(segments) + return + segments = [] + if points[0][1] == "move": + # It's an open contour, insert a "move" segment for the first + # point and remove that first point from the point list. + pt, segmentType, smooth, name, kwargs = points[0] + segments.append(("move", [(pt, smooth, name, kwargs)])) + points.pop(0) + else: + # It's a closed contour. Locate the first on-curve point, and + # rotate the point list so that it _ends_ with an on-curve + # point. + firstOnCurve = None + for i in range(len(points)): + segmentType = points[i][1] + if segmentType is not None: + firstOnCurve = i + break + if firstOnCurve is None: + # Special case for quadratics: a contour with no on-curve + # points. Add a "None" point. (See also the Pen protocol's + # qCurveTo() method and fontTools.pens.basePen.py.) 
+ points.append((None, "qcurve", None, None, None)) + else: + points = points[firstOnCurve+1:] + points[:firstOnCurve+1] + + currentSegment = [] + for pt, segmentType, smooth, name, kwargs in points: + currentSegment.append((pt, smooth, name, kwargs)) + if segmentType is None: + continue + segments.append((segmentType, currentSegment)) + currentSegment = [] + + self._flushContour(segments) + + def addPoint(self, pt, segmentType=None, smooth=False, name=None, + identifier=None, **kwargs): + if self.currentPath is None: + raise PenError("Path not begun") + self.currentPath.append((pt, segmentType, smooth, name, kwargs)) + + +class PointToSegmentPen(BasePointToSegmentPen): + """ + Adapter class that converts the PointPen protocol to the + (Segment)Pen protocol. + + NOTE: The segment pen does not support and will drop point names, identifiers + and kwargs. + """ + + def __init__(self, segmentPen, outputImpliedClosingLine=False): + BasePointToSegmentPen.__init__(self) + self.pen = segmentPen + self.outputImpliedClosingLine = outputImpliedClosingLine + + def _flushContour(self, segments): + if not segments: + raise PenError("Must have at least one segment.") + pen = self.pen + if segments[0][0] == "move": + # It's an open path. + closed = False + points = segments[0][1] + if len(points) != 1: + raise PenError(f"Illegal move segment point count: {len(points)}") + movePt, _, _ , _ = points[0] + del segments[0] + else: + # It's a closed path, do a moveTo to the last + # point of the last segment. + closed = True + segmentType, points = segments[-1] + movePt, _, _ , _ = points[-1] + if movePt is None: + # quad special case: a contour with no on-curve points contains + # one "qcurve" segment that ends with a point that's None. We + # must not output a moveTo() in that case. 
+ pass + else: + pen.moveTo(movePt) + outputImpliedClosingLine = self.outputImpliedClosingLine + nSegments = len(segments) + lastPt = movePt + for i in range(nSegments): + segmentType, points = segments[i] + points = [pt for pt, _, _ , _ in points] + if segmentType == "line": + if len(points) != 1: + raise PenError(f"Illegal line segment point count: {len(points)}") + pt = points[0] + # For closed contours, a 'lineTo' is always implied from the last oncurve + # point to the starting point, thus we can omit it when the last and + # starting point don't overlap. + # However, when the last oncurve point is a "line" segment and has same + # coordinates as the starting point of a closed contour, we need to output + # the closing 'lineTo' explicitly (regardless of the value of the + # 'outputImpliedClosingLine' option) in order to disambiguate this case from + # the implied closing 'lineTo', otherwise the duplicate point would be lost. + # See https://github.com/googlefonts/fontmake/issues/572. + if ( + i + 1 != nSegments + or outputImpliedClosingLine + or not closed + or pt == lastPt + ): + pen.lineTo(pt) + lastPt = pt + elif segmentType == "curve": + pen.curveTo(*points) + lastPt = points[-1] + elif segmentType == "qcurve": + pen.qCurveTo(*points) + lastPt = points[-1] + else: + raise PenError(f"Illegal segmentType: {segmentType}") + if closed: + pen.closePath() + else: + pen.endPath() + + def addComponent(self, glyphName, transform, identifier=None, **kwargs): + del identifier # unused + del kwargs # unused + self.pen.addComponent(glyphName, transform) + + +class SegmentToPointPen(AbstractPen): + """ + Adapter class that converts the (Segment)Pen protocol to the + PointPen protocol. 
+ """ + + def __init__(self, pointPen, guessSmooth=True): + if guessSmooth: + self.pen = GuessSmoothPointPen(pointPen) + else: + self.pen = pointPen + self.contour = None + + def _flushContour(self): + pen = self.pen + pen.beginPath() + for pt, segmentType in self.contour: + pen.addPoint(pt, segmentType=segmentType) + pen.endPath() + + def moveTo(self, pt): + self.contour = [] + self.contour.append((pt, "move")) + + def lineTo(self, pt): + if self.contour is None: + raise PenError("Contour missing required initial moveTo") + self.contour.append((pt, "line")) + + def curveTo(self, *pts): + if not pts: + raise TypeError("Must pass in at least one point") + if self.contour is None: + raise PenError("Contour missing required initial moveTo") + for pt in pts[:-1]: + self.contour.append((pt, None)) + self.contour.append((pts[-1], "curve")) + + def qCurveTo(self, *pts): + if not pts: + raise TypeError("Must pass in at least one point") + if pts[-1] is None: + self.contour = [] + else: + if self.contour is None: + raise PenError("Contour missing required initial moveTo") + for pt in pts[:-1]: + self.contour.append((pt, None)) + if pts[-1] is not None: + self.contour.append((pts[-1], "qcurve")) + + def closePath(self): + if self.contour is None: + raise PenError("Contour missing required initial moveTo") + if len(self.contour) > 1 and self.contour[0][0] == self.contour[-1][0]: + self.contour[0] = self.contour[-1] + del self.contour[-1] + else: + # There's an implied line at the end, replace "move" with "line" + # for the first point + pt, tp = self.contour[0] + if tp == "move": + self.contour[0] = pt, "line" + self._flushContour() + self.contour = None + + def endPath(self): + if self.contour is None: + raise PenError("Contour missing required initial moveTo") + self._flushContour() + self.contour = None + + def addComponent(self, glyphName, transform): + if self.contour is not None: + raise PenError("Components must be added before or after contours") + 
self.pen.addComponent(glyphName, transform) + + +class GuessSmoothPointPen(AbstractPointPen): + """ + Filtering PointPen that tries to determine whether an on-curve point + should be "smooth", ie. that it's a "tangent" point or a "curve" point. + """ + + def __init__(self, outPen, error=0.05): + self._outPen = outPen + self._error = error + self._points = None + + def _flushContour(self): + if self._points is None: + raise PenError("Path not begun") + points = self._points + nPoints = len(points) + if not nPoints: + return + if points[0][1] == "move": + # Open path. + indices = range(1, nPoints - 1) + elif nPoints > 1: + # Closed path. To avoid having to mod the contour index, we + # simply abuse Python's negative index feature, and start at -1 + indices = range(-1, nPoints - 1) + else: + # closed path containing 1 point (!), ignore. + indices = [] + for i in indices: + pt, segmentType, _, name, kwargs = points[i] + if segmentType is None: + continue + prev = i - 1 + next = i + 1 + if points[prev][1] is not None and points[next][1] is not None: + continue + # At least one of our neighbors is an off-curve point + pt = points[i][0] + prevPt = points[prev][0] + nextPt = points[next][0] + if pt != prevPt and pt != nextPt: + dx1, dy1 = pt[0] - prevPt[0], pt[1] - prevPt[1] + dx2, dy2 = nextPt[0] - pt[0], nextPt[1] - pt[1] + a1 = math.atan2(dy1, dx1) + a2 = math.atan2(dy2, dx2) + if abs(a1 - a2) < self._error: + points[i] = pt, segmentType, True, name, kwargs + + for pt, segmentType, smooth, name, kwargs in points: + self._outPen.addPoint(pt, segmentType, smooth, name, **kwargs) + + def beginPath(self, identifier=None, **kwargs): + if self._points is not None: + raise PenError("Path already begun") + self._points = [] + if identifier is not None: + kwargs["identifier"] = identifier + self._outPen.beginPath(**kwargs) + + def endPath(self): + self._flushContour() + self._outPen.endPath() + self._points = None + + def addPoint(self, pt, segmentType=None, smooth=False, 
name=None, + identifier=None, **kwargs): + if self._points is None: + raise PenError("Path not begun") + if identifier is not None: + kwargs["identifier"] = identifier + self._points.append((pt, segmentType, False, name, kwargs)) + + def addComponent(self, glyphName, transformation, identifier=None, **kwargs): + if self._points is not None: + raise PenError("Components must be added before or after contours") + if identifier is not None: + kwargs["identifier"] = identifier + self._outPen.addComponent(glyphName, transformation, **kwargs) + + +class ReverseContourPointPen(AbstractPointPen): + """ + This is a PointPen that passes outline data to another PointPen, but + reversing the winding direction of all contours. Components are simply + passed through unchanged. + + Closed contours are reversed in such a way that the first point remains + the first point. + """ + + def __init__(self, outputPointPen): + self.pen = outputPointPen + # a place to store the points for the current sub path + self.currentContour = None + + def _flushContour(self): + pen = self.pen + contour = self.currentContour + if not contour: + pen.beginPath(identifier=self.currentContourIdentifier) + pen.endPath() + return + + closed = contour[0][1] != "move" + if not closed: + lastSegmentType = "move" + else: + # Remove the first point and insert it at the end. When + # the list of points gets reversed, this point will then + # again be at the start. In other words, the following + # will hold: + # for N in range(len(originalContour)): + # originalContour[N] == reversedContour[-N] + contour.append(contour.pop(0)) + # Find the first on-curve point. + firstOnCurve = None + for i in range(len(contour)): + if contour[i][1] is not None: + firstOnCurve = i + break + if firstOnCurve is None: + # There are no on-curve points, be basically have to + # do nothing but contour.reverse(). 
+ lastSegmentType = None + else: + lastSegmentType = contour[firstOnCurve][1] + + contour.reverse() + if not closed: + # Open paths must start with a move, so we simply dump + # all off-curve points leading up to the first on-curve. + while contour[0][1] is None: + contour.pop(0) + pen.beginPath(identifier=self.currentContourIdentifier) + for pt, nextSegmentType, smooth, name, kwargs in contour: + if nextSegmentType is not None: + segmentType = lastSegmentType + lastSegmentType = nextSegmentType + else: + segmentType = None + pen.addPoint(pt, segmentType=segmentType, smooth=smooth, name=name, **kwargs) + pen.endPath() + + def beginPath(self, identifier=None, **kwargs): + if self.currentContour is not None: + raise PenError("Path already begun") + self.currentContour = [] + self.currentContourIdentifier = identifier + self.onCurve = [] + + def endPath(self): + if self.currentContour is None: + raise PenError("Path not begun") + self._flushContour() + self.currentContour = None + + def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs): + if self.currentContour is None: + raise PenError("Path not begun") + if identifier is not None: + kwargs["identifier"] = identifier + self.currentContour.append((pt, segmentType, smooth, name, kwargs)) + + def addComponent(self, glyphName, transform, identifier=None, **kwargs): + if self.currentContour is not None: + raise PenError("Components must be added before or after contours") + self.pen.addComponent(glyphName, transform, identifier=identifier, **kwargs) diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/qtPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/qtPen.py new file mode 100644 index 00000000..34736453 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/qtPen.py @@ -0,0 +1,29 @@ +from fontTools.pens.basePen import BasePen + + +__all__ = ["QtPen"] + + +class QtPen(BasePen): + + def __init__(self, glyphSet, path=None): + BasePen.__init__(self, 
glyphSet) + if path is None: + from PyQt5.QtGui import QPainterPath + path = QPainterPath() + self.path = path + + def _moveTo(self, p): + self.path.moveTo(*p) + + def _lineTo(self, p): + self.path.lineTo(*p) + + def _curveToOne(self, p1, p2, p3): + self.path.cubicTo(*p1+p2+p3) + + def _qCurveToOne(self, p1, p2): + self.path.quadTo(*p1+p2) + + def _closePath(self): + self.path.closeSubpath() diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/quartzPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/quartzPen.py new file mode 100644 index 00000000..16b9c2d8 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/quartzPen.py @@ -0,0 +1,45 @@ +from fontTools.pens.basePen import BasePen + +from Quartz.CoreGraphics import CGPathCreateMutable, CGPathMoveToPoint +from Quartz.CoreGraphics import CGPathAddLineToPoint, CGPathAddCurveToPoint +from Quartz.CoreGraphics import CGPathAddQuadCurveToPoint, CGPathCloseSubpath + + +__all__ = ["QuartzPen"] + + +class QuartzPen(BasePen): + + """A pen that creates a CGPath + + Parameters + - path: an optional CGPath to add to + - xform: an optional CGAffineTransform to apply to the path + """ + + def __init__(self, glyphSet, path=None, xform=None): + BasePen.__init__(self, glyphSet) + if path is None: + path = CGPathCreateMutable() + self.path = path + self.xform = xform + + def _moveTo(self, pt): + x, y = pt + CGPathMoveToPoint(self.path, self.xform, x, y) + + def _lineTo(self, pt): + x, y = pt + CGPathAddLineToPoint(self.path, self.xform, x, y) + + def _curveToOne(self, p1, p2, p3): + (x1, y1), (x2, y2), (x3, y3) = p1, p2, p3 + CGPathAddCurveToPoint(self.path, self.xform, x1, y1, x2, y2, x3, y3) + + def _qCurveToOne(self, p1, p2): + (x1, y1), (x2, y2) = p1, p2 + CGPathAddQuadCurveToPoint(self.path, self.xform, x1, y1, x2, y2) + + def _closePath(self): + CGPathCloseSubpath(self.path) + diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/recordingPen.py 
b/.venv/lib/python3.9/site-packages/fontTools/pens/recordingPen.py new file mode 100644 index 00000000..203082a4 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/recordingPen.py @@ -0,0 +1,155 @@ +"""Pen recording operations that can be accessed or replayed.""" +from fontTools.pens.basePen import AbstractPen, DecomposingPen +from fontTools.pens.pointPen import AbstractPointPen + + +__all__ = [ + "replayRecording", + "RecordingPen", + "DecomposingRecordingPen", + "RecordingPointPen", +] + + +def replayRecording(recording, pen): + """Replay a recording, as produced by RecordingPen or DecomposingRecordingPen, + to a pen. + + Note that recording does not have to be produced by those pens. + It can be any iterable of tuples of method name and tuple-of-arguments. + Likewise, pen can be any objects receiving those method calls. + """ + for operator,operands in recording: + getattr(pen, operator)(*operands) + + +class RecordingPen(AbstractPen): + """Pen recording operations that can be accessed or replayed. + + The recording can be accessed as pen.value; or replayed using + pen.replay(otherPen). 
+ + Usage example: + ============== + from fontTools.ttLib import TTFont + from fontTools.pens.recordingPen import RecordingPen + + glyph_name = 'dollar' + font_path = 'MyFont.otf' + + font = TTFont(font_path) + glyphset = font.getGlyphSet() + glyph = glyphset[glyph_name] + + pen = RecordingPen() + glyph.draw(pen) + print(pen.value) + """ + + def __init__(self): + self.value = [] + def moveTo(self, p0): + self.value.append(('moveTo', (p0,))) + def lineTo(self, p1): + self.value.append(('lineTo', (p1,))) + def qCurveTo(self, *points): + self.value.append(('qCurveTo', points)) + def curveTo(self, *points): + self.value.append(('curveTo', points)) + def closePath(self): + self.value.append(('closePath', ())) + def endPath(self): + self.value.append(('endPath', ())) + def addComponent(self, glyphName, transformation): + self.value.append(('addComponent', (glyphName, transformation))) + def replay(self, pen): + replayRecording(self.value, pen) + + +class DecomposingRecordingPen(DecomposingPen, RecordingPen): + """ Same as RecordingPen, except that it doesn't keep components + as references, but draws them decomposed as regular contours. + + The constructor takes a single 'glyphSet' positional argument, + a dictionary of glyph objects (i.e. with a 'draw' method) keyed + by thir name. + + >>> class SimpleGlyph(object): + ... def draw(self, pen): + ... pen.moveTo((0, 0)) + ... pen.curveTo((1, 1), (2, 2), (3, 3)) + ... pen.closePath() + >>> class CompositeGlyph(object): + ... def draw(self, pen): + ... pen.addComponent('a', (1, 0, 0, 1, -1, 1)) + >>> glyphSet = {'a': SimpleGlyph(), 'b': CompositeGlyph()} + >>> for name, glyph in sorted(glyphSet.items()): + ... pen = DecomposingRecordingPen(glyphSet) + ... glyph.draw(pen) + ... 
print("{}: {}".format(name, pen.value)) + a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())] + b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())] + """ + # raises KeyError if base glyph is not found in glyphSet + skipMissingComponents = False + + +class RecordingPointPen(AbstractPointPen): + """PointPen recording operations that can be accessed or replayed. + + The recording can be accessed as pen.value; or replayed using + pointPen.replay(otherPointPen). + + Usage example: + ============== + from defcon import Font + from fontTools.pens.recordingPen import RecordingPointPen + + glyph_name = 'a' + font_path = 'MyFont.ufo' + + font = Font(font_path) + glyph = font[glyph_name] + + pen = RecordingPointPen() + glyph.drawPoints(pen) + print(pen.value) + + new_glyph = font.newGlyph('b') + pen.replay(new_glyph.getPointPen()) + """ + + def __init__(self): + self.value = [] + + def beginPath(self, identifier=None, **kwargs): + if identifier is not None: + kwargs["identifier"] = identifier + self.value.append(("beginPath", (), kwargs)) + + def endPath(self): + self.value.append(("endPath", (), {})) + + def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs): + if identifier is not None: + kwargs["identifier"] = identifier + self.value.append(("addPoint", (pt, segmentType, smooth, name), kwargs)) + + def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs): + if identifier is not None: + kwargs["identifier"] = identifier + self.value.append(("addComponent", (baseGlyphName, transformation), kwargs)) + + def replay(self, pointPen): + for operator, args, kwargs in self.value: + getattr(pointPen, operator)(*args, **kwargs) + + +if __name__ == "__main__": + pen = RecordingPen() + pen.moveTo((0, 0)) + pen.lineTo((0, 100)) + pen.curveTo((50, 75), (60, 50), (50, 25)) + pen.closePath() + from pprint import pprint + pprint(pen.value) diff --git 
a/.venv/lib/python3.9/site-packages/fontTools/pens/reportLabPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/reportLabPen.py new file mode 100644 index 00000000..c0a4610b --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/reportLabPen.py @@ -0,0 +1,73 @@ +from fontTools.pens.basePen import BasePen +from reportlab.graphics.shapes import Path + + +__all__ = ["ReportLabPen"] + + +class ReportLabPen(BasePen): + + """A pen for drawing onto a reportlab.graphics.shapes.Path object.""" + + def __init__(self, glyphSet, path=None): + BasePen.__init__(self, glyphSet) + if path is None: + path = Path() + self.path = path + + def _moveTo(self, p): + (x,y) = p + self.path.moveTo(x,y) + + def _lineTo(self, p): + (x,y) = p + self.path.lineTo(x,y) + + def _curveToOne(self, p1, p2, p3): + (x1,y1) = p1 + (x2,y2) = p2 + (x3,y3) = p3 + self.path.curveTo(x1, y1, x2, y2, x3, y3) + + def _closePath(self): + self.path.closePath() + + +if __name__=="__main__": + import sys + if len(sys.argv) < 3: + print("Usage: reportLabPen.py []") + print(" If no image file name is created, by default .png is created.") + print(" example: reportLabPen.py Arial.TTF R test.png") + print(" (The file format will be PNG, regardless of the image file name supplied)") + sys.exit(0) + + from fontTools.ttLib import TTFont + from reportlab.lib import colors + + path = sys.argv[1] + glyphName = sys.argv[2] + if (len(sys.argv) > 3): + imageFile = sys.argv[3] + else: + imageFile = "%s.png" % glyphName + + font = TTFont(path) # it would work just as well with fontTools.t1Lib.T1Font + gs = font.getGlyphSet() + pen = ReportLabPen(gs, Path(fillColor=colors.red, strokeWidth=5)) + g = gs[glyphName] + g.draw(pen) + + w, h = g.width, 1000 + from reportlab.graphics import renderPM + from reportlab.graphics.shapes import Group, Drawing, scale + + # Everything is wrapped in a group to allow transformations. 
+ g = Group(pen.path) + g.translate(0, 200) + g.scale(0.3, 0.3) + + d = Drawing(w, h) + d.add(g) + + renderPM.drawToFile(d, imageFile, fmt="PNG") diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/reverseContourPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/reverseContourPen.py new file mode 100644 index 00000000..9b3241b6 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/reverseContourPen.py @@ -0,0 +1,95 @@ +from fontTools.misc.arrayTools import pairwise +from fontTools.pens.filterPen import ContourFilterPen + + +__all__ = ["reversedContour", "ReverseContourPen"] + + +class ReverseContourPen(ContourFilterPen): + """Filter pen that passes outline data to another pen, but reversing + the winding direction of all contours. Components are simply passed + through unchanged. + + Closed contours are reversed in such a way that the first point remains + the first point. + """ + + def filterContour(self, contour): + return reversedContour(contour) + + +def reversedContour(contour): + """ Generator that takes a list of pen's (operator, operands) tuples, + and yields them with the winding direction reversed. 
+ """ + if not contour: + return # nothing to do, stop iteration + + # valid contours must have at least a starting and ending command, + # can't have one without the other + assert len(contour) > 1, "invalid contour" + + # the type of the last command determines if the contour is closed + contourType = contour.pop()[0] + assert contourType in ("endPath", "closePath") + closed = contourType == "closePath" + + firstType, firstPts = contour.pop(0) + assert firstType in ("moveTo", "qCurveTo"), ( + "invalid initial segment type: %r" % firstType) + firstOnCurve = firstPts[-1] + if firstType == "qCurveTo": + # special case for TrueType paths contaning only off-curve points + assert firstOnCurve is None, ( + "off-curve only paths must end with 'None'") + assert not contour, ( + "only one qCurveTo allowed per off-curve path") + firstPts = ((firstPts[0],) + tuple(reversed(firstPts[1:-1])) + + (None,)) + + if not contour: + # contour contains only one segment, nothing to reverse + if firstType == "moveTo": + closed = False # single-point paths can't be closed + else: + closed = True # off-curve paths are closed by definition + yield firstType, firstPts + else: + lastType, lastPts = contour[-1] + lastOnCurve = lastPts[-1] + if closed: + # for closed paths, we keep the starting point + yield firstType, firstPts + if firstOnCurve != lastOnCurve: + # emit an implied line between the last and first points + yield "lineTo", (lastOnCurve,) + contour[-1] = (lastType, + tuple(lastPts[:-1]) + (firstOnCurve,)) + + if len(contour) > 1: + secondType, secondPts = contour[0] + else: + # contour has only two points, the second and last are the same + secondType, secondPts = lastType, lastPts + # if a lineTo follows the initial moveTo, after reversing it + # will be implied by the closePath, so we don't emit one; + # unless the lineTo and moveTo overlap, in which case we keep the + # duplicate points + if secondType == "lineTo" and firstPts != secondPts: + del contour[0] + if contour: + 
contour[-1] = (lastType, + tuple(lastPts[:-1]) + secondPts) + else: + # for open paths, the last point will become the first + yield firstType, (lastOnCurve,) + contour[-1] = (lastType, tuple(lastPts[:-1]) + (firstOnCurve,)) + + # we iterate over all segment pairs in reverse order, and yield + # each one with the off-curve points reversed (if any), and + # with the on-curve point of the following segment + for (curType, curPts), (_, nextPts) in pairwise( + contour, reverse=True): + yield curType, tuple(reversed(curPts[:-1])) + (nextPts[-1],) + + yield "closePath" if closed else "endPath", () diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/roundingPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/roundingPen.py new file mode 100644 index 00000000..2a7c476c --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/roundingPen.py @@ -0,0 +1,112 @@ +from fontTools.misc.roundTools import otRound +from fontTools.misc.transform import Transform +from fontTools.pens.filterPen import FilterPen, FilterPointPen + + +__all__ = ["RoundingPen", "RoundingPointPen"] + + +class RoundingPen(FilterPen): + """ + Filter pen that rounds point coordinates and component XY offsets to integer. + + >>> from fontTools.pens.recordingPen import RecordingPen + >>> recpen = RecordingPen() + >>> roundpen = RoundingPen(recpen) + >>> roundpen.moveTo((0.4, 0.6)) + >>> roundpen.lineTo((1.6, 2.5)) + >>> roundpen.qCurveTo((2.4, 4.6), (3.3, 5.7), (4.9, 6.1)) + >>> roundpen.curveTo((6.4, 8.6), (7.3, 9.7), (8.9, 10.1)) + >>> roundpen.addComponent("a", (1.5, 0, 0, 1.5, 10.5, -10.5)) + >>> recpen.value == [ + ... ('moveTo', ((0, 1),)), + ... ('lineTo', ((2, 3),)), + ... ('qCurveTo', ((2, 5), (3, 6), (5, 6))), + ... ('curveTo', ((6, 9), (7, 10), (9, 10))), + ... ('addComponent', ('a', (1.5, 0, 0, 1.5, 11, -10))), + ... 
] + True + """ + + def __init__(self, outPen, roundFunc=otRound): + super().__init__(outPen) + self.roundFunc = roundFunc + + def moveTo(self, pt): + self._outPen.moveTo((self.roundFunc(pt[0]), self.roundFunc(pt[1]))) + + def lineTo(self, pt): + self._outPen.lineTo((self.roundFunc(pt[0]), self.roundFunc(pt[1]))) + + def curveTo(self, *points): + self._outPen.curveTo( + *((self.roundFunc(x), self.roundFunc(y)) for x, y in points) + ) + + def qCurveTo(self, *points): + self._outPen.qCurveTo( + *((self.roundFunc(x), self.roundFunc(y)) for x, y in points) + ) + + def addComponent(self, glyphName, transformation): + self._outPen.addComponent( + glyphName, + Transform( + *transformation[:4], + self.roundFunc(transformation[4]), + self.roundFunc(transformation[5]), + ), + ) + + +class RoundingPointPen(FilterPointPen): + """ + Filter point pen that rounds point coordinates and component XY offsets to integer. + + >>> from fontTools.pens.recordingPen import RecordingPointPen + >>> recpen = RecordingPointPen() + >>> roundpen = RoundingPointPen(recpen) + >>> roundpen.beginPath() + >>> roundpen.addPoint((0.4, 0.6), 'line') + >>> roundpen.addPoint((1.6, 2.5), 'line') + >>> roundpen.addPoint((2.4, 4.6)) + >>> roundpen.addPoint((3.3, 5.7)) + >>> roundpen.addPoint((4.9, 6.1), 'qcurve') + >>> roundpen.endPath() + >>> roundpen.addComponent("a", (1.5, 0, 0, 1.5, 10.5, -10.5)) + >>> recpen.value == [ + ... ('beginPath', (), {}), + ... ('addPoint', ((0, 1), 'line', False, None), {}), + ... ('addPoint', ((2, 3), 'line', False, None), {}), + ... ('addPoint', ((2, 5), None, False, None), {}), + ... ('addPoint', ((3, 6), None, False, None), {}), + ... ('addPoint', ((5, 6), 'qcurve', False, None), {}), + ... ('endPath', (), {}), + ... ('addComponent', ('a', (1.5, 0, 0, 1.5, 11, -10)), {}), + ... 
] + True + """ + + def __init__(self, outPen, roundFunc=otRound): + super().__init__(outPen) + self.roundFunc = roundFunc + + def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs): + self._outPen.addPoint( + (self.roundFunc(pt[0]), self.roundFunc(pt[1])), + segmentType=segmentType, + smooth=smooth, + name=name, + **kwargs, + ) + + def addComponent(self, baseGlyphName, transformation, **kwargs): + self._outPen.addComponent( + baseGlyphName, + Transform( + *transformation[:4], + self.roundFunc(transformation[4]), + self.roundFunc(transformation[5]), + ), + **kwargs, + ) diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/statisticsPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/statisticsPen.py new file mode 100644 index 00000000..abd6ff5e --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/statisticsPen.py @@ -0,0 +1,100 @@ +"""Pen calculating area, center of mass, variance and standard-deviation, +covariance and correlation, and slant, of glyph shapes.""" +import math +from fontTools.pens.momentsPen import MomentsPen + +__all__ = ["StatisticsPen"] + + +class StatisticsPen(MomentsPen): + + """Pen calculating area, center of mass, variance and + standard-deviation, covariance and correlation, and slant, + of glyph shapes. + + Note that all the calculated values are 'signed'. Ie. if the + glyph shape is self-intersecting, the values are not correct + (but well-defined). As such, area will be negative if contour + directions are clockwise. 
Moreover, variance might be negative + if the shapes are self-intersecting in certain ways.""" + + def __init__(self, glyphset=None): + MomentsPen.__init__(self, glyphset=glyphset) + self.__zero() + + def _closePath(self): + MomentsPen._closePath(self) + self.__update() + + def __zero(self): + self.meanX = 0 + self.meanY = 0 + self.varianceX = 0 + self.varianceY = 0 + self.stddevX = 0 + self.stddevY = 0 + self.covariance = 0 + self.correlation = 0 + self.slant = 0 + + def __update(self): + + area = self.area + if not area: + self.__zero() + return + + # Center of mass + # https://en.wikipedia.org/wiki/Center_of_mass#A_continuous_volume + self.meanX = meanX = self.momentX / area + self.meanY = meanY = self.momentY / area + + # Var(X) = E[X^2] - E[X]^2 + self.varianceX = varianceX = self.momentXX / area - meanX**2 + self.varianceY = varianceY = self.momentYY / area - meanY**2 + + self.stddevX = stddevX = math.copysign(abs(varianceX)**.5, varianceX) + self.stddevY = stddevY = math.copysign(abs(varianceY)**.5, varianceY) + + # Covariance(X,Y) = ( E[X.Y] - E[X]E[Y] ) + self.covariance = covariance = self.momentXY / area - meanX*meanY + + # Correlation(X,Y) = Covariance(X,Y) / ( stddev(X) * stddev(Y) ) + # https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient + correlation = covariance / (stddevX * stddevY) + self.correlation = correlation if abs(correlation) > 1e-3 else 0 + + slant = covariance / varianceY + self.slant = slant if abs(slant) > 1e-3 else 0 + + +def _test(glyphset, upem, glyphs): + from fontTools.pens.transformPen import TransformPen + from fontTools.misc.transform import Scale + + print('upem', upem) + + for glyph_name in glyphs: + print() + print("glyph:", glyph_name) + glyph = glyphset[glyph_name] + pen = StatisticsPen(glyphset=glyphset) + transformer = TransformPen(pen, Scale(1./upem)) + glyph.draw(transformer) + for item in ['area', 'momentX', 'momentY', 'momentXX', 'momentYY', 'momentXY', 'meanX', 'meanY', 'varianceX', 
'varianceY', 'stddevX', 'stddevY', 'covariance', 'correlation', 'slant']: + if item[0] == '_': continue + print ("%s: %g" % (item, getattr(pen, item))) + +def main(args): + if not args: + return + filename, glyphs = args[0], args[1:] + if not glyphs: + glyphs = ['e', 'o', 'I', 'slash', 'E', 'zero', 'eight', 'minus', 'equal'] + from fontTools.ttLib import TTFont + font = TTFont(filename) + _test(font.getGlyphSet(), font['head'].unitsPerEm, glyphs) + +if __name__ == '__main__': + import sys + main(sys.argv[1:]) diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/svgPathPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/svgPathPen.py new file mode 100644 index 00000000..4352ba47 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/svgPathPen.py @@ -0,0 +1,176 @@ +from fontTools.pens.basePen import BasePen + + +def pointToString(pt): + return " ".join([str(i) for i in pt]) + + +class SVGPathPen(BasePen): + + def __init__(self, glyphSet): + BasePen.__init__(self, glyphSet) + self._commands = [] + self._lastCommand = None + self._lastX = None + self._lastY = None + + def _handleAnchor(self): + """ + >>> pen = SVGPathPen(None) + >>> pen.moveTo((0, 0)) + >>> pen.moveTo((10, 10)) + >>> pen._commands + ['M10 10'] + """ + if self._lastCommand == "M": + self._commands.pop(-1) + + def _moveTo(self, pt): + """ + >>> pen = SVGPathPen(None) + >>> pen.moveTo((0, 0)) + >>> pen._commands + ['M0 0'] + + >>> pen = SVGPathPen(None) + >>> pen.moveTo((10, 0)) + >>> pen._commands + ['M10 0'] + + >>> pen = SVGPathPen(None) + >>> pen.moveTo((0, 10)) + >>> pen._commands + ['M0 10'] + """ + self._handleAnchor() + t = "M%s" % (pointToString(pt)) + self._commands.append(t) + self._lastCommand = "M" + self._lastX, self._lastY = pt + + def _lineTo(self, pt): + """ + # duplicate point + >>> pen = SVGPathPen(None) + >>> pen.moveTo((10, 10)) + >>> pen.lineTo((10, 10)) + >>> pen._commands + ['M10 10'] + + # vertical line + >>> pen = SVGPathPen(None) + >>> 
pen.moveTo((10, 10)) + >>> pen.lineTo((10, 0)) + >>> pen._commands + ['M10 10', 'V0'] + + # horizontal line + >>> pen = SVGPathPen(None) + >>> pen.moveTo((10, 10)) + >>> pen.lineTo((0, 10)) + >>> pen._commands + ['M10 10', 'H0'] + + # basic + >>> pen = SVGPathPen(None) + >>> pen.lineTo((70, 80)) + >>> pen._commands + ['L70 80'] + + # basic following a moveto + >>> pen = SVGPathPen(None) + >>> pen.moveTo((0, 0)) + >>> pen.lineTo((10, 10)) + >>> pen._commands + ['M0 0', ' 10 10'] + """ + x, y = pt + # duplicate point + if x == self._lastX and y == self._lastY: + return + # vertical line + elif x == self._lastX: + cmd = "V" + pts = str(y) + # horizontal line + elif y == self._lastY: + cmd = "H" + pts = str(x) + # previous was a moveto + elif self._lastCommand == "M": + cmd = None + pts = " " + pointToString(pt) + # basic + else: + cmd = "L" + pts = pointToString(pt) + # write the string + t = "" + if cmd: + t += cmd + self._lastCommand = cmd + t += pts + self._commands.append(t) + # store for future reference + self._lastX, self._lastY = pt + + def _curveToOne(self, pt1, pt2, pt3): + """ + >>> pen = SVGPathPen(None) + >>> pen.curveTo((10, 20), (30, 40), (50, 60)) + >>> pen._commands + ['C10 20 30 40 50 60'] + """ + t = "C" + t += pointToString(pt1) + " " + t += pointToString(pt2) + " " + t += pointToString(pt3) + self._commands.append(t) + self._lastCommand = "C" + self._lastX, self._lastY = pt3 + + def _qCurveToOne(self, pt1, pt2): + """ + >>> pen = SVGPathPen(None) + >>> pen.qCurveTo((10, 20), (30, 40)) + >>> pen._commands + ['Q10 20 30 40'] + """ + assert pt2 is not None + t = "Q" + t += pointToString(pt1) + " " + t += pointToString(pt2) + self._commands.append(t) + self._lastCommand = "Q" + self._lastX, self._lastY = pt2 + + def _closePath(self): + """ + >>> pen = SVGPathPen(None) + >>> pen.closePath() + >>> pen._commands + ['Z'] + """ + self._commands.append("Z") + self._lastCommand = "Z" + self._lastX = self._lastY = None + + def _endPath(self): + """ + >>> pen 
= SVGPathPen(None) + >>> pen.endPath() + >>> pen._commands + ['Z'] + """ + self._closePath() + self._lastCommand = None + self._lastX = self._lastY = None + + def getCommands(self): + return "".join(self._commands) + + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/t2CharStringPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/t2CharStringPen.py new file mode 100644 index 00000000..0fddec1a --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/t2CharStringPen.py @@ -0,0 +1,65 @@ +# Copyright (c) 2009 Type Supply LLC +# Author: Tal Leming + +from fontTools.misc.roundTools import otRound, roundFunc +from fontTools.misc.psCharStrings import T2CharString +from fontTools.pens.basePen import BasePen +from fontTools.cffLib.specializer import specializeCommands, commandsToProgram + + +class T2CharStringPen(BasePen): + """Pen to draw Type 2 CharStrings. + + The 'roundTolerance' argument controls the rounding of point coordinates. + It is defined as the maximum absolute difference between the original + float and the rounded integer value. + The default tolerance of 0.5 means that all floats are rounded to integer; + a value of 0 disables rounding; values in between will only round floats + which are close to their integral part within the tolerated range. 
+ """ + + def __init__(self, width, glyphSet, roundTolerance=0.5, CFF2=False): + super(T2CharStringPen, self).__init__(glyphSet) + self.round = roundFunc(roundTolerance) + self._CFF2 = CFF2 + self._width = width + self._commands = [] + self._p0 = (0,0) + + def _p(self, pt): + p0 = self._p0 + pt = self._p0 = (self.round(pt[0]), self.round(pt[1])) + return [pt[0]-p0[0], pt[1]-p0[1]] + + def _moveTo(self, pt): + self._commands.append(('rmoveto', self._p(pt))) + + def _lineTo(self, pt): + self._commands.append(('rlineto', self._p(pt))) + + def _curveToOne(self, pt1, pt2, pt3): + _p = self._p + self._commands.append(('rrcurveto', _p(pt1)+_p(pt2)+_p(pt3))) + + def _closePath(self): + pass + + def _endPath(self): + pass + + def getCharString(self, private=None, globalSubrs=None, optimize=True): + commands = self._commands + if optimize: + maxstack = 48 if not self._CFF2 else 513 + commands = specializeCommands(commands, + generalizeFirst=False, + maxstack=maxstack) + program = commandsToProgram(commands) + if self._width is not None: + assert not self._CFF2, "CFF2 does not allow encoding glyph width in CharString." + program.insert(0, otRound(self._width)) + if not self._CFF2: + program.append('endchar') + charString = T2CharString( + program=program, private=private, globalSubrs=globalSubrs) + return charString diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/teePen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/teePen.py new file mode 100644 index 00000000..2f30e922 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/teePen.py @@ -0,0 +1,46 @@ +"""Pen multiplexing drawing to one or more pens.""" +from fontTools.pens.basePen import AbstractPen + + +__all__ = ["TeePen"] + + +class TeePen(AbstractPen): + """Pen multiplexing drawing to one or more pens. + + Use either as TeePen(pen1, pen2, ...) 
or TeePen(iterableOfPens).""" + + def __init__(self, *pens): + if len(pens) == 1: + pens = pens[0] + self.pens = pens + def moveTo(self, p0): + for pen in self.pens: + pen.moveTo(p0) + def lineTo(self, p1): + for pen in self.pens: + pen.lineTo(p1) + def qCurveTo(self, *points): + for pen in self.pens: + pen.qCurveTo(*points) + def curveTo(self, *points): + for pen in self.pens: + pen.curveTo(*points) + def closePath(self): + for pen in self.pens: + pen.closePath() + def endPath(self): + for pen in self.pens: + pen.endPath() + def addComponent(self, glyphName, transformation): + for pen in self.pens: + pen.addComponent(glyphName, transformation) + + +if __name__ == "__main__": + from fontTools.pens.basePen import _TestPen + pen = TeePen(_TestPen(), _TestPen()) + pen.moveTo((0, 0)) + pen.lineTo((0, 100)) + pen.curveTo((50, 75), (60, 50), (50, 25)) + pen.closePath() diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/transformPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/transformPen.py new file mode 100644 index 00000000..93d19191 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/transformPen.py @@ -0,0 +1,108 @@ +from fontTools.pens.filterPen import FilterPen, FilterPointPen + + +__all__ = ["TransformPen", "TransformPointPen"] + + +class TransformPen(FilterPen): + + """Pen that transforms all coordinates using a Affine transformation, + and passes them to another pen. + """ + + def __init__(self, outPen, transformation): + """The 'outPen' argument is another pen object. It will receive the + transformed coordinates. The 'transformation' argument can either + be a six-tuple, or a fontTools.misc.transform.Transform object. 
+ """ + super(TransformPen, self).__init__(outPen) + if not hasattr(transformation, "transformPoint"): + from fontTools.misc.transform import Transform + transformation = Transform(*transformation) + self._transformation = transformation + self._transformPoint = transformation.transformPoint + self._stack = [] + + def moveTo(self, pt): + self._outPen.moveTo(self._transformPoint(pt)) + + def lineTo(self, pt): + self._outPen.lineTo(self._transformPoint(pt)) + + def curveTo(self, *points): + self._outPen.curveTo(*self._transformPoints(points)) + + def qCurveTo(self, *points): + if points[-1] is None: + points = self._transformPoints(points[:-1]) + [None] + else: + points = self._transformPoints(points) + self._outPen.qCurveTo(*points) + + def _transformPoints(self, points): + transformPoint = self._transformPoint + return [transformPoint(pt) for pt in points] + + def closePath(self): + self._outPen.closePath() + + def endPath(self): + self._outPen.endPath() + + def addComponent(self, glyphName, transformation): + transformation = self._transformation.transform(transformation) + self._outPen.addComponent(glyphName, transformation) + + +class TransformPointPen(FilterPointPen): + """PointPen that transforms all coordinates using a Affine transformation, + and passes them to another PointPen. 
+ + >>> from fontTools.pens.recordingPen import RecordingPointPen + >>> rec = RecordingPointPen() + >>> pen = TransformPointPen(rec, (2, 0, 0, 2, -10, 5)) + >>> v = iter(rec.value) + >>> pen.beginPath(identifier="contour-0") + >>> next(v) + ('beginPath', (), {'identifier': 'contour-0'}) + >>> pen.addPoint((100, 100), "line") + >>> next(v) + ('addPoint', ((190, 205), 'line', False, None), {}) + >>> pen.endPath() + >>> next(v) + ('endPath', (), {}) + >>> pen.addComponent("a", (1, 0, 0, 1, -10, 5), identifier="component-0") + >>> next(v) + ('addComponent', ('a', ), {'identifier': 'component-0'}) + """ + + def __init__(self, outPointPen, transformation): + """The 'outPointPen' argument is another point pen object. + It will receive the transformed coordinates. + The 'transformation' argument can either be a six-tuple, or a + fontTools.misc.transform.Transform object. + """ + super().__init__(outPointPen) + if not hasattr(transformation, "transformPoint"): + from fontTools.misc.transform import Transform + transformation = Transform(*transformation) + self._transformation = transformation + self._transformPoint = transformation.transformPoint + + def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs): + self._outPen.addPoint( + self._transformPoint(pt), segmentType, smooth, name, **kwargs + ) + + def addComponent(self, baseGlyphName, transformation, **kwargs): + transformation = self._transformation.transform(transformation) + self._outPen.addComponent(baseGlyphName, transformation, **kwargs) + + +if __name__ == "__main__": + from fontTools.pens.basePen import _TestPen + pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0)) + pen.moveTo((0, 0)) + pen.lineTo((0, 100)) + pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0)) + pen.closePath() diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/ttGlyphPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/ttGlyphPen.py new file mode 100644 index 00000000..5087e158 --- /dev/null +++ 
b/.venv/lib/python3.9/site-packages/fontTools/pens/ttGlyphPen.py @@ -0,0 +1,285 @@ +from array import array +from typing import Any, Dict, Optional, Tuple +from fontTools.misc.fixedTools import MAX_F2DOT14, floatToFixedToFloat +from fontTools.misc.loggingTools import LogMixin +from fontTools.pens.pointPen import AbstractPointPen +from fontTools.misc.roundTools import otRound +from fontTools.pens.basePen import LoggingPen, PenError +from fontTools.pens.transformPen import TransformPen, TransformPointPen +from fontTools.ttLib.tables import ttProgram +from fontTools.ttLib.tables._g_l_y_f import Glyph +from fontTools.ttLib.tables._g_l_y_f import GlyphComponent +from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates + + +__all__ = ["TTGlyphPen", "TTGlyphPointPen"] + + +class _TTGlyphBasePen: + def __init__( + self, + glyphSet: Optional[Dict[str, Any]], + handleOverflowingTransforms: bool = True, + ) -> None: + """ + Construct a new pen. + + Args: + glyphSet (Dict[str, Any]): A glyphset object, used to resolve components. + handleOverflowingTransforms (bool): See below. + + If ``handleOverflowingTransforms`` is True, the components' transform values + are checked that they don't overflow the limits of a F2Dot14 number: + -2.0 <= v < +2.0. If any transform value exceeds these, the composite + glyph is decomposed. + + An exception to this rule is done for values that are very close to +2.0 + (both for consistency with the -2.0 case, and for the relative frequency + these occur in real fonts). When almost +2.0 values occur (and all other + values are within the range -2.0 <= x <= +2.0), they are clamped to the + maximum positive value that can still be encoded as an F2Dot14: i.e. + 1.99993896484375. + + If False, no check is done and all components are translated unmodified + into the glyf table, followed by an inevitable ``struct.error`` once an + attempt is made to compile them. 
+ + If both contours and components are present in a glyph, the components + are decomposed. + """ + self.glyphSet = glyphSet + self.handleOverflowingTransforms = handleOverflowingTransforms + self.init() + + def _decompose( + self, + glyphName: str, + transformation: Tuple[float, float, float, float, float, float], + ): + tpen = self.transformPen(self, transformation) + getattr(self.glyphSet[glyphName], self.drawMethod)(tpen) + + def _isClosed(self): + """ + Check if the current path is closed. + """ + raise NotImplementedError + + def init(self) -> None: + self.points = [] + self.endPts = [] + self.types = [] + self.components = [] + + def addComponent( + self, + baseGlyphName: str, + transformation: Tuple[float, float, float, float, float, float], + identifier: Optional[str] = None, + **kwargs: Any, + ) -> None: + """ + Add a sub glyph. + """ + self.components.append((baseGlyphName, transformation)) + + def _buildComponents(self, componentFlags): + if self.handleOverflowingTransforms: + # we can't encode transform values > 2 or < -2 in F2Dot14, + # so we must decompose the glyph if any transform exceeds these + overflowing = any( + s > 2 or s < -2 + for (glyphName, transformation) in self.components + for s in transformation[:4] + ) + components = [] + for glyphName, transformation in self.components: + if glyphName not in self.glyphSet: + self.log.warning(f"skipped non-existing component '{glyphName}'") + continue + if self.points or (self.handleOverflowingTransforms and overflowing): + # can't have both coordinates and components, so decompose + self._decompose(glyphName, transformation) + continue + + component = GlyphComponent() + component.glyphName = glyphName + component.x, component.y = (otRound(v) for v in transformation[4:]) + # quantize floats to F2Dot14 so we get same values as when decompiled + # from a binary glyf table + transformation = tuple( + floatToFixedToFloat(v, 14) for v in transformation[:4] + ) + if transformation != (1, 0, 0, 1): + if 
self.handleOverflowingTransforms and any( + MAX_F2DOT14 < s <= 2 for s in transformation + ): + # clamp values ~= +2.0 so we can keep the component + transformation = tuple( + MAX_F2DOT14 if MAX_F2DOT14 < s <= 2 else s + for s in transformation + ) + component.transform = (transformation[:2], transformation[2:]) + component.flags = componentFlags + components.append(component) + return components + + def glyph(self, componentFlags: int = 0x4) -> Glyph: + """ + Returns a :py:class:`~._g_l_y_f.Glyph` object representing the glyph. + """ + if not self._isClosed(): + raise PenError("Didn't close last contour.") + components = self._buildComponents(componentFlags) + + glyph = Glyph() + glyph.coordinates = GlyphCoordinates(self.points) + glyph.coordinates.toInt() + glyph.endPtsOfContours = self.endPts + glyph.flags = array("B", self.types) + self.init() + + if components: + # If both components and contours were present, they have by now + # been decomposed by _buildComponents. + glyph.components = components + glyph.numberOfContours = -1 + else: + glyph.numberOfContours = len(glyph.endPtsOfContours) + glyph.program = ttProgram.Program() + glyph.program.fromBytecode(b"") + + return glyph + + +class TTGlyphPen(_TTGlyphBasePen, LoggingPen): + """ + Pen used for drawing to a TrueType glyph. + + This pen can be used to construct or modify glyphs in a TrueType format + font. After using the pen to draw, use the ``.glyph()`` method to retrieve + a :py:class:`~._g_l_y_f.Glyph` object representing the glyph. 
+ """ + + drawMethod = "draw" + transformPen = TransformPen + + def _addPoint(self, pt: Tuple[float, float], onCurve: int) -> None: + self.points.append(pt) + self.types.append(onCurve) + + def _popPoint(self) -> None: + self.points.pop() + self.types.pop() + + def _isClosed(self) -> bool: + return (not self.points) or ( + self.endPts and self.endPts[-1] == len(self.points) - 1 + ) + + def lineTo(self, pt: Tuple[float, float]) -> None: + self._addPoint(pt, 1) + + def moveTo(self, pt: Tuple[float, float]) -> None: + if not self._isClosed(): + raise PenError('"move"-type point must begin a new contour.') + self._addPoint(pt, 1) + + def curveTo(self, *points) -> None: + raise NotImplementedError + + def qCurveTo(self, *points) -> None: + assert len(points) >= 1 + for pt in points[:-1]: + self._addPoint(pt, 0) + + # last point is None if there are no on-curve points + if points[-1] is not None: + self._addPoint(points[-1], 1) + + def closePath(self) -> None: + endPt = len(self.points) - 1 + + # ignore anchors (one-point paths) + if endPt == 0 or (self.endPts and endPt == self.endPts[-1] + 1): + self._popPoint() + return + + # if first and last point on this path are the same, remove last + startPt = 0 + if self.endPts: + startPt = self.endPts[-1] + 1 + if self.points[startPt] == self.points[endPt]: + self._popPoint() + endPt -= 1 + + self.endPts.append(endPt) + + def endPath(self) -> None: + # TrueType contours are always "closed" + self.closePath() + + +class TTGlyphPointPen(_TTGlyphBasePen, LogMixin, AbstractPointPen): + """ + Point pen used for drawing to a TrueType glyph. + + This pen can be used to construct or modify glyphs in a TrueType format + font. After using the pen to draw, use the ``.glyph()`` method to retrieve + a :py:class:`~._g_l_y_f.Glyph` object representing the glyph. 
+ """ + + drawMethod = "drawPoints" + transformPen = TransformPointPen + + def init(self) -> None: + super().init() + self._currentContourStartIndex = None + + def _isClosed(self) -> bool: + return self._currentContourStartIndex is None + + def beginPath(self, identifier: Optional[str] = None, **kwargs: Any) -> None: + """ + Start a new sub path. + """ + if not self._isClosed(): + raise PenError("Didn't close previous contour.") + self._currentContourStartIndex = len(self.points) + + def endPath(self) -> None: + """ + End the current sub path. + """ + # TrueType contours are always "closed" + if self._isClosed(): + raise PenError("Contour is already closed.") + if self._currentContourStartIndex == len(self.points): + raise PenError("Tried to end an empty contour.") + self.endPts.append(len(self.points) - 1) + self._currentContourStartIndex = None + + def addPoint( + self, + pt: Tuple[float, float], + segmentType: Optional[str] = None, + smooth: bool = False, + name: Optional[str] = None, + identifier: Optional[str] = None, + **kwargs: Any, + ) -> None: + """ + Add a point to the current sub path. 
+ """ + if self._isClosed(): + raise PenError("Can't add a point to a closed contour.") + if segmentType is None: + self.types.append(0) # offcurve + elif segmentType in ("qcurve", "line", "move"): + self.types.append(1) # oncurve + elif segmentType == "curve": + raise NotImplementedError("cubic curves are not supported") + else: + raise AssertionError(segmentType) + + self.points.append(pt) diff --git a/.venv/lib/python3.9/site-packages/fontTools/pens/wxPen.py b/.venv/lib/python3.9/site-packages/fontTools/pens/wxPen.py new file mode 100644 index 00000000..1504f089 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/pens/wxPen.py @@ -0,0 +1,29 @@ +from fontTools.pens.basePen import BasePen + + +__all__ = ["WxPen"] + + +class WxPen(BasePen): + + def __init__(self, glyphSet, path=None): + BasePen.__init__(self, glyphSet) + if path is None: + import wx + path = wx.GraphicsRenderer.GetDefaultRenderer().CreatePath() + self.path = path + + def _moveTo(self, p): + self.path.MoveToPoint(*p) + + def _lineTo(self, p): + self.path.AddLineToPoint(*p) + + def _curveToOne(self, p1, p2, p3): + self.path.AddCurveToPoint(*p1+p2+p3) + + def _qCurveToOne(self, p1, p2): + self.path.AddQuadCurveToPoint(*p1+p2) + + def _closePath(self): + self.path.CloseSubpath() diff --git a/.venv/lib/python3.9/site-packages/fontTools/subset/__init__.py b/.venv/lib/python3.9/site-packages/fontTools/subset/__init__.py new file mode 100644 index 00000000..8fb732f3 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/fontTools/subset/__init__.py @@ -0,0 +1,3130 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. 
+# +# Google Author(s): Behdad Esfahbod + +from fontTools.misc.roundTools import otRound +from fontTools import ttLib +from fontTools.ttLib.tables import otTables +from fontTools.otlLib.maxContextCalc import maxCtxFont +from fontTools.pens.basePen import NullPen +from fontTools.misc.loggingTools import Timer +from fontTools.subset.util import _add_method, _uniq_sort +from fontTools.subset.cff import * +from fontTools.subset.svg import * +import sys +import struct +import array +import logging +from collections import Counter, defaultdict +from functools import reduce +from types import MethodType + +__usage__ = "pyftsubset font-file [glyph...] [--option=value]..." + +__doc__="""\ +pyftsubset -- OpenType font subsetter and optimizer + + pyftsubset is an OpenType font subsetter and optimizer, based on fontTools. + It accepts any TT- or CFF-flavored OpenType (.otf or .ttf) or WOFF (.woff) + font file. The subsetted glyph set is based on the specified glyphs + or characters, and specified OpenType layout features. + + The tool also performs some size-reducing optimizations, aimed for using + subset fonts as webfonts. Individual optimizations can be enabled or + disabled, and are enabled by default when they are safe. + +Usage: + """+__usage__+""" + + At least one glyph or one of --gids, --gids-file, --glyphs, --glyphs-file, + --text, --text-file, --unicodes, or --unicodes-file, must be specified. + +Arguments: + font-file + The input font file. + glyph + Specify one or more glyph identifiers to include in the subset. Must be + PS glyph names, or the special string '*' to keep the entire glyph set. + +Initial glyph set specification: + These options populate the initial glyph set. Same option can appear + multiple times, and the results are accummulated. + --gids=[,...] + Specify comma/whitespace-separated list of glyph IDs or ranges as + decimal numbers. For example, --gids=10-12,14 adds glyphs with + numbers 10, 11, 12, and 14. 
+ --gids-file= + Like --gids but reads from a file. Anything after a '#' on any line + is ignored as comments. + --glyphs=[,...] + Specify comma/whitespace-separated PS glyph names to add to the subset. + Note that only PS glyph names are accepted, not gidNNN, U+XXXX, etc + that are accepted on the command line. The special string '*' will keep + the entire glyph set. + --glyphs-file= + Like --glyphs but reads from a file. Anything after a '#' on any line + is ignored as comments. + --text= + Specify characters to include in the subset, as UTF-8 string. + --text-file= + Like --text but reads from a file. Newline character are not added to + the subset. + --unicodes=[,...] + Specify comma/whitespace-separated list of Unicode codepoints or + ranges as hex numbers, optionally prefixed with 'U+', 'u', etc. + For example, --unicodes=41-5a,61-7a adds ASCII letters, so does + the more verbose --unicodes=U+0041-005A,U+0061-007A. + The special strings '*' will choose all Unicode characters mapped + by the font. + --unicodes-file= + Like --unicodes, but reads from a file. Anything after a '#' on any + line in the file is ignored as comments. + --ignore-missing-glyphs + Do not fail if some requested glyphs or gids are not available in + the font. + --no-ignore-missing-glyphs + Stop and fail if some requested glyphs or gids are not available + in the font. [default] + --ignore-missing-unicodes [default] + Do not fail if some requested Unicode characters (including those + indirectly specified using --text or --text-file) are not available + in the font. + --no-ignore-missing-unicodes + Stop and fail if some requested Unicode characters are not available + in the font. + Note the default discrepancy between ignoring missing glyphs versus + unicodes. This is for historical reasons and in the future + --no-ignore-missing-unicodes might become default. + +Other options: + For the other options listed below, to see the current value of the option, + pass a value of '?' 
to it, with or without a '='. + Examples: + $ pyftsubset --glyph-names? + Current setting for 'glyph-names' is: False + $ ./pyftsubset --name-IDs=? + Current setting for 'name-IDs' is: [0, 1, 2, 3, 4, 5, 6] + $ ./pyftsubset --hinting? --no-hinting --hinting? + Current setting for 'hinting' is: True + Current setting for 'hinting' is: False + +Output options: + --output-file= + The output font file. If not specified, the subsetted font + will be saved in as font-file.subset. + --flavor= + Specify flavor of output font file. May be 'woff' or 'woff2'. + Note that WOFF2 requires the Brotli Python extension, available + at https://github.com/google/brotli + --with-zopfli + Use the Google Zopfli algorithm to compress WOFF. The output is 3-8 % + smaller than pure zlib, but the compression speed is much slower. + The Zopfli Python bindings are available at: + https://pypi.python.org/pypi/zopfli + +Glyph set expansion: + These options control how additional glyphs are added to the subset. + --retain-gids + Retain glyph indices; just empty glyphs not needed in-place. + --notdef-glyph + Add the '.notdef' glyph to the subset (ie, keep it). [default] + --no-notdef-glyph + Drop the '.notdef' glyph unless specified in the glyph set. This + saves a few bytes, but is not possible for Postscript-flavored + fonts, as those require '.notdef'. For TrueType-flavored fonts, + this works fine as long as no unsupported glyphs are requested + from the font. + --notdef-outline + Keep the outline of '.notdef' glyph. The '.notdef' glyph outline is + used when glyphs not supported by the font are to be shown. It is not + needed otherwise. + --no-notdef-outline + When including a '.notdef' glyph, remove its outline. This saves + a few bytes. [default] + --recommended-glyphs + Add glyphs 0, 1, 2, and 3 to the subset, as recommended for + TrueType-flavored fonts: '.notdef', 'NULL' or '.null', 'CR', 'space'. + Some legacy software might require this, but no modern system does. 
+ --no-recommended-glyphs + Do not add glyphs 0, 1, 2, and 3 to the subset, unless specified in + glyph set. [default] + --no-layout-closure + Do not expand glyph set to add glyphs produced by OpenType layout + features. Instead, OpenType layout features will be subset to only + rules that are relevant to the otherwise-specified glyph set. + --layout-features[+|-]=[,...] + Specify (=), add to (+=) or exclude from (-=) the comma-separated + set of OpenType layout feature tags that will be preserved. + Glyph variants used by the preserved features are added to the + specified subset glyph set. By default, 'calt', 'ccmp', 'clig', 'curs', + 'dnom', 'frac', 'kern', 'liga', 'locl', 'mark', 'mkmk', 'numr', 'rclt', + 'rlig', 'rvrn', and all features required for script shaping are + preserved. To see the full list, try '--layout-features=?'. + Use '*' to keep all features. + Multiple --layout-features options can be provided if necessary. + Examples: + --layout-features+=onum,pnum,ss01 + * Keep the default set of features and 'onum', 'pnum', 'ss01'. + --layout-features-='mark','mkmk' + * Keep the default set of features but drop 'mark' and 'mkmk'. + --layout-features='kern' + * Only keep the 'kern' feature, drop all others. + --layout-features='' + * Drop all features. + --layout-features='*' + * Keep all features. + --layout-features+=aalt --layout-features-=vrt2 + * Keep default set of features plus 'aalt', but drop 'vrt2'. + --layout-scripts[+|-]= +""" + + +# Style definitions for the HTML template +STYLE_INCLUDE = """ + +""" + + +# HTML template for HTMLWriter +DISPLAY_TEMPLATE = """ +
+ +
+ +
+ + + + + + + + + +
+
+ + + + + + +
+
+
+ + + +""" + + +INCLUDED_FRAMES = """ + for (var i=0; i<{Nframes}; i++){{ + frames[i] = "{frame_dir}/frame" + ("0000000" + i).slice(-7) + + ".{frame_format}"; + }} +""" diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_api/__init__.py b/.venv/lib/python3.9/site-packages/matplotlib/_api/__init__.py new file mode 100644 index 00000000..43b25908 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/matplotlib/_api/__init__.py @@ -0,0 +1,299 @@ +""" +Helper functions for managing the Matplotlib API. + +This documentation is only relevant for Matplotlib developers, not for users. + +.. warning: + + This module and its submodules are for internal use only. Do not use them + in your own code. We may change the API at any time with no warning. + +""" + +import functools +import itertools +import re +import sys +import warnings + +from .deprecation import ( + deprecated, warn_deprecated, + rename_parameter, delete_parameter, make_keyword_only, + deprecate_method_override, deprecate_privatize_attribute, + suppress_matplotlib_deprecation_warning, + MatplotlibDeprecationWarning) + + +class classproperty: + """ + Like `property`, but also triggers on access via the class, and it is the + *class* that's passed as argument. + + Examples + -------- + :: + + class C: + @classproperty + def foo(cls): + return cls.__name__ + + assert C.foo == "C" + """ + + def __init__(self, fget, fset=None, fdel=None, doc=None): + self._fget = fget + if fset is not None or fdel is not None: + raise ValueError('classproperty only implements fget.') + self.fset = fset + self.fdel = fdel + # docs are ignored for now + self._doc = doc + + def __get__(self, instance, owner): + return self._fget(owner) + + @property + def fget(self): + return self._fget + + +# In the following check_foo() functions, the first parameter starts with an +# underscore because it is intended to be positional-only (e.g., so that +# `_api.check_isinstance([...], types=foo)` doesn't fail. 
+ +def check_isinstance(_types, **kwargs): + """ + For each *key, value* pair in *kwargs*, check that *value* is an instance + of one of *_types*; if not, raise an appropriate TypeError. + + As a special case, a ``None`` entry in *_types* is treated as NoneType. + + Examples + -------- + >>> _api.check_isinstance((SomeClass, None), arg=arg) + """ + types = _types + none_type = type(None) + types = ((types,) if isinstance(types, type) else + (none_type,) if types is None else + tuple(none_type if tp is None else tp for tp in types)) + + def type_name(tp): + return ("None" if tp is none_type + else tp.__qualname__ if tp.__module__ == "builtins" + else f"{tp.__module__}.{tp.__qualname__}") + + for k, v in kwargs.items(): + if not isinstance(v, types): + names = [*map(type_name, types)] + if "None" in names: # Move it to the end for better wording. + names.remove("None") + names.append("None") + raise TypeError( + "{!r} must be an instance of {}, not a {}".format( + k, + ", ".join(names[:-1]) + " or " + names[-1] + if len(names) > 1 else names[0], + type_name(type(v)))) + + +def check_in_list(_values, *, _print_supported_values=True, **kwargs): + """ + For each *key, value* pair in *kwargs*, check that *value* is in *_values*. + + Parameters + ---------- + _values : iterable + Sequence of values to check on. + _print_supported_values : bool, default: True + Whether to print *_values* when raising ValueError. + **kwargs : dict + *key, value* pairs as keyword arguments to find in *_values*. + + Raises + ------ + ValueError + If any *value* in *kwargs* is not found in *_values*. 
+ + Examples + -------- + >>> _api.check_in_list(["foo", "bar"], arg=arg, other_arg=other_arg) + """ + values = _values + for key, val in kwargs.items(): + if val not in values: + msg = f"{val!r} is not a valid value for {key}" + if _print_supported_values: + msg += f"; supported values are {', '.join(map(repr, values))}" + raise ValueError(msg) + + +def check_shape(_shape, **kwargs): + """ + For each *key, value* pair in *kwargs*, check that *value* has the shape + *_shape*, if not, raise an appropriate ValueError. + + *None* in the shape is treated as a "free" size that can have any length. + e.g. (None, 2) -> (N, 2) + + The values checked must be numpy arrays. + + Examples + -------- + To check for (N, 2) shaped arrays + + >>> _api.check_shape((None, 2), arg=arg, other_arg=other_arg) + """ + target_shape = _shape + for k, v in kwargs.items(): + data_shape = v.shape + + if len(target_shape) != len(data_shape) or any( + t not in [s, None] + for t, s in zip(target_shape, data_shape) + ): + dim_labels = iter(itertools.chain( + 'MNLIJKLH', + (f"D{i}" for i in itertools.count()))) + text_shape = ", ".join((str(n) + if n is not None + else next(dim_labels) + for n in target_shape)) + + raise ValueError( + f"{k!r} must be {len(target_shape)}D " + f"with shape ({text_shape}). " + f"Your input has shape {v.shape}." + ) + + +def check_getitem(_mapping, **kwargs): + """ + *kwargs* must consist of a single *key, value* pair. If *key* is in + *_mapping*, return ``_mapping[value]``; else, raise an appropriate + ValueError. 
+ + Examples + -------- + >>> _api.check_getitem({"foo": "bar"}, arg=arg) + """ + mapping = _mapping + if len(kwargs) != 1: + raise ValueError("check_getitem takes a single keyword argument") + (k, v), = kwargs.items() + try: + return mapping[v] + except KeyError: + raise ValueError( + "{!r} is not a valid value for {}; supported values are {}" + .format(v, k, ', '.join(map(repr, mapping)))) from None + + +def caching_module_getattr(cls): + """ + Helper decorator for implementing module-level ``__getattr__`` as a class. + + This decorator must be used at the module toplevel as follows:: + + @caching_module_getattr + class __getattr__: # The class *must* be named ``__getattr__``. + @property # Only properties are taken into account. + def name(self): ... + + The ``__getattr__`` class will be replaced by a ``__getattr__`` + function such that trying to access ``name`` on the module will + resolve the corresponding property (which may be decorated e.g. with + ``_api.deprecated`` for deprecating module globals). The properties are + all implicitly cached. Moreover, a suitable AttributeError is generated + and raised if no property with the given name exists. + """ + + assert cls.__name__ == "__getattr__" + # Don't accidentally export cls dunders. + props = {name: prop for name, prop in vars(cls).items() + if isinstance(prop, property)} + instance = cls() + + @functools.lru_cache(None) + def __getattr__(name): + if name in props: + return props[name].__get__(instance) + raise AttributeError( + f"module {cls.__module__!r} has no attribute {name!r}") + + return __getattr__ + + +def select_matching_signature(funcs, *args, **kwargs): + """ + Select and call the function that accepts ``*args, **kwargs``. + + *funcs* is a list of functions which should not raise any exception (other + than `TypeError` if the arguments passed do not match their signature). 
+ + `select_matching_signature` tries to call each of the functions in *funcs* + with ``*args, **kwargs`` (in the order in which they are given). Calls + that fail with a `TypeError` are silently skipped. As soon as a call + succeeds, `select_matching_signature` returns its return value. If no + function accepts ``*args, **kwargs``, then the `TypeError` raised by the + last failing call is re-raised. + + Callers should normally make sure that any ``*args, **kwargs`` can only + bind a single *func* (to avoid any ambiguity), although this is not checked + by `select_matching_signature`. + + Notes + ----- + `select_matching_signature` is intended to help implementing + signature-overloaded functions. In general, such functions should be + avoided, except for back-compatibility concerns. A typical use pattern is + :: + + def my_func(*args, **kwargs): + params = select_matching_signature( + [lambda old1, old2: locals(), lambda new: locals()], + *args, **kwargs) + if "old1" in params: + warn_deprecated(...) + old1, old2 = params.values() # note that locals() is ordered. + else: + new, = params.values() + # do things with params + + which allows *my_func* to be called either with two parameters (*old1* and + *old2*) or a single one (*new*). Note that the new signature is given + last, so that callers get a `TypeError` corresponding to the new signature + if the arguments they passed in do not match any signature. + """ + # Rather than relying on locals() ordering, one could have just used func's + # signature (``bound = inspect.signature(func).bind(*args, **kwargs); + # bound.apply_defaults(); return bound``) but that is significantly slower. + for i, func in enumerate(funcs): + try: + return func(*args, **kwargs) + except TypeError: + if i == len(funcs) - 1: + raise + + +def warn_external(message, category=None): + """ + `warnings.warn` wrapper that sets *stacklevel* to "outside Matplotlib". 
+ + The original emitter of the warning can be obtained by patching this + function back to `warnings.warn`, i.e. ``_api.warn_external = + warnings.warn`` (or ``functools.partial(warnings.warn, stacklevel=2)``, + etc.). + """ + frame = sys._getframe() + for stacklevel in itertools.count(1): # lgtm[py/unused-loop-variable] + if frame is None: + # when called in embedded context may hit frame is None + break + if not re.match(r"\A(matplotlib|mpl_toolkits)(\Z|\.(?!tests\.))", + # Work around sphinx-gallery not setting __name__. + frame.f_globals.get("__name__", "")): + break + frame = frame.f_back + warnings.warn(message, category, stacklevel) diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_api/__pycache__/__init__.cpython-39.pyc b/.venv/lib/python3.9/site-packages/matplotlib/_api/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 00000000..224b7241 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/matplotlib/_api/__pycache__/__init__.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_api/__pycache__/deprecation.cpython-39.pyc b/.venv/lib/python3.9/site-packages/matplotlib/_api/__pycache__/deprecation.cpython-39.pyc new file mode 100644 index 00000000..126e1363 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/matplotlib/_api/__pycache__/deprecation.cpython-39.pyc differ diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_api/deprecation.py b/.venv/lib/python3.9/site-packages/matplotlib/_api/deprecation.py new file mode 100644 index 00000000..363bb5f7 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/matplotlib/_api/deprecation.py @@ -0,0 +1,512 @@ +""" +Helper functions for deprecating parts of the Matplotlib API. + +This documentation is only relevant for Matplotlib developers, not for users. + +.. warning: + + This module is for internal use only. Do not use it in your own code. + We may change the API at any time with no warning. 
+ +""" + +import contextlib +import functools +import inspect +import math +import warnings + + +class MatplotlibDeprecationWarning(DeprecationWarning): + """A class for issuing deprecation warnings for Matplotlib users.""" + + +# mplDeprecation is deprecated. Use MatplotlibDeprecationWarning instead. +# remove when removing the re-import from cbook +mplDeprecation = MatplotlibDeprecationWarning + + +def _generate_deprecation_warning( + since, message='', name='', alternative='', pending=False, obj_type='', + addendum='', *, removal=''): + if pending: + if removal: + raise ValueError( + "A pending deprecation cannot have a scheduled removal") + else: + removal = f"in {removal}" if removal else "two minor releases later" + if not message: + message = ( + ("\nThe %(name)s %(obj_type)s" if obj_type else "%(name)s") + + (" will be deprecated in a future version" + if pending else + (" was deprecated in Matplotlib %(since)s" + + (" and will be removed %(removal)s" if removal else ""))) + + "." + + (" Use %(alternative)s instead." if alternative else "") + + (" %(addendum)s" if addendum else "")) + warning_cls = (PendingDeprecationWarning if pending + else MatplotlibDeprecationWarning) + return warning_cls(message % dict( + func=name, name=name, obj_type=obj_type, since=since, removal=removal, + alternative=alternative, addendum=addendum)) + + +def warn_deprecated( + since, *, message='', name='', alternative='', pending=False, + obj_type='', addendum='', removal=''): + """ + Display a standardized deprecation. + + Parameters + ---------- + since : str + The release at which this API became deprecated. + message : str, optional + Override the default deprecation message. The ``%(since)s``, + ``%(name)s``, ``%(alternative)s``, ``%(obj_type)s``, ``%(addendum)s``, + and ``%(removal)s`` format specifiers will be replaced by the values + of the respective arguments passed to this function. + name : str, optional + The name of the deprecated object. 
+ alternative : str, optional + An alternative API that the user may use in place of the deprecated + API. The deprecation warning will tell the user about this alternative + if provided. + pending : bool, optional + If True, uses a PendingDeprecationWarning instead of a + DeprecationWarning. Cannot be used together with *removal*. + obj_type : str, optional + The object type being deprecated. + addendum : str, optional + Additional text appended directly to the final message. + removal : str, optional + The expected removal version. With the default (an empty string), a + removal version is automatically computed from *since*. Set to other + Falsy values to not schedule a removal date. Cannot be used together + with *pending*. + + Examples + -------- + :: + + # To warn of the deprecation of "matplotlib.name_of_module" + warn_deprecated('1.4.0', name='matplotlib.name_of_module', + obj_type='module') + """ + warning = _generate_deprecation_warning( + since, message, name, alternative, pending, obj_type, addendum, + removal=removal) + from . import warn_external + warn_external(warning, category=MatplotlibDeprecationWarning) + + +def deprecated(since, *, message='', name='', alternative='', pending=False, + obj_type=None, addendum='', removal=''): + """ + Decorator to mark a function, a class, or a property as deprecated. + + When deprecating a classmethod, a staticmethod, or a property, the + ``@deprecated`` decorator should go *under* ``@classmethod`` and + ``@staticmethod`` (i.e., `deprecated` should directly decorate the + underlying callable), but *over* ``@property``. + + When deprecating a class ``C`` intended to be used as a base class in a + multiple inheritance hierarchy, ``C`` *must* define an ``__init__`` method + (if ``C`` instead inherited its ``__init__`` from its own base class, then + ``@deprecated`` would mess up ``__init__`` inheritance when installing its + own (deprecation-emitting) ``C.__init__``). 
+ + Parameters are the same as for `warn_deprecated`, except that *obj_type* + defaults to 'class' if decorating a class, 'attribute' if decorating a + property, and 'function' otherwise. + + Examples + -------- + :: + + @deprecated('1.4.0') + def the_function_to_deprecate(): + pass + """ + + def deprecate(obj, message=message, name=name, alternative=alternative, + pending=pending, obj_type=obj_type, addendum=addendum): + from matplotlib._api import classproperty + + if isinstance(obj, type): + if obj_type is None: + obj_type = "class" + func = obj.__init__ + name = name or obj.__name__ + old_doc = obj.__doc__ + + def finalize(wrapper, new_doc): + try: + obj.__doc__ = new_doc + except AttributeError: # Can't set on some extension objects. + pass + obj.__init__ = functools.wraps(obj.__init__)(wrapper) + return obj + + elif isinstance(obj, (property, classproperty)): + if obj_type is None: + obj_type = "attribute" + func = None + name = name or obj.fget.__name__ + old_doc = obj.__doc__ + + class _deprecated_property(type(obj)): + def __get__(self, instance, owner=None): + if instance is not None or owner is not None \ + and isinstance(self, classproperty): + emit_warning() + return super().__get__(instance, owner) + + def __set__(self, instance, value): + if instance is not None: + emit_warning() + return super().__set__(instance, value) + + def __delete__(self, instance): + if instance is not None: + emit_warning() + return super().__delete__(instance) + + def __set_name__(self, owner, set_name): + nonlocal name + if name == "": + name = set_name + + def finalize(_, new_doc): + return _deprecated_property( + fget=obj.fget, fset=obj.fset, fdel=obj.fdel, doc=new_doc) + + else: + if obj_type is None: + obj_type = "function" + func = obj + name = name or obj.__name__ + old_doc = func.__doc__ + + def finalize(wrapper, new_doc): + wrapper = functools.wraps(func)(wrapper) + wrapper.__doc__ = new_doc + return wrapper + + def emit_warning(): + warn_deprecated( + since, 
message=message, name=name, alternative=alternative, + pending=pending, obj_type=obj_type, addendum=addendum, + removal=removal) + + def wrapper(*args, **kwargs): + emit_warning() + return func(*args, **kwargs) + + old_doc = inspect.cleandoc(old_doc or '').strip('\n') + + notes_header = '\nNotes\n-----' + new_doc = (f"[*Deprecated*] {old_doc}\n" + f"{notes_header if notes_header not in old_doc else ''}\n" + f".. deprecated:: {since}\n" + f" {message.strip()}") + + if not old_doc: + # This is to prevent a spurious 'unexpected unindent' warning from + # docutils when the original docstring was blank. + new_doc += r'\ ' + + return finalize(wrapper, new_doc) + + return deprecate + + +class deprecate_privatize_attribute: + """ + Helper to deprecate public access to an attribute (or method). + + This helper should only be used at class scope, as follows:: + + class Foo: + attr = _deprecate_privatize_attribute(*args, **kwargs) + + where *all* parameters are forwarded to `deprecated`. This form makes + ``attr`` a property which forwards read and write access to ``self._attr`` + (same name but with a leading underscore), with a deprecation warning. + Note that the attribute name is derived from *the name this helper is + assigned to*. This helper also works for deprecating methods. + """ + + def __init__(self, *args, **kwargs): + self.deprecator = deprecated(*args, **kwargs) + + def __set_name__(self, owner, name): + setattr(owner, name, self.deprecator( + property(lambda self: getattr(self, f"_{name}"), + lambda self, value: setattr(self, f"_{name}", value)), + name=name)) + + +# Used by _copy_docstring_and_deprecators to redecorate pyplot wrappers and +# boilerplate.py to retrieve original signatures. It may seem natural to store +# this information as an attribute on the wrapper, but if the wrapper gets +# itself functools.wraps()ed, then such attributes are silently propagated to +# the outer wrapper, which is not desired. 
+DECORATORS = {} + + +def rename_parameter(since, old, new, func=None): + """ + Decorator indicating that parameter *old* of *func* is renamed to *new*. + + The actual implementation of *func* should use *new*, not *old*. If *old* + is passed to *func*, a DeprecationWarning is emitted, and its value is + used, even if *new* is also passed by keyword (this is to simplify pyplot + wrapper functions, which always pass *new* explicitly to the Axes method). + If *new* is also passed but positionally, a TypeError will be raised by the + underlying function during argument binding. + + Examples + -------- + :: + + @_api.rename_parameter("3.1", "bad_name", "good_name") + def func(good_name): ... + """ + + decorator = functools.partial(rename_parameter, since, old, new) + + if func is None: + return decorator + + signature = inspect.signature(func) + assert old not in signature.parameters, ( + f"Matplotlib internal error: {old!r} cannot be a parameter for " + f"{func.__name__}()") + assert new in signature.parameters, ( + f"Matplotlib internal error: {new!r} must be a parameter for " + f"{func.__name__}()") + + @functools.wraps(func) + def wrapper(*args, **kwargs): + if old in kwargs: + warn_deprecated( + since, message=f"The {old!r} parameter of {func.__name__}() " + f"has been renamed {new!r} since Matplotlib {since}; support " + f"for the old name will be dropped %(removal)s.") + kwargs[new] = kwargs.pop(old) + return func(*args, **kwargs) + + # wrapper() must keep the same documented signature as func(): if we + # instead made both *old* and *new* appear in wrapper()'s signature, they + # would both show up in the pyplot function for an Axes method as well and + # pyplot would explicitly pass both arguments to the Axes method. 
+ + DECORATORS[wrapper] = decorator + return wrapper + + +class _deprecated_parameter_class: + def __repr__(self): + return "" + + +_deprecated_parameter = _deprecated_parameter_class() + + +def delete_parameter(since, name, func=None, **kwargs): + """ + Decorator indicating that parameter *name* of *func* is being deprecated. + + The actual implementation of *func* should keep the *name* parameter in its + signature, or accept a ``**kwargs`` argument (through which *name* would be + passed). + + Parameters that come after the deprecated parameter effectively become + keyword-only (as they cannot be passed positionally without triggering the + DeprecationWarning on the deprecated parameter), and should be marked as + such after the deprecation period has passed and the deprecated parameter + is removed. + + Parameters other than *since*, *name*, and *func* are keyword-only and + forwarded to `.warn_deprecated`. + + Examples + -------- + :: + + @_api.delete_parameter("3.1", "unused") + def func(used_arg, other_arg, unused, more_args): ... + """ + + decorator = functools.partial(delete_parameter, since, name, **kwargs) + + if func is None: + return decorator + + signature = inspect.signature(func) + # Name of `**kwargs` parameter of the decorated function, typically + # "kwargs" if such a parameter exists, or None if the decorated function + # doesn't accept `**kwargs`. + kwargs_name = next((param.name for param in signature.parameters.values() + if param.kind == inspect.Parameter.VAR_KEYWORD), None) + if name in signature.parameters: + kind = signature.parameters[name].kind + is_varargs = kind is inspect.Parameter.VAR_POSITIONAL + is_varkwargs = kind is inspect.Parameter.VAR_KEYWORD + if not is_varargs and not is_varkwargs: + name_idx = ( + # Deprecated parameter can't be passed positionally. 
+ math.inf if kind is inspect.Parameter.KEYWORD_ONLY + # If call site has no more than this number of parameters, the + # deprecated parameter can't have been passed positionally. + else [*signature.parameters].index(name)) + func.__signature__ = signature = signature.replace(parameters=[ + param.replace(default=_deprecated_parameter) + if param.name == name else param + for param in signature.parameters.values()]) + else: + name_idx = -1 # Deprecated parameter can always have been passed. + else: + is_varargs = is_varkwargs = False + # Deprecated parameter can't be passed positionally. + name_idx = math.inf + assert kwargs_name, ( + f"Matplotlib internal error: {name!r} must be a parameter for " + f"{func.__name__}()") + + addendum = kwargs.pop('addendum', None) + + @functools.wraps(func) + def wrapper(*inner_args, **inner_kwargs): + if len(inner_args) <= name_idx and name not in inner_kwargs: + # Early return in the simple, non-deprecated case (much faster than + # calling bind()). + return func(*inner_args, **inner_kwargs) + arguments = signature.bind(*inner_args, **inner_kwargs).arguments + if is_varargs and arguments.get(name): + warn_deprecated( + since, message=f"Additional positional arguments to " + f"{func.__name__}() are deprecated since %(since)s and " + f"support for them will be removed %(removal)s.") + elif is_varkwargs and arguments.get(name): + warn_deprecated( + since, message=f"Additional keyword arguments to " + f"{func.__name__}() are deprecated since %(since)s and " + f"support for them will be removed %(removal)s.") + # We cannot just check `name not in arguments` because the pyplot + # wrappers always pass all arguments explicitly. 
+ elif any(name in d and d[name] != _deprecated_parameter + for d in [arguments, arguments.get(kwargs_name, {})]): + deprecation_addendum = ( + f"If any parameter follows {name!r}, they should be passed as " + f"keyword, not positionally.") + warn_deprecated( + since, + name=repr(name), + obj_type=f"parameter of {func.__name__}()", + addendum=(addendum + " " + deprecation_addendum) if addendum + else deprecation_addendum, + **kwargs) + return func(*inner_args, **inner_kwargs) + + DECORATORS[wrapper] = decorator + return wrapper + + +def make_keyword_only(since, name, func=None): + """ + Decorator indicating that passing parameter *name* (or any of the following + ones) positionally to *func* is being deprecated. + + When used on a method that has a pyplot wrapper, this should be the + outermost decorator, so that :file:`boilerplate.py` can access the original + signature. + """ + + decorator = functools.partial(make_keyword_only, since, name) + + if func is None: + return decorator + + signature = inspect.signature(func) + POK = inspect.Parameter.POSITIONAL_OR_KEYWORD + KWO = inspect.Parameter.KEYWORD_ONLY + assert (name in signature.parameters + and signature.parameters[name].kind == POK), ( + f"Matplotlib internal error: {name!r} must be a positional-or-keyword " + f"parameter for {func.__name__}()") + names = [*signature.parameters] + name_idx = names.index(name) + kwonly = [name for name in names[name_idx:] + if signature.parameters[name].kind == POK] + + @functools.wraps(func) + def wrapper(*args, **kwargs): + # Don't use signature.bind here, as it would fail when stacked with + # rename_parameter and an "old" argument name is passed in + # (signature.bind would fail, but the actual call would succeed). 
+ if len(args) > name_idx: + warn_deprecated( + since, message="Passing the %(name)s %(obj_type)s " + "positionally is deprecated since Matplotlib %(since)s; the " + "parameter will become keyword-only %(removal)s.", + name=name, obj_type=f"parameter of {func.__name__}()") + return func(*args, **kwargs) + + # Don't modify *func*'s signature, as boilerplate.py needs it. + wrapper.__signature__ = signature.replace(parameters=[ + param.replace(kind=KWO) if param.name in kwonly else param + for param in signature.parameters.values()]) + DECORATORS[wrapper] = decorator + return wrapper + + +def deprecate_method_override(method, obj, *, allow_empty=False, **kwargs): + """ + Return ``obj.method`` with a deprecation if it was overridden, else None. + + Parameters + ---------- + method + An unbound method, i.e. an expression of the form + ``Class.method_name``. Remember that within the body of a method, one + can always use ``__class__`` to refer to the class that is currently + being defined. + obj + Either an object of the class where *method* is defined, or a subclass + of that class. + allow_empty : bool, default: False + Whether to allow overrides by "empty" methods without emitting a + warning. + **kwargs + Additional parameters passed to `warn_deprecated` to generate the + deprecation warning; must at least include the "since" key. + """ + + def empty(): pass + def empty_with_docstring(): """doc""" + + name = method.__name__ + bound_child = getattr(obj, name) + bound_base = ( + method # If obj is a class, then we need to use unbound methods. 
+ if isinstance(bound_child, type(empty)) and isinstance(obj, type) + else method.__get__(obj)) + if (bound_child != bound_base + and (not allow_empty + or (getattr(getattr(bound_child, "__code__", None), + "co_code", None) + not in [empty.__code__.co_code, + empty_with_docstring.__code__.co_code]))): + warn_deprecated(**{"name": name, "obj_type": "method", **kwargs}) + return bound_child + return None + + +@contextlib.contextmanager +def suppress_matplotlib_deprecation_warning(): + with warnings.catch_warnings(): + warnings.simplefilter("ignore", MatplotlibDeprecationWarning) + yield diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_blocking_input.py b/.venv/lib/python3.9/site-packages/matplotlib/_blocking_input.py new file mode 100644 index 00000000..45f07757 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/matplotlib/_blocking_input.py @@ -0,0 +1,30 @@ +def blocking_input_loop(figure, event_names, timeout, handler): + """ + Run *figure*'s event loop while listening to interactive events. + + The events listed in *event_names* are passed to *handler*. + + This function is used to implement `.Figure.waitforbuttonpress`, + `.Figure.ginput`, and `.Axes.clabel`. + + Parameters + ---------- + figure : `~matplotlib.figure.Figure` + event_names : list of str + The names of the events passed to *handler*. + timeout : float + If positive, the event loop is stopped after *timeout* seconds. + handler : Callable[[Event], Any] + Function called for each event; it can force an early exit of the event + loop by calling ``canvas.stop_event_loop()``. + """ + if figure.canvas.manager: + figure.show() # Ensure that the figure is shown if we are managing it. + # Connect the events to the on_event function call. + cids = [figure.canvas.mpl_connect(name, handler) for name in event_names] + try: + figure.canvas.start_event_loop(timeout) # Start event loop. + finally: # Run even on exception like ctrl-c. + # Disconnect the callbacks. 
+ for cid in cids: + figure.canvas.mpl_disconnect(cid) diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_c_internal_utils.cpython-39-x86_64-linux-gnu.so b/.venv/lib/python3.9/site-packages/matplotlib/_c_internal_utils.cpython-39-x86_64-linux-gnu.so new file mode 100644 index 00000000..07af37fa Binary files /dev/null and b/.venv/lib/python3.9/site-packages/matplotlib/_c_internal_utils.cpython-39-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_cm.py b/.venv/lib/python3.9/site-packages/matplotlib/_cm.py new file mode 100644 index 00000000..586417d5 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/matplotlib/_cm.py @@ -0,0 +1,1440 @@ +""" +Nothing here but dictionaries for generating LinearSegmentedColormaps, +and a dictionary of these dictionaries. + +Documentation for each is in pyplot.colormaps(). Please update this +with the purpose and type of your colormap if you add data for one here. +""" + +from functools import partial + +import numpy as np + +_binary_data = { + 'red': ((0., 1., 1.), (1., 0., 0.)), + 'green': ((0., 1., 1.), (1., 0., 0.)), + 'blue': ((0., 1., 1.), (1., 0., 0.)) + } + +_autumn_data = {'red': ((0., 1.0, 1.0), (1.0, 1.0, 1.0)), + 'green': ((0., 0., 0.), (1.0, 1.0, 1.0)), + 'blue': ((0., 0., 0.), (1.0, 0., 0.))} + +_bone_data = {'red': ((0., 0., 0.), + (0.746032, 0.652778, 0.652778), + (1.0, 1.0, 1.0)), + 'green': ((0., 0., 0.), + (0.365079, 0.319444, 0.319444), + (0.746032, 0.777778, 0.777778), + (1.0, 1.0, 1.0)), + 'blue': ((0., 0., 0.), + (0.365079, 0.444444, 0.444444), + (1.0, 1.0, 1.0))} + +_cool_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)), + 'green': ((0., 1., 1.), (1.0, 0., 0.)), + 'blue': ((0., 1., 1.), (1.0, 1., 1.))} + +_copper_data = {'red': ((0., 0., 0.), + (0.809524, 1.000000, 1.000000), + (1.0, 1.0, 1.0)), + 'green': ((0., 0., 0.), + (1.0, 0.7812, 0.7812)), + 'blue': ((0., 0., 0.), + (1.0, 0.4975, 0.4975))} + +def _flag_red(x): return 0.75 * np.sin((x * 31.5 + 0.25) * np.pi) + 
0.5 +def _flag_green(x): return np.sin(x * 31.5 * np.pi) +def _flag_blue(x): return 0.75 * np.sin((x * 31.5 - 0.25) * np.pi) + 0.5 +_flag_data = {'red': _flag_red, 'green': _flag_green, 'blue': _flag_blue} + +def _prism_red(x): return 0.75 * np.sin((x * 20.9 + 0.25) * np.pi) + 0.67 +def _prism_green(x): return 0.75 * np.sin((x * 20.9 - 0.25) * np.pi) + 0.33 +def _prism_blue(x): return -1.1 * np.sin((x * 20.9) * np.pi) +_prism_data = {'red': _prism_red, 'green': _prism_green, 'blue': _prism_blue} + +def _ch_helper(gamma, s, r, h, p0, p1, x): + """Helper function for generating picklable cubehelix colormaps.""" + # Apply gamma factor to emphasise low or high intensity values + xg = x ** gamma + # Calculate amplitude and angle of deviation from the black to white + # diagonal in the plane of constant perceived intensity. + a = h * xg * (1 - xg) / 2 + phi = 2 * np.pi * (s / 3 + r * x) + return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi)) + +def cubehelix(gamma=1.0, s=0.5, r=-1.5, h=1.0): + """ + Return custom data dictionary of (r, g, b) conversion functions, which can + be used with :func:`register_cmap`, for the cubehelix color scheme. + + Unlike most other color schemes cubehelix was designed by D.A. Green to + be monotonically increasing in terms of perceived brightness. + Also, when printed on a black and white postscript printer, the scheme + results in a greyscale with monotonically increasing brightness. + This color scheme is named cubehelix because the (r, g, b) values produced + can be visualised as a squashed helix around the diagonal in the + (r, g, b) color cube. + + For a unit color cube (i.e. 3D coordinates for (r, g, b) each in the + range 0 to 1) the color scheme starts at (r, g, b) = (0, 0, 0), i.e. black, + and finishes at (r, g, b) = (1, 1, 1), i.e. white. For some fraction *x*, + between 0 and 1, the color is the corresponding grey value at that + fraction along the black to white diagonal (x, x, x) plus a color + element. 
This color element is calculated in a plane of constant + perceived intensity and controlled by the following parameters. + + Parameters + ---------- + gamma : float, default: 1 + Gamma factor emphasizing either low intensity values (gamma < 1), or + high intensity values (gamma > 1). + s : float, default: 0.5 (purple) + The starting color. + r : float, default: -1.5 + The number of r, g, b rotations in color that are made from the start + to the end of the color scheme. The default of -1.5 corresponds to -> + B -> G -> R -> B. + h : float, default: 1 + The hue, i.e. how saturated the colors are. If this parameter is zero + then the color scheme is purely a greyscale. + """ + return {'red': partial(_ch_helper, gamma, s, r, h, -0.14861, 1.78277), + 'green': partial(_ch_helper, gamma, s, r, h, -0.29227, -0.90649), + 'blue': partial(_ch_helper, gamma, s, r, h, 1.97294, 0.0)} + +_cubehelix_data = cubehelix() + +_bwr_data = ((0.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 0.0, 0.0)) +_brg_data = ((0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)) + +# Gnuplot palette functions +def _g0(x): return 0 +def _g1(x): return 0.5 +def _g2(x): return 1 +def _g3(x): return x +def _g4(x): return x ** 2 +def _g5(x): return x ** 3 +def _g6(x): return x ** 4 +def _g7(x): return np.sqrt(x) +def _g8(x): return np.sqrt(np.sqrt(x)) +def _g9(x): return np.sin(x * np.pi / 2) +def _g10(x): return np.cos(x * np.pi / 2) +def _g11(x): return np.abs(x - 0.5) +def _g12(x): return (2 * x - 1) ** 2 +def _g13(x): return np.sin(x * np.pi) +def _g14(x): return np.abs(np.cos(x * np.pi)) +def _g15(x): return np.sin(x * 2 * np.pi) +def _g16(x): return np.cos(x * 2 * np.pi) +def _g17(x): return np.abs(np.sin(x * 2 * np.pi)) +def _g18(x): return np.abs(np.cos(x * 2 * np.pi)) +def _g19(x): return np.abs(np.sin(x * 4 * np.pi)) +def _g20(x): return np.abs(np.cos(x * 4 * np.pi)) +def _g21(x): return 3 * x +def _g22(x): return 3 * x - 1 +def _g23(x): return 3 * x - 2 +def _g24(x): return np.abs(3 * x - 1) +def 
_g25(x): return np.abs(3 * x - 2) +def _g26(x): return (3 * x - 1) / 2 +def _g27(x): return (3 * x - 2) / 2 +def _g28(x): return np.abs((3 * x - 1) / 2) +def _g29(x): return np.abs((3 * x - 2) / 2) +def _g30(x): return x / 0.32 - 0.78125 +def _g31(x): return 2 * x - 0.84 +def _g32(x): + ret = np.zeros(len(x)) + m = (x < 0.25) + ret[m] = 4 * x[m] + m = (x >= 0.25) & (x < 0.92) + ret[m] = -2 * x[m] + 1.84 + m = (x >= 0.92) + ret[m] = x[m] / 0.08 - 11.5 + return ret +def _g33(x): return np.abs(2 * x - 0.5) +def _g34(x): return 2 * x +def _g35(x): return 2 * x - 0.5 +def _g36(x): return 2 * x - 1 + +gfunc = {i: globals()["_g{}".format(i)] for i in range(37)} + +_gnuplot_data = { + 'red': gfunc[7], + 'green': gfunc[5], + 'blue': gfunc[15], +} + +_gnuplot2_data = { + 'red': gfunc[30], + 'green': gfunc[31], + 'blue': gfunc[32], +} + +_ocean_data = { + 'red': gfunc[23], + 'green': gfunc[28], + 'blue': gfunc[3], +} + +_afmhot_data = { + 'red': gfunc[34], + 'green': gfunc[35], + 'blue': gfunc[36], +} + +_rainbow_data = { + 'red': gfunc[33], + 'green': gfunc[13], + 'blue': gfunc[10], +} + +_seismic_data = ( + (0.0, 0.0, 0.3), (0.0, 0.0, 1.0), + (1.0, 1.0, 1.0), (1.0, 0.0, 0.0), + (0.5, 0.0, 0.0)) + +_terrain_data = ( + (0.00, (0.2, 0.2, 0.6)), + (0.15, (0.0, 0.6, 1.0)), + (0.25, (0.0, 0.8, 0.4)), + (0.50, (1.0, 1.0, 0.6)), + (0.75, (0.5, 0.36, 0.33)), + (1.00, (1.0, 1.0, 1.0))) + +_gray_data = {'red': ((0., 0, 0), (1., 1, 1)), + 'green': ((0., 0, 0), (1., 1, 1)), + 'blue': ((0., 0, 0), (1., 1, 1))} + +_hot_data = {'red': ((0., 0.0416, 0.0416), + (0.365079, 1.000000, 1.000000), + (1.0, 1.0, 1.0)), + 'green': ((0., 0., 0.), + (0.365079, 0.000000, 0.000000), + (0.746032, 1.000000, 1.000000), + (1.0, 1.0, 1.0)), + 'blue': ((0., 0., 0.), + (0.746032, 0.000000, 0.000000), + (1.0, 1.0, 1.0))} + +_hsv_data = {'red': ((0., 1., 1.), + (0.158730, 1.000000, 1.000000), + (0.174603, 0.968750, 0.968750), + (0.333333, 0.031250, 0.031250), + (0.349206, 0.000000, 0.000000), + (0.666667, 
0.000000, 0.000000), + (0.682540, 0.031250, 0.031250), + (0.841270, 0.968750, 0.968750), + (0.857143, 1.000000, 1.000000), + (1.0, 1.0, 1.0)), + 'green': ((0., 0., 0.), + (0.158730, 0.937500, 0.937500), + (0.174603, 1.000000, 1.000000), + (0.507937, 1.000000, 1.000000), + (0.666667, 0.062500, 0.062500), + (0.682540, 0.000000, 0.000000), + (1.0, 0., 0.)), + 'blue': ((0., 0., 0.), + (0.333333, 0.000000, 0.000000), + (0.349206, 0.062500, 0.062500), + (0.507937, 1.000000, 1.000000), + (0.841270, 1.000000, 1.000000), + (0.857143, 0.937500, 0.937500), + (1.0, 0.09375, 0.09375))} + +_jet_data = {'red': ((0.00, 0, 0), + (0.35, 0, 0), + (0.66, 1, 1), + (0.89, 1, 1), + (1.00, 0.5, 0.5)), + 'green': ((0.000, 0, 0), + (0.125, 0, 0), + (0.375, 1, 1), + (0.640, 1, 1), + (0.910, 0, 0), + (1.000, 0, 0)), + 'blue': ((0.00, 0.5, 0.5), + (0.11, 1, 1), + (0.34, 1, 1), + (0.65, 0, 0), + (1.00, 0, 0))} + +_pink_data = {'red': ((0., 0.1178, 0.1178), (0.015873, 0.195857, 0.195857), + (0.031746, 0.250661, 0.250661), + (0.047619, 0.295468, 0.295468), + (0.063492, 0.334324, 0.334324), + (0.079365, 0.369112, 0.369112), + (0.095238, 0.400892, 0.400892), + (0.111111, 0.430331, 0.430331), + (0.126984, 0.457882, 0.457882), + (0.142857, 0.483867, 0.483867), + (0.158730, 0.508525, 0.508525), + (0.174603, 0.532042, 0.532042), + (0.190476, 0.554563, 0.554563), + (0.206349, 0.576204, 0.576204), + (0.222222, 0.597061, 0.597061), + (0.238095, 0.617213, 0.617213), + (0.253968, 0.636729, 0.636729), + (0.269841, 0.655663, 0.655663), + (0.285714, 0.674066, 0.674066), + (0.301587, 0.691980, 0.691980), + (0.317460, 0.709441, 0.709441), + (0.333333, 0.726483, 0.726483), + (0.349206, 0.743134, 0.743134), + (0.365079, 0.759421, 0.759421), + (0.380952, 0.766356, 0.766356), + (0.396825, 0.773229, 0.773229), + (0.412698, 0.780042, 0.780042), + (0.428571, 0.786796, 0.786796), + (0.444444, 0.793492, 0.793492), + (0.460317, 0.800132, 0.800132), + (0.476190, 0.806718, 0.806718), + (0.492063, 0.813250, 0.813250), + 
(0.507937, 0.819730, 0.819730), + (0.523810, 0.826160, 0.826160), + (0.539683, 0.832539, 0.832539), + (0.555556, 0.838870, 0.838870), + (0.571429, 0.845154, 0.845154), + (0.587302, 0.851392, 0.851392), + (0.603175, 0.857584, 0.857584), + (0.619048, 0.863731, 0.863731), + (0.634921, 0.869835, 0.869835), + (0.650794, 0.875897, 0.875897), + (0.666667, 0.881917, 0.881917), + (0.682540, 0.887896, 0.887896), + (0.698413, 0.893835, 0.893835), + (0.714286, 0.899735, 0.899735), + (0.730159, 0.905597, 0.905597), + (0.746032, 0.911421, 0.911421), + (0.761905, 0.917208, 0.917208), + (0.777778, 0.922958, 0.922958), + (0.793651, 0.928673, 0.928673), + (0.809524, 0.934353, 0.934353), + (0.825397, 0.939999, 0.939999), + (0.841270, 0.945611, 0.945611), + (0.857143, 0.951190, 0.951190), + (0.873016, 0.956736, 0.956736), + (0.888889, 0.962250, 0.962250), + (0.904762, 0.967733, 0.967733), + (0.920635, 0.973185, 0.973185), + (0.936508, 0.978607, 0.978607), + (0.952381, 0.983999, 0.983999), + (0.968254, 0.989361, 0.989361), + (0.984127, 0.994695, 0.994695), (1.0, 1.0, 1.0)), + 'green': ((0., 0., 0.), (0.015873, 0.102869, 0.102869), + (0.031746, 0.145479, 0.145479), + (0.047619, 0.178174, 0.178174), + (0.063492, 0.205738, 0.205738), + (0.079365, 0.230022, 0.230022), + (0.095238, 0.251976, 0.251976), + (0.111111, 0.272166, 0.272166), + (0.126984, 0.290957, 0.290957), + (0.142857, 0.308607, 0.308607), + (0.158730, 0.325300, 0.325300), + (0.174603, 0.341178, 0.341178), + (0.190476, 0.356348, 0.356348), + (0.206349, 0.370899, 0.370899), + (0.222222, 0.384900, 0.384900), + (0.238095, 0.398410, 0.398410), + (0.253968, 0.411476, 0.411476), + (0.269841, 0.424139, 0.424139), + (0.285714, 0.436436, 0.436436), + (0.301587, 0.448395, 0.448395), + (0.317460, 0.460044, 0.460044), + (0.333333, 0.471405, 0.471405), + (0.349206, 0.482498, 0.482498), + (0.365079, 0.493342, 0.493342), + (0.380952, 0.517549, 0.517549), + (0.396825, 0.540674, 0.540674), + (0.412698, 0.562849, 0.562849), + (0.428571, 
0.584183, 0.584183), + (0.444444, 0.604765, 0.604765), + (0.460317, 0.624669, 0.624669), + (0.476190, 0.643958, 0.643958), + (0.492063, 0.662687, 0.662687), + (0.507937, 0.680900, 0.680900), + (0.523810, 0.698638, 0.698638), + (0.539683, 0.715937, 0.715937), + (0.555556, 0.732828, 0.732828), + (0.571429, 0.749338, 0.749338), + (0.587302, 0.765493, 0.765493), + (0.603175, 0.781313, 0.781313), + (0.619048, 0.796819, 0.796819), + (0.634921, 0.812029, 0.812029), + (0.650794, 0.826960, 0.826960), + (0.666667, 0.841625, 0.841625), + (0.682540, 0.856040, 0.856040), + (0.698413, 0.870216, 0.870216), + (0.714286, 0.884164, 0.884164), + (0.730159, 0.897896, 0.897896), + (0.746032, 0.911421, 0.911421), + (0.761905, 0.917208, 0.917208), + (0.777778, 0.922958, 0.922958), + (0.793651, 0.928673, 0.928673), + (0.809524, 0.934353, 0.934353), + (0.825397, 0.939999, 0.939999), + (0.841270, 0.945611, 0.945611), + (0.857143, 0.951190, 0.951190), + (0.873016, 0.956736, 0.956736), + (0.888889, 0.962250, 0.962250), + (0.904762, 0.967733, 0.967733), + (0.920635, 0.973185, 0.973185), + (0.936508, 0.978607, 0.978607), + (0.952381, 0.983999, 0.983999), + (0.968254, 0.989361, 0.989361), + (0.984127, 0.994695, 0.994695), (1.0, 1.0, 1.0)), + 'blue': ((0., 0., 0.), (0.015873, 0.102869, 0.102869), + (0.031746, 0.145479, 0.145479), + (0.047619, 0.178174, 0.178174), + (0.063492, 0.205738, 0.205738), + (0.079365, 0.230022, 0.230022), + (0.095238, 0.251976, 0.251976), + (0.111111, 0.272166, 0.272166), + (0.126984, 0.290957, 0.290957), + (0.142857, 0.308607, 0.308607), + (0.158730, 0.325300, 0.325300), + (0.174603, 0.341178, 0.341178), + (0.190476, 0.356348, 0.356348), + (0.206349, 0.370899, 0.370899), + (0.222222, 0.384900, 0.384900), + (0.238095, 0.398410, 0.398410), + (0.253968, 0.411476, 0.411476), + (0.269841, 0.424139, 0.424139), + (0.285714, 0.436436, 0.436436), + (0.301587, 0.448395, 0.448395), + (0.317460, 0.460044, 0.460044), + (0.333333, 0.471405, 0.471405), + (0.349206, 0.482498, 0.482498), 
+ (0.365079, 0.493342, 0.493342), + (0.380952, 0.503953, 0.503953), + (0.396825, 0.514344, 0.514344), + (0.412698, 0.524531, 0.524531), + (0.428571, 0.534522, 0.534522), + (0.444444, 0.544331, 0.544331), + (0.460317, 0.553966, 0.553966), + (0.476190, 0.563436, 0.563436), + (0.492063, 0.572750, 0.572750), + (0.507937, 0.581914, 0.581914), + (0.523810, 0.590937, 0.590937), + (0.539683, 0.599824, 0.599824), + (0.555556, 0.608581, 0.608581), + (0.571429, 0.617213, 0.617213), + (0.587302, 0.625727, 0.625727), + (0.603175, 0.634126, 0.634126), + (0.619048, 0.642416, 0.642416), + (0.634921, 0.650600, 0.650600), + (0.650794, 0.658682, 0.658682), + (0.666667, 0.666667, 0.666667), + (0.682540, 0.674556, 0.674556), + (0.698413, 0.682355, 0.682355), + (0.714286, 0.690066, 0.690066), + (0.730159, 0.697691, 0.697691), + (0.746032, 0.705234, 0.705234), + (0.761905, 0.727166, 0.727166), + (0.777778, 0.748455, 0.748455), + (0.793651, 0.769156, 0.769156), + (0.809524, 0.789314, 0.789314), + (0.825397, 0.808969, 0.808969), + (0.841270, 0.828159, 0.828159), + (0.857143, 0.846913, 0.846913), + (0.873016, 0.865261, 0.865261), + (0.888889, 0.883229, 0.883229), + (0.904762, 0.900837, 0.900837), + (0.920635, 0.918109, 0.918109), + (0.936508, 0.935061, 0.935061), + (0.952381, 0.951711, 0.951711), + (0.968254, 0.968075, 0.968075), + (0.984127, 0.984167, 0.984167), (1.0, 1.0, 1.0))} + +_spring_data = {'red': ((0., 1., 1.), (1.0, 1.0, 1.0)), + 'green': ((0., 0., 0.), (1.0, 1.0, 1.0)), + 'blue': ((0., 1., 1.), (1.0, 0.0, 0.0))} + + +_summer_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)), + 'green': ((0., 0.5, 0.5), (1.0, 1.0, 1.0)), + 'blue': ((0., 0.4, 0.4), (1.0, 0.4, 0.4))} + + +_winter_data = {'red': ((0., 0., 0.), (1.0, 0.0, 0.0)), + 'green': ((0., 0., 0.), (1.0, 1.0, 1.0)), + 'blue': ((0., 1., 1.), (1.0, 0.5, 0.5))} + +_nipy_spectral_data = { + 'red': [ + (0.0, 0.0, 0.0), (0.05, 0.4667, 0.4667), + (0.10, 0.5333, 0.5333), (0.15, 0.0, 0.0), + (0.20, 0.0, 0.0), (0.25, 0.0, 0.0), + (0.30, 
0.0, 0.0), (0.35, 0.0, 0.0), + (0.40, 0.0, 0.0), (0.45, 0.0, 0.0), + (0.50, 0.0, 0.0), (0.55, 0.0, 0.0), + (0.60, 0.0, 0.0), (0.65, 0.7333, 0.7333), + (0.70, 0.9333, 0.9333), (0.75, 1.0, 1.0), + (0.80, 1.0, 1.0), (0.85, 1.0, 1.0), + (0.90, 0.8667, 0.8667), (0.95, 0.80, 0.80), + (1.0, 0.80, 0.80), + ], + 'green': [ + (0.0, 0.0, 0.0), (0.05, 0.0, 0.0), + (0.10, 0.0, 0.0), (0.15, 0.0, 0.0), + (0.20, 0.0, 0.0), (0.25, 0.4667, 0.4667), + (0.30, 0.6000, 0.6000), (0.35, 0.6667, 0.6667), + (0.40, 0.6667, 0.6667), (0.45, 0.6000, 0.6000), + (0.50, 0.7333, 0.7333), (0.55, 0.8667, 0.8667), + (0.60, 1.0, 1.0), (0.65, 1.0, 1.0), + (0.70, 0.9333, 0.9333), (0.75, 0.8000, 0.8000), + (0.80, 0.6000, 0.6000), (0.85, 0.0, 0.0), + (0.90, 0.0, 0.0), (0.95, 0.0, 0.0), + (1.0, 0.80, 0.80), + ], + 'blue': [ + (0.0, 0.0, 0.0), (0.05, 0.5333, 0.5333), + (0.10, 0.6000, 0.6000), (0.15, 0.6667, 0.6667), + (0.20, 0.8667, 0.8667), (0.25, 0.8667, 0.8667), + (0.30, 0.8667, 0.8667), (0.35, 0.6667, 0.6667), + (0.40, 0.5333, 0.5333), (0.45, 0.0, 0.0), + (0.5, 0.0, 0.0), (0.55, 0.0, 0.0), + (0.60, 0.0, 0.0), (0.65, 0.0, 0.0), + (0.70, 0.0, 0.0), (0.75, 0.0, 0.0), + (0.80, 0.0, 0.0), (0.85, 0.0, 0.0), + (0.90, 0.0, 0.0), (0.95, 0.0, 0.0), + (1.0, 0.80, 0.80), + ], +} + + +# 34 colormaps based on color specifications and designs +# developed by Cynthia Brewer (https://colorbrewer2.org/). +# The ColorBrewer palettes have been included under the terms +# of an Apache-stype license (for details, see the file +# LICENSE_COLORBREWER in the license directory of the matplotlib +# source distribution). 
+ +# RGB values taken from Brewer's Excel sheet, divided by 255 + +_Blues_data = ( + (0.96862745098039216, 0.98431372549019602, 1.0 ), + (0.87058823529411766, 0.92156862745098034, 0.96862745098039216), + (0.77647058823529413, 0.85882352941176465, 0.93725490196078431), + (0.61960784313725492, 0.792156862745098 , 0.88235294117647056), + (0.41960784313725491, 0.68235294117647061, 0.83921568627450982), + (0.25882352941176473, 0.5725490196078431 , 0.77647058823529413), + (0.12941176470588237, 0.44313725490196076, 0.70980392156862748), + (0.03137254901960784, 0.31764705882352939, 0.61176470588235299), + (0.03137254901960784, 0.18823529411764706, 0.41960784313725491) + ) + +_BrBG_data = ( + (0.32941176470588235, 0.18823529411764706, 0.0196078431372549 ), + (0.5490196078431373 , 0.31764705882352939, 0.0392156862745098 ), + (0.74901960784313726, 0.50588235294117645, 0.17647058823529413), + (0.87450980392156863, 0.76078431372549016, 0.49019607843137253), + (0.96470588235294119, 0.90980392156862744, 0.76470588235294112), + (0.96078431372549022, 0.96078431372549022, 0.96078431372549022), + (0.7803921568627451 , 0.91764705882352937, 0.89803921568627454), + (0.50196078431372548, 0.80392156862745101, 0.75686274509803919), + (0.20784313725490197, 0.59215686274509804, 0.5607843137254902 ), + (0.00392156862745098, 0.4 , 0.36862745098039218), + (0.0 , 0.23529411764705882, 0.18823529411764706) + ) + +_BuGn_data = ( + (0.96862745098039216, 0.9882352941176471 , 0.99215686274509807), + (0.89803921568627454, 0.96078431372549022, 0.97647058823529409), + (0.8 , 0.92549019607843142, 0.90196078431372551), + (0.6 , 0.84705882352941175, 0.78823529411764703), + (0.4 , 0.76078431372549016, 0.64313725490196083), + (0.25490196078431371, 0.68235294117647061, 0.46274509803921571), + (0.13725490196078433, 0.54509803921568623, 0.27058823529411763), + (0.0 , 0.42745098039215684, 0.17254901960784313), + (0.0 , 0.26666666666666666, 0.10588235294117647) + ) + +_BuPu_data = ( + (0.96862745098039216, 
0.9882352941176471 , 0.99215686274509807), + (0.8784313725490196 , 0.92549019607843142, 0.95686274509803926), + (0.74901960784313726, 0.82745098039215681, 0.90196078431372551), + (0.61960784313725492, 0.73725490196078436, 0.85490196078431369), + (0.5490196078431373 , 0.58823529411764708, 0.77647058823529413), + (0.5490196078431373 , 0.41960784313725491, 0.69411764705882351), + (0.53333333333333333, 0.25490196078431371, 0.61568627450980395), + (0.50588235294117645, 0.05882352941176471, 0.48627450980392156), + (0.30196078431372547, 0.0 , 0.29411764705882354) + ) + +_GnBu_data = ( + (0.96862745098039216, 0.9882352941176471 , 0.94117647058823528), + (0.8784313725490196 , 0.95294117647058818, 0.85882352941176465), + (0.8 , 0.92156862745098034, 0.77254901960784317), + (0.6588235294117647 , 0.8666666666666667 , 0.70980392156862748), + (0.4823529411764706 , 0.8 , 0.7686274509803922 ), + (0.30588235294117649, 0.70196078431372544, 0.82745098039215681), + (0.16862745098039217, 0.5490196078431373 , 0.74509803921568629), + (0.03137254901960784, 0.40784313725490196, 0.67450980392156867), + (0.03137254901960784, 0.25098039215686274, 0.50588235294117645) + ) + +_Greens_data = ( + (0.96862745098039216, 0.9882352941176471 , 0.96078431372549022), + (0.89803921568627454, 0.96078431372549022, 0.8784313725490196 ), + (0.7803921568627451 , 0.9137254901960784 , 0.75294117647058822), + (0.63137254901960782, 0.85098039215686272, 0.60784313725490191), + (0.45490196078431372, 0.7686274509803922 , 0.46274509803921571), + (0.25490196078431371, 0.6705882352941176 , 0.36470588235294116), + (0.13725490196078433, 0.54509803921568623, 0.27058823529411763), + (0.0 , 0.42745098039215684, 0.17254901960784313), + (0.0 , 0.26666666666666666, 0.10588235294117647) + ) + +_Greys_data = ( + (1.0 , 1.0 , 1.0 ), + (0.94117647058823528, 0.94117647058823528, 0.94117647058823528), + (0.85098039215686272, 0.85098039215686272, 0.85098039215686272), + (0.74117647058823533, 0.74117647058823533, 0.74117647058823533), 
+ (0.58823529411764708, 0.58823529411764708, 0.58823529411764708), + (0.45098039215686275, 0.45098039215686275, 0.45098039215686275), + (0.32156862745098042, 0.32156862745098042, 0.32156862745098042), + (0.14509803921568629, 0.14509803921568629, 0.14509803921568629), + (0.0 , 0.0 , 0.0 ) + ) + +_Oranges_data = ( + (1.0 , 0.96078431372549022, 0.92156862745098034), + (0.99607843137254903, 0.90196078431372551, 0.80784313725490198), + (0.99215686274509807, 0.81568627450980391, 0.63529411764705879), + (0.99215686274509807, 0.68235294117647061, 0.41960784313725491), + (0.99215686274509807, 0.55294117647058827, 0.23529411764705882), + (0.94509803921568625, 0.41176470588235292, 0.07450980392156863), + (0.85098039215686272, 0.28235294117647058, 0.00392156862745098), + (0.65098039215686276, 0.21176470588235294, 0.01176470588235294), + (0.49803921568627452, 0.15294117647058825, 0.01568627450980392) + ) + +_OrRd_data = ( + (1.0 , 0.96862745098039216, 0.92549019607843142), + (0.99607843137254903, 0.90980392156862744, 0.78431372549019607), + (0.99215686274509807, 0.83137254901960789, 0.61960784313725492), + (0.99215686274509807, 0.73333333333333328, 0.51764705882352946), + (0.9882352941176471 , 0.55294117647058827, 0.34901960784313724), + (0.93725490196078431, 0.396078431372549 , 0.28235294117647058), + (0.84313725490196079, 0.18823529411764706, 0.12156862745098039), + (0.70196078431372544, 0.0 , 0.0 ), + (0.49803921568627452, 0.0 , 0.0 ) + ) + +_PiYG_data = ( + (0.55686274509803924, 0.00392156862745098, 0.32156862745098042), + (0.77254901960784317, 0.10588235294117647, 0.49019607843137253), + (0.87058823529411766, 0.46666666666666667, 0.68235294117647061), + (0.94509803921568625, 0.71372549019607845, 0.85490196078431369), + (0.99215686274509807, 0.8784313725490196 , 0.93725490196078431), + (0.96862745098039216, 0.96862745098039216, 0.96862745098039216), + (0.90196078431372551, 0.96078431372549022, 0.81568627450980391), + (0.72156862745098038, 0.88235294117647056, 
0.52549019607843139), + (0.49803921568627452, 0.73725490196078436, 0.25490196078431371), + (0.30196078431372547, 0.5725490196078431 , 0.12941176470588237), + (0.15294117647058825, 0.39215686274509803, 0.09803921568627451) + ) + +_PRGn_data = ( + (0.25098039215686274, 0.0 , 0.29411764705882354), + (0.46274509803921571, 0.16470588235294117, 0.51372549019607838), + (0.6 , 0.4392156862745098 , 0.6705882352941176 ), + (0.76078431372549016, 0.6470588235294118 , 0.81176470588235294), + (0.90588235294117647, 0.83137254901960789, 0.90980392156862744), + (0.96862745098039216, 0.96862745098039216, 0.96862745098039216), + (0.85098039215686272, 0.94117647058823528, 0.82745098039215681), + (0.65098039215686276, 0.85882352941176465, 0.62745098039215685), + (0.35294117647058826, 0.68235294117647061, 0.38039215686274508), + (0.10588235294117647, 0.47058823529411764, 0.21568627450980393), + (0.0 , 0.26666666666666666, 0.10588235294117647) + ) + +_PuBu_data = ( + (1.0 , 0.96862745098039216, 0.98431372549019602), + (0.92549019607843142, 0.90588235294117647, 0.94901960784313721), + (0.81568627450980391, 0.81960784313725488, 0.90196078431372551), + (0.65098039215686276, 0.74117647058823533, 0.85882352941176465), + (0.45490196078431372, 0.66274509803921566, 0.81176470588235294), + (0.21176470588235294, 0.56470588235294117, 0.75294117647058822), + (0.0196078431372549 , 0.4392156862745098 , 0.69019607843137254), + (0.01568627450980392, 0.35294117647058826, 0.55294117647058827), + (0.00784313725490196, 0.2196078431372549 , 0.34509803921568627) + ) + +_PuBuGn_data = ( + (1.0 , 0.96862745098039216, 0.98431372549019602), + (0.92549019607843142, 0.88627450980392153, 0.94117647058823528), + (0.81568627450980391, 0.81960784313725488, 0.90196078431372551), + (0.65098039215686276, 0.74117647058823533, 0.85882352941176465), + (0.40392156862745099, 0.66274509803921566, 0.81176470588235294), + (0.21176470588235294, 0.56470588235294117, 0.75294117647058822), + (0.00784313725490196, 0.50588235294117645, 
0.54117647058823526), + (0.00392156862745098, 0.42352941176470588, 0.34901960784313724), + (0.00392156862745098, 0.27450980392156865, 0.21176470588235294) + ) + +_PuOr_data = ( + (0.49803921568627452, 0.23137254901960785, 0.03137254901960784), + (0.70196078431372544, 0.34509803921568627, 0.02352941176470588), + (0.8784313725490196 , 0.50980392156862742, 0.07843137254901961), + (0.99215686274509807, 0.72156862745098038, 0.38823529411764707), + (0.99607843137254903, 0.8784313725490196 , 0.71372549019607845), + (0.96862745098039216, 0.96862745098039216, 0.96862745098039216), + (0.84705882352941175, 0.85490196078431369, 0.92156862745098034), + (0.69803921568627447, 0.6705882352941176 , 0.82352941176470584), + (0.50196078431372548, 0.45098039215686275, 0.67450980392156867), + (0.32941176470588235, 0.15294117647058825, 0.53333333333333333), + (0.17647058823529413, 0.0 , 0.29411764705882354) + ) + +_PuRd_data = ( + (0.96862745098039216, 0.95686274509803926, 0.97647058823529409), + (0.90588235294117647, 0.88235294117647056, 0.93725490196078431), + (0.83137254901960789, 0.72549019607843135, 0.85490196078431369), + (0.78823529411764703, 0.58039215686274515, 0.7803921568627451 ), + (0.87450980392156863, 0.396078431372549 , 0.69019607843137254), + (0.90588235294117647, 0.16078431372549021, 0.54117647058823526), + (0.80784313725490198, 0.07058823529411765, 0.33725490196078434), + (0.59607843137254901, 0.0 , 0.2627450980392157 ), + (0.40392156862745099, 0.0 , 0.12156862745098039) + ) + +_Purples_data = ( + (0.9882352941176471 , 0.98431372549019602, 0.99215686274509807), + (0.93725490196078431, 0.92941176470588238, 0.96078431372549022), + (0.85490196078431369, 0.85490196078431369, 0.92156862745098034), + (0.73725490196078436, 0.74117647058823533, 0.86274509803921573), + (0.61960784313725492, 0.60392156862745094, 0.78431372549019607), + (0.50196078431372548, 0.49019607843137253, 0.72941176470588232), + (0.41568627450980394, 0.31764705882352939, 0.63921568627450975), + 
(0.32941176470588235, 0.15294117647058825, 0.5607843137254902 ), + (0.24705882352941178, 0.0 , 0.49019607843137253) + ) + +_RdBu_data = ( + (0.40392156862745099, 0.0 , 0.12156862745098039), + (0.69803921568627447, 0.09411764705882353, 0.16862745098039217), + (0.83921568627450982, 0.37647058823529411, 0.30196078431372547), + (0.95686274509803926, 0.6470588235294118 , 0.50980392156862742), + (0.99215686274509807, 0.85882352941176465, 0.7803921568627451 ), + (0.96862745098039216, 0.96862745098039216, 0.96862745098039216), + (0.81960784313725488, 0.89803921568627454, 0.94117647058823528), + (0.5725490196078431 , 0.77254901960784317, 0.87058823529411766), + (0.2627450980392157 , 0.57647058823529407, 0.76470588235294112), + (0.12941176470588237, 0.4 , 0.67450980392156867), + (0.0196078431372549 , 0.18823529411764706, 0.38039215686274508) + ) + +_RdGy_data = ( + (0.40392156862745099, 0.0 , 0.12156862745098039), + (0.69803921568627447, 0.09411764705882353, 0.16862745098039217), + (0.83921568627450982, 0.37647058823529411, 0.30196078431372547), + (0.95686274509803926, 0.6470588235294118 , 0.50980392156862742), + (0.99215686274509807, 0.85882352941176465, 0.7803921568627451 ), + (1.0 , 1.0 , 1.0 ), + (0.8784313725490196 , 0.8784313725490196 , 0.8784313725490196 ), + (0.72941176470588232, 0.72941176470588232, 0.72941176470588232), + (0.52941176470588236, 0.52941176470588236, 0.52941176470588236), + (0.30196078431372547, 0.30196078431372547, 0.30196078431372547), + (0.10196078431372549, 0.10196078431372549, 0.10196078431372549) + ) + +_RdPu_data = ( + (1.0 , 0.96862745098039216, 0.95294117647058818), + (0.99215686274509807, 0.8784313725490196 , 0.86666666666666667), + (0.9882352941176471 , 0.77254901960784317, 0.75294117647058822), + (0.98039215686274506, 0.62352941176470589, 0.70980392156862748), + (0.96862745098039216, 0.40784313725490196, 0.63137254901960782), + (0.86666666666666667, 0.20392156862745098, 0.59215686274509804), + (0.68235294117647061, 0.00392156862745098, 
0.49411764705882355), + (0.47843137254901963, 0.00392156862745098, 0.46666666666666667), + (0.28627450980392155, 0.0 , 0.41568627450980394) + ) + +_RdYlBu_data = ( + (0.6470588235294118 , 0.0 , 0.14901960784313725), + (0.84313725490196079, 0.18823529411764706 , 0.15294117647058825), + (0.95686274509803926, 0.42745098039215684 , 0.2627450980392157 ), + (0.99215686274509807, 0.68235294117647061 , 0.38039215686274508), + (0.99607843137254903, 0.8784313725490196 , 0.56470588235294117), + (1.0 , 1.0 , 0.74901960784313726), + (0.8784313725490196 , 0.95294117647058818 , 0.97254901960784312), + (0.6705882352941176 , 0.85098039215686272 , 0.9137254901960784 ), + (0.45490196078431372, 0.67843137254901964 , 0.81960784313725488), + (0.27058823529411763, 0.45882352941176469 , 0.70588235294117652), + (0.19215686274509805, 0.21176470588235294 , 0.58431372549019611) + ) + +_RdYlGn_data = ( + (0.6470588235294118 , 0.0 , 0.14901960784313725), + (0.84313725490196079, 0.18823529411764706 , 0.15294117647058825), + (0.95686274509803926, 0.42745098039215684 , 0.2627450980392157 ), + (0.99215686274509807, 0.68235294117647061 , 0.38039215686274508), + (0.99607843137254903, 0.8784313725490196 , 0.54509803921568623), + (1.0 , 1.0 , 0.74901960784313726), + (0.85098039215686272, 0.93725490196078431 , 0.54509803921568623), + (0.65098039215686276, 0.85098039215686272 , 0.41568627450980394), + (0.4 , 0.74117647058823533 , 0.38823529411764707), + (0.10196078431372549, 0.59607843137254901 , 0.31372549019607843), + (0.0 , 0.40784313725490196 , 0.21568627450980393) + ) + +_Reds_data = ( + (1.0 , 0.96078431372549022 , 0.94117647058823528), + (0.99607843137254903, 0.8784313725490196 , 0.82352941176470584), + (0.9882352941176471 , 0.73333333333333328 , 0.63137254901960782), + (0.9882352941176471 , 0.5725490196078431 , 0.44705882352941179), + (0.98431372549019602, 0.41568627450980394 , 0.29019607843137257), + (0.93725490196078431, 0.23137254901960785 , 0.17254901960784313), + (0.79607843137254897, 
0.094117647058823528, 0.11372549019607843), + (0.6470588235294118 , 0.058823529411764705, 0.08235294117647058), + (0.40392156862745099, 0.0 , 0.05098039215686274) + ) + +_Spectral_data = ( + (0.61960784313725492, 0.003921568627450980, 0.25882352941176473), + (0.83529411764705885, 0.24313725490196078 , 0.30980392156862746), + (0.95686274509803926, 0.42745098039215684 , 0.2627450980392157 ), + (0.99215686274509807, 0.68235294117647061 , 0.38039215686274508), + (0.99607843137254903, 0.8784313725490196 , 0.54509803921568623), + (1.0 , 1.0 , 0.74901960784313726), + (0.90196078431372551, 0.96078431372549022 , 0.59607843137254901), + (0.6705882352941176 , 0.8666666666666667 , 0.64313725490196083), + (0.4 , 0.76078431372549016 , 0.6470588235294118 ), + (0.19607843137254902, 0.53333333333333333 , 0.74117647058823533), + (0.36862745098039218, 0.30980392156862746 , 0.63529411764705879) + ) + +_YlGn_data = ( + (1.0 , 1.0 , 0.89803921568627454), + (0.96862745098039216, 0.9882352941176471 , 0.72549019607843135), + (0.85098039215686272, 0.94117647058823528 , 0.63921568627450975), + (0.67843137254901964, 0.8666666666666667 , 0.55686274509803924), + (0.47058823529411764, 0.77647058823529413 , 0.47450980392156861), + (0.25490196078431371, 0.6705882352941176 , 0.36470588235294116), + (0.13725490196078433, 0.51764705882352946 , 0.2627450980392157 ), + (0.0 , 0.40784313725490196 , 0.21568627450980393), + (0.0 , 0.27058823529411763 , 0.16078431372549021) + ) + +_YlGnBu_data = ( + (1.0 , 1.0 , 0.85098039215686272), + (0.92941176470588238, 0.97254901960784312 , 0.69411764705882351), + (0.7803921568627451 , 0.9137254901960784 , 0.70588235294117652), + (0.49803921568627452, 0.80392156862745101 , 0.73333333333333328), + (0.25490196078431371, 0.71372549019607845 , 0.7686274509803922 ), + (0.11372549019607843, 0.56862745098039214 , 0.75294117647058822), + (0.13333333333333333, 0.36862745098039218 , 0.6588235294117647 ), + (0.14509803921568629, 0.20392156862745098 , 0.58039215686274515), + 
(0.03137254901960784, 0.11372549019607843 , 0.34509803921568627) + ) + +_YlOrBr_data = ( + (1.0 , 1.0 , 0.89803921568627454), + (1.0 , 0.96862745098039216 , 0.73725490196078436), + (0.99607843137254903, 0.8901960784313725 , 0.56862745098039214), + (0.99607843137254903, 0.7686274509803922 , 0.30980392156862746), + (0.99607843137254903, 0.6 , 0.16078431372549021), + (0.92549019607843142, 0.4392156862745098 , 0.07843137254901961), + (0.8 , 0.29803921568627451 , 0.00784313725490196), + (0.6 , 0.20392156862745098 , 0.01568627450980392), + (0.4 , 0.14509803921568629 , 0.02352941176470588) + ) + +_YlOrRd_data = ( + (1.0 , 1.0 , 0.8 ), + (1.0 , 0.92941176470588238 , 0.62745098039215685), + (0.99607843137254903, 0.85098039215686272 , 0.46274509803921571), + (0.99607843137254903, 0.69803921568627447 , 0.29803921568627451), + (0.99215686274509807, 0.55294117647058827 , 0.23529411764705882), + (0.9882352941176471 , 0.30588235294117649 , 0.16470588235294117), + (0.8901960784313725 , 0.10196078431372549 , 0.10980392156862745), + (0.74117647058823533, 0.0 , 0.14901960784313725), + (0.50196078431372548, 0.0 , 0.14901960784313725) + ) + + +# ColorBrewer's qualitative maps, implemented using ListedColormap +# for use with mpl.colors.NoNorm + +_Accent_data = ( + (0.49803921568627452, 0.78823529411764703, 0.49803921568627452), + (0.74509803921568629, 0.68235294117647061, 0.83137254901960789), + (0.99215686274509807, 0.75294117647058822, 0.52549019607843139), + (1.0, 1.0, 0.6 ), + (0.2196078431372549, 0.42352941176470588, 0.69019607843137254), + (0.94117647058823528, 0.00784313725490196, 0.49803921568627452), + (0.74901960784313726, 0.35686274509803922, 0.09019607843137254), + (0.4, 0.4, 0.4 ), + ) + +_Dark2_data = ( + (0.10588235294117647, 0.61960784313725492, 0.46666666666666667), + (0.85098039215686272, 0.37254901960784315, 0.00784313725490196), + (0.45882352941176469, 0.4392156862745098, 0.70196078431372544), + (0.90588235294117647, 0.16078431372549021, 0.54117647058823526), + 
(0.4, 0.65098039215686276, 0.11764705882352941), + (0.90196078431372551, 0.6705882352941176, 0.00784313725490196), + (0.65098039215686276, 0.46274509803921571, 0.11372549019607843), + (0.4, 0.4, 0.4 ), + ) + +_Paired_data = ( + (0.65098039215686276, 0.80784313725490198, 0.8901960784313725 ), + (0.12156862745098039, 0.47058823529411764, 0.70588235294117652), + (0.69803921568627447, 0.87450980392156863, 0.54117647058823526), + (0.2, 0.62745098039215685, 0.17254901960784313), + (0.98431372549019602, 0.60392156862745094, 0.6 ), + (0.8901960784313725, 0.10196078431372549, 0.10980392156862745), + (0.99215686274509807, 0.74901960784313726, 0.43529411764705883), + (1.0, 0.49803921568627452, 0.0 ), + (0.792156862745098, 0.69803921568627447, 0.83921568627450982), + (0.41568627450980394, 0.23921568627450981, 0.60392156862745094), + (1.0, 1.0, 0.6 ), + (0.69411764705882351, 0.34901960784313724, 0.15686274509803921), + ) + +_Pastel1_data = ( + (0.98431372549019602, 0.70588235294117652, 0.68235294117647061), + (0.70196078431372544, 0.80392156862745101, 0.8901960784313725 ), + (0.8, 0.92156862745098034, 0.77254901960784317), + (0.87058823529411766, 0.79607843137254897, 0.89411764705882357), + (0.99607843137254903, 0.85098039215686272, 0.65098039215686276), + (1.0, 1.0, 0.8 ), + (0.89803921568627454, 0.84705882352941175, 0.74117647058823533), + (0.99215686274509807, 0.85490196078431369, 0.92549019607843142), + (0.94901960784313721, 0.94901960784313721, 0.94901960784313721), + ) + +_Pastel2_data = ( + (0.70196078431372544, 0.88627450980392153, 0.80392156862745101), + (0.99215686274509807, 0.80392156862745101, 0.67450980392156867), + (0.79607843137254897, 0.83529411764705885, 0.90980392156862744), + (0.95686274509803926, 0.792156862745098, 0.89411764705882357), + (0.90196078431372551, 0.96078431372549022, 0.78823529411764703), + (1.0, 0.94901960784313721, 0.68235294117647061), + (0.94509803921568625, 0.88627450980392153, 0.8 ), + (0.8, 0.8, 0.8 ), + ) + +_Set1_data = ( + 
(0.89411764705882357, 0.10196078431372549, 0.10980392156862745), + (0.21568627450980393, 0.49411764705882355, 0.72156862745098038), + (0.30196078431372547, 0.68627450980392157, 0.29019607843137257), + (0.59607843137254901, 0.30588235294117649, 0.63921568627450975), + (1.0, 0.49803921568627452, 0.0 ), + (1.0, 1.0, 0.2 ), + (0.65098039215686276, 0.33725490196078434, 0.15686274509803921), + (0.96862745098039216, 0.50588235294117645, 0.74901960784313726), + (0.6, 0.6, 0.6), + ) + +_Set2_data = ( + (0.4, 0.76078431372549016, 0.6470588235294118 ), + (0.9882352941176471, 0.55294117647058827, 0.3843137254901961 ), + (0.55294117647058827, 0.62745098039215685, 0.79607843137254897), + (0.90588235294117647, 0.54117647058823526, 0.76470588235294112), + (0.65098039215686276, 0.84705882352941175, 0.32941176470588235), + (1.0, 0.85098039215686272, 0.18431372549019609), + (0.89803921568627454, 0.7686274509803922, 0.58039215686274515), + (0.70196078431372544, 0.70196078431372544, 0.70196078431372544), + ) + +_Set3_data = ( + (0.55294117647058827, 0.82745098039215681, 0.7803921568627451 ), + (1.0, 1.0, 0.70196078431372544), + (0.74509803921568629, 0.72941176470588232, 0.85490196078431369), + (0.98431372549019602, 0.50196078431372548, 0.44705882352941179), + (0.50196078431372548, 0.69411764705882351, 0.82745098039215681), + (0.99215686274509807, 0.70588235294117652, 0.3843137254901961 ), + (0.70196078431372544, 0.87058823529411766, 0.41176470588235292), + (0.9882352941176471, 0.80392156862745101, 0.89803921568627454), + (0.85098039215686272, 0.85098039215686272, 0.85098039215686272), + (0.73725490196078436, 0.50196078431372548, 0.74117647058823533), + (0.8, 0.92156862745098034, 0.77254901960784317), + (1.0, 0.92941176470588238, 0.43529411764705883), + ) + + +# The next 7 palettes are from the Yorick scientific visualization package, +# an evolution of the GIST package, both by David H. Munro. 
+# They are released under a BSD-like license (see LICENSE_YORICK in +# the license directory of the matplotlib source distribution). +# +# Most palette functions have been reduced to simple function descriptions +# by Reinier Heeres, since the rgb components were mostly straight lines. +# gist_earth_data and gist_ncar_data were simplified by a script and some +# manual effort. + +_gist_earth_data = \ +{'red': ( +(0.0, 0.0, 0.0000), +(0.2824, 0.1882, 0.1882), +(0.4588, 0.2714, 0.2714), +(0.5490, 0.4719, 0.4719), +(0.6980, 0.7176, 0.7176), +(0.7882, 0.7553, 0.7553), +(1.0000, 0.9922, 0.9922), +), 'green': ( +(0.0, 0.0, 0.0000), +(0.0275, 0.0000, 0.0000), +(0.1098, 0.1893, 0.1893), +(0.1647, 0.3035, 0.3035), +(0.2078, 0.3841, 0.3841), +(0.2824, 0.5020, 0.5020), +(0.5216, 0.6397, 0.6397), +(0.6980, 0.7171, 0.7171), +(0.7882, 0.6392, 0.6392), +(0.7922, 0.6413, 0.6413), +(0.8000, 0.6447, 0.6447), +(0.8078, 0.6481, 0.6481), +(0.8157, 0.6549, 0.6549), +(0.8667, 0.6991, 0.6991), +(0.8745, 0.7103, 0.7103), +(0.8824, 0.7216, 0.7216), +(0.8902, 0.7323, 0.7323), +(0.8980, 0.7430, 0.7430), +(0.9412, 0.8275, 0.8275), +(0.9569, 0.8635, 0.8635), +(0.9647, 0.8816, 0.8816), +(0.9961, 0.9733, 0.9733), +(1.0000, 0.9843, 0.9843), +), 'blue': ( +(0.0, 0.0, 0.0000), +(0.0039, 0.1684, 0.1684), +(0.0078, 0.2212, 0.2212), +(0.0275, 0.4329, 0.4329), +(0.0314, 0.4549, 0.4549), +(0.2824, 0.5004, 0.5004), +(0.4667, 0.2748, 0.2748), +(0.5451, 0.3205, 0.3205), +(0.7843, 0.3961, 0.3961), +(0.8941, 0.6651, 0.6651), +(1.0000, 0.9843, 0.9843), +)} + +_gist_gray_data = { + 'red': gfunc[3], + 'green': gfunc[3], + 'blue': gfunc[3], +} + +def _gist_heat_red(x): return 1.5 * x +def _gist_heat_green(x): return 2 * x - 1 +def _gist_heat_blue(x): return 4 * x - 3 +_gist_heat_data = { + 'red': _gist_heat_red, 'green': _gist_heat_green, 'blue': _gist_heat_blue} + +_gist_ncar_data = \ +{'red': ( +(0.0, 0.0, 0.0000), +(0.3098, 0.0000, 0.0000), +(0.3725, 0.3993, 0.3993), +(0.4235, 0.5003, 0.5003), +(0.5333, 
1.0000, 1.0000), +(0.7922, 1.0000, 1.0000), +(0.8471, 0.6218, 0.6218), +(0.8980, 0.9235, 0.9235), +(1.0000, 0.9961, 0.9961), +), 'green': ( +(0.0, 0.0, 0.0000), +(0.0510, 0.3722, 0.3722), +(0.1059, 0.0000, 0.0000), +(0.1569, 0.7202, 0.7202), +(0.1608, 0.7537, 0.7537), +(0.1647, 0.7752, 0.7752), +(0.2157, 1.0000, 1.0000), +(0.2588, 0.9804, 0.9804), +(0.2706, 0.9804, 0.9804), +(0.3176, 1.0000, 1.0000), +(0.3686, 0.8081, 0.8081), +(0.4275, 1.0000, 1.0000), +(0.5216, 1.0000, 1.0000), +(0.6314, 0.7292, 0.7292), +(0.6863, 0.2796, 0.2796), +(0.7451, 0.0000, 0.0000), +(0.7922, 0.0000, 0.0000), +(0.8431, 0.1753, 0.1753), +(0.8980, 0.5000, 0.5000), +(1.0000, 0.9725, 0.9725), +), 'blue': ( +(0.0, 0.5020, 0.5020), +(0.0510, 0.0222, 0.0222), +(0.1098, 1.0000, 1.0000), +(0.2039, 1.0000, 1.0000), +(0.2627, 0.6145, 0.6145), +(0.3216, 0.0000, 0.0000), +(0.4157, 0.0000, 0.0000), +(0.4745, 0.2342, 0.2342), +(0.5333, 0.0000, 0.0000), +(0.5804, 0.0000, 0.0000), +(0.6314, 0.0549, 0.0549), +(0.6902, 0.0000, 0.0000), +(0.7373, 0.0000, 0.0000), +(0.7922, 0.9738, 0.9738), +(0.8000, 1.0000, 1.0000), +(0.8431, 1.0000, 1.0000), +(0.8980, 0.9341, 0.9341), +(1.0000, 0.9961, 0.9961), +)} + +_gist_rainbow_data = ( + (0.000, (1.00, 0.00, 0.16)), + (0.030, (1.00, 0.00, 0.00)), + (0.215, (1.00, 1.00, 0.00)), + (0.400, (0.00, 1.00, 0.00)), + (0.586, (0.00, 1.00, 1.00)), + (0.770, (0.00, 0.00, 1.00)), + (0.954, (1.00, 0.00, 1.00)), + (1.000, (1.00, 0.00, 0.75)) +) + +_gist_stern_data = { + 'red': ( + (0.000, 0.000, 0.000), (0.0547, 1.000, 1.000), + (0.250, 0.027, 0.250), # (0.2500, 0.250, 0.250), + (1.000, 1.000, 1.000)), + 'green': ((0, 0, 0), (1, 1, 1)), + 'blue': ( + (0.000, 0.000, 0.000), (0.500, 1.000, 1.000), + (0.735, 0.000, 0.000), (1.000, 1.000, 1.000)) +} + +def _gist_yarg(x): return 1 - x +_gist_yarg_data = {'red': _gist_yarg, 'green': _gist_yarg, 'blue': _gist_yarg} + +# This bipolar colormap was generated from CoolWarmFloat33.csv of +# "Diverging Color Maps for Scientific Visualization" by 
Kenneth Moreland. +# +_coolwarm_data = { + 'red': [ + (0.0, 0.2298057, 0.2298057), + (0.03125, 0.26623388, 0.26623388), + (0.0625, 0.30386891, 0.30386891), + (0.09375, 0.342804478, 0.342804478), + (0.125, 0.38301334, 0.38301334), + (0.15625, 0.424369608, 0.424369608), + (0.1875, 0.46666708, 0.46666708), + (0.21875, 0.509635204, 0.509635204), + (0.25, 0.552953156, 0.552953156), + (0.28125, 0.596262162, 0.596262162), + (0.3125, 0.639176211, 0.639176211), + (0.34375, 0.681291281, 0.681291281), + (0.375, 0.722193294, 0.722193294), + (0.40625, 0.761464949, 0.761464949), + (0.4375, 0.798691636, 0.798691636), + (0.46875, 0.833466556, 0.833466556), + (0.5, 0.865395197, 0.865395197), + (0.53125, 0.897787179, 0.897787179), + (0.5625, 0.924127593, 0.924127593), + (0.59375, 0.944468518, 0.944468518), + (0.625, 0.958852946, 0.958852946), + (0.65625, 0.96732803, 0.96732803), + (0.6875, 0.969954137, 0.969954137), + (0.71875, 0.966811177, 0.966811177), + (0.75, 0.958003065, 0.958003065), + (0.78125, 0.943660866, 0.943660866), + (0.8125, 0.923944917, 0.923944917), + (0.84375, 0.89904617, 0.89904617), + (0.875, 0.869186849, 0.869186849), + (0.90625, 0.834620542, 0.834620542), + (0.9375, 0.795631745, 0.795631745), + (0.96875, 0.752534934, 0.752534934), + (1.0, 0.705673158, 0.705673158)], + 'green': [ + (0.0, 0.298717966, 0.298717966), + (0.03125, 0.353094838, 0.353094838), + (0.0625, 0.406535296, 0.406535296), + (0.09375, 0.458757618, 0.458757618), + (0.125, 0.50941904, 0.50941904), + (0.15625, 0.558148092, 0.558148092), + (0.1875, 0.604562568, 0.604562568), + (0.21875, 0.648280772, 0.648280772), + (0.25, 0.688929332, 0.688929332), + (0.28125, 0.726149107, 0.726149107), + (0.3125, 0.759599947, 0.759599947), + (0.34375, 0.788964712, 0.788964712), + (0.375, 0.813952739, 0.813952739), + (0.40625, 0.834302879, 0.834302879), + (0.4375, 0.849786142, 0.849786142), + (0.46875, 0.860207984, 0.860207984), + (0.5, 0.86541021, 0.86541021), + (0.53125, 0.848937047, 0.848937047), + (0.5625, 
0.827384882, 0.827384882), + (0.59375, 0.800927443, 0.800927443), + (0.625, 0.769767752, 0.769767752), + (0.65625, 0.734132809, 0.734132809), + (0.6875, 0.694266682, 0.694266682), + (0.71875, 0.650421156, 0.650421156), + (0.75, 0.602842431, 0.602842431), + (0.78125, 0.551750968, 0.551750968), + (0.8125, 0.49730856, 0.49730856), + (0.84375, 0.439559467, 0.439559467), + (0.875, 0.378313092, 0.378313092), + (0.90625, 0.312874446, 0.312874446), + (0.9375, 0.24128379, 0.24128379), + (0.96875, 0.157246067, 0.157246067), + (1.0, 0.01555616, 0.01555616)], + 'blue': [ + (0.0, 0.753683153, 0.753683153), + (0.03125, 0.801466763, 0.801466763), + (0.0625, 0.84495867, 0.84495867), + (0.09375, 0.883725899, 0.883725899), + (0.125, 0.917387822, 0.917387822), + (0.15625, 0.945619588, 0.945619588), + (0.1875, 0.968154911, 0.968154911), + (0.21875, 0.98478814, 0.98478814), + (0.25, 0.995375608, 0.995375608), + (0.28125, 0.999836203, 0.999836203), + (0.3125, 0.998151185, 0.998151185), + (0.34375, 0.990363227, 0.990363227), + (0.375, 0.976574709, 0.976574709), + (0.40625, 0.956945269, 0.956945269), + (0.4375, 0.931688648, 0.931688648), + (0.46875, 0.901068838, 0.901068838), + (0.5, 0.865395561, 0.865395561), + (0.53125, 0.820880546, 0.820880546), + (0.5625, 0.774508472, 0.774508472), + (0.59375, 0.726736146, 0.726736146), + (0.625, 0.678007945, 0.678007945), + (0.65625, 0.628751763, 0.628751763), + (0.6875, 0.579375448, 0.579375448), + (0.71875, 0.530263762, 0.530263762), + (0.75, 0.481775914, 0.481775914), + (0.78125, 0.434243684, 0.434243684), + (0.8125, 0.387970225, 0.387970225), + (0.84375, 0.343229596, 0.343229596), + (0.875, 0.300267182, 0.300267182), + (0.90625, 0.259301199, 0.259301199), + (0.9375, 0.220525627, 0.220525627), + (0.96875, 0.184115123, 0.184115123), + (1.0, 0.150232812, 0.150232812)] + } + +# Implementation of Carey Rappaport's CMRmap. 
+# See `A Color Map for Effective Black-and-White Rendering of Color-Scale +# Images' by Carey Rappaport +# https://www.mathworks.com/matlabcentral/fileexchange/2662-cmrmap-m +_CMRmap_data = {'red': ((0.000, 0.00, 0.00), + (0.125, 0.15, 0.15), + (0.250, 0.30, 0.30), + (0.375, 0.60, 0.60), + (0.500, 1.00, 1.00), + (0.625, 0.90, 0.90), + (0.750, 0.90, 0.90), + (0.875, 0.90, 0.90), + (1.000, 1.00, 1.00)), + 'green': ((0.000, 0.00, 0.00), + (0.125, 0.15, 0.15), + (0.250, 0.15, 0.15), + (0.375, 0.20, 0.20), + (0.500, 0.25, 0.25), + (0.625, 0.50, 0.50), + (0.750, 0.75, 0.75), + (0.875, 0.90, 0.90), + (1.000, 1.00, 1.00)), + 'blue': ((0.000, 0.00, 0.00), + (0.125, 0.50, 0.50), + (0.250, 0.75, 0.75), + (0.375, 0.50, 0.50), + (0.500, 0.15, 0.15), + (0.625, 0.00, 0.00), + (0.750, 0.10, 0.10), + (0.875, 0.50, 0.50), + (1.000, 1.00, 1.00))} + + +# An MIT licensed, colorblind-friendly heatmap from Wistia: +# https://github.com/wistia/heatmap-palette +# https://wistia.com/learn/culture/heatmaps-for-colorblindness +# +# >>> import matplotlib.colors as c +# >>> colors = ["#e4ff7a", "#ffe81a", "#ffbd00", "#ffa000", "#fc7f00"] +# >>> cm = c.LinearSegmentedColormap.from_list('wistia', colors) +# >>> _wistia_data = cm._segmentdata +# >>> del _wistia_data['alpha'] +# +_wistia_data = { + 'red': [(0.0, 0.8941176470588236, 0.8941176470588236), + (0.25, 1.0, 1.0), + (0.5, 1.0, 1.0), + (0.75, 1.0, 1.0), + (1.0, 0.9882352941176471, 0.9882352941176471)], + 'green': [(0.0, 1.0, 1.0), + (0.25, 0.9098039215686274, 0.9098039215686274), + (0.5, 0.7411764705882353, 0.7411764705882353), + (0.75, 0.6274509803921569, 0.6274509803921569), + (1.0, 0.4980392156862745, 0.4980392156862745)], + 'blue': [(0.0, 0.47843137254901963, 0.47843137254901963), + (0.25, 0.10196078431372549, 0.10196078431372549), + (0.5, 0.0, 0.0), + (0.75, 0.0, 0.0), + (1.0, 0.0, 0.0)], +} + + +# Categorical palettes from Vega: +# https://github.com/vega/vega/wiki/Scales +# (divided by 255) +# + +_tab10_data = ( + 
(0.12156862745098039, 0.4666666666666667, 0.7058823529411765 ), # 1f77b4 + (1.0, 0.4980392156862745, 0.054901960784313725), # ff7f0e + (0.17254901960784313, 0.6274509803921569, 0.17254901960784313 ), # 2ca02c + (0.8392156862745098, 0.15294117647058825, 0.1568627450980392 ), # d62728 + (0.5803921568627451, 0.403921568627451, 0.7411764705882353 ), # 9467bd + (0.5490196078431373, 0.33725490196078434, 0.29411764705882354 ), # 8c564b + (0.8901960784313725, 0.4666666666666667, 0.7607843137254902 ), # e377c2 + (0.4980392156862745, 0.4980392156862745, 0.4980392156862745 ), # 7f7f7f + (0.7372549019607844, 0.7411764705882353, 0.13333333333333333 ), # bcbd22 + (0.09019607843137255, 0.7450980392156863, 0.8117647058823529), # 17becf +) + +_tab20_data = ( + (0.12156862745098039, 0.4666666666666667, 0.7058823529411765 ), # 1f77b4 + (0.6823529411764706, 0.7803921568627451, 0.9098039215686274 ), # aec7e8 + (1.0, 0.4980392156862745, 0.054901960784313725), # ff7f0e + (1.0, 0.7333333333333333, 0.47058823529411764 ), # ffbb78 + (0.17254901960784313, 0.6274509803921569, 0.17254901960784313 ), # 2ca02c + (0.596078431372549, 0.8745098039215686, 0.5411764705882353 ), # 98df8a + (0.8392156862745098, 0.15294117647058825, 0.1568627450980392 ), # d62728 + (1.0, 0.596078431372549, 0.5882352941176471 ), # ff9896 + (0.5803921568627451, 0.403921568627451, 0.7411764705882353 ), # 9467bd + (0.7725490196078432, 0.6901960784313725, 0.8352941176470589 ), # c5b0d5 + (0.5490196078431373, 0.33725490196078434, 0.29411764705882354 ), # 8c564b + (0.7686274509803922, 0.611764705882353, 0.5803921568627451 ), # c49c94 + (0.8901960784313725, 0.4666666666666667, 0.7607843137254902 ), # e377c2 + (0.9686274509803922, 0.7137254901960784, 0.8235294117647058 ), # f7b6d2 + (0.4980392156862745, 0.4980392156862745, 0.4980392156862745 ), # 7f7f7f + (0.7803921568627451, 0.7803921568627451, 0.7803921568627451 ), # c7c7c7 + (0.7372549019607844, 0.7411764705882353, 0.13333333333333333 ), # bcbd22 + (0.8588235294117647, 
0.8588235294117647, 0.5529411764705883 ), # dbdb8d + (0.09019607843137255, 0.7450980392156863, 0.8117647058823529 ), # 17becf + (0.6196078431372549, 0.8549019607843137, 0.8980392156862745), # 9edae5 +) + +_tab20b_data = ( + (0.2235294117647059, 0.23137254901960785, 0.4745098039215686 ), # 393b79 + (0.3215686274509804, 0.32941176470588235, 0.6392156862745098 ), # 5254a3 + (0.4196078431372549, 0.43137254901960786, 0.8117647058823529 ), # 6b6ecf + (0.611764705882353, 0.6196078431372549, 0.8705882352941177 ), # 9c9ede + (0.38823529411764707, 0.4745098039215686, 0.2235294117647059 ), # 637939 + (0.5490196078431373, 0.6352941176470588, 0.3215686274509804 ), # 8ca252 + (0.7098039215686275, 0.8117647058823529, 0.4196078431372549 ), # b5cf6b + (0.807843137254902, 0.8588235294117647, 0.611764705882353 ), # cedb9c + (0.5490196078431373, 0.42745098039215684, 0.19215686274509805), # 8c6d31 + (0.7411764705882353, 0.6196078431372549, 0.2235294117647059 ), # bd9e39 + (0.9058823529411765, 0.7294117647058823, 0.3215686274509804 ), # e7ba52 + (0.9058823529411765, 0.796078431372549, 0.5803921568627451 ), # e7cb94 + (0.5176470588235295, 0.23529411764705882, 0.2235294117647059 ), # 843c39 + (0.6784313725490196, 0.28627450980392155, 0.2901960784313726 ), # ad494a + (0.8392156862745098, 0.3803921568627451, 0.4196078431372549 ), # d6616b + (0.9058823529411765, 0.5882352941176471, 0.611764705882353 ), # e7969c + (0.4823529411764706, 0.2549019607843137, 0.45098039215686275), # 7b4173 + (0.6470588235294118, 0.3176470588235294, 0.5803921568627451 ), # a55194 + (0.807843137254902, 0.42745098039215684, 0.7411764705882353 ), # ce6dbd + (0.8705882352941177, 0.6196078431372549, 0.8392156862745098 ), # de9ed6 +) + +_tab20c_data = ( + (0.19215686274509805, 0.5098039215686274, 0.7411764705882353 ), # 3182bd + (0.4196078431372549, 0.6823529411764706, 0.8392156862745098 ), # 6baed6 + (0.6196078431372549, 0.792156862745098, 0.8823529411764706 ), # 9ecae1 + (0.7764705882352941, 0.8588235294117647, 
0.9372549019607843 ), # c6dbef + (0.9019607843137255, 0.3333333333333333, 0.050980392156862744), # e6550d + (0.9921568627450981, 0.5529411764705883, 0.23529411764705882 ), # fd8d3c + (0.9921568627450981, 0.6823529411764706, 0.4196078431372549 ), # fdae6b + (0.9921568627450981, 0.8156862745098039, 0.6352941176470588 ), # fdd0a2 + (0.19215686274509805, 0.6392156862745098, 0.32941176470588235 ), # 31a354 + (0.4549019607843137, 0.7686274509803922, 0.4627450980392157 ), # 74c476 + (0.6313725490196078, 0.8509803921568627, 0.6078431372549019 ), # a1d99b + (0.7803921568627451, 0.9137254901960784, 0.7529411764705882 ), # c7e9c0 + (0.4588235294117647, 0.4196078431372549, 0.6941176470588235 ), # 756bb1 + (0.6196078431372549, 0.6039215686274509, 0.7843137254901961 ), # 9e9ac8 + (0.7372549019607844, 0.7411764705882353, 0.8627450980392157 ), # bcbddc + (0.8549019607843137, 0.8549019607843137, 0.9215686274509803 ), # dadaeb + (0.38823529411764707, 0.38823529411764707, 0.38823529411764707 ), # 636363 + (0.5882352941176471, 0.5882352941176471, 0.5882352941176471 ), # 969696 + (0.7411764705882353, 0.7411764705882353, 0.7411764705882353 ), # bdbdbd + (0.8509803921568627, 0.8509803921568627, 0.8509803921568627 ), # d9d9d9 +) + + +datad = { + 'Blues': _Blues_data, + 'BrBG': _BrBG_data, + 'BuGn': _BuGn_data, + 'BuPu': _BuPu_data, + 'CMRmap': _CMRmap_data, + 'GnBu': _GnBu_data, + 'Greens': _Greens_data, + 'Greys': _Greys_data, + 'OrRd': _OrRd_data, + 'Oranges': _Oranges_data, + 'PRGn': _PRGn_data, + 'PiYG': _PiYG_data, + 'PuBu': _PuBu_data, + 'PuBuGn': _PuBuGn_data, + 'PuOr': _PuOr_data, + 'PuRd': _PuRd_data, + 'Purples': _Purples_data, + 'RdBu': _RdBu_data, + 'RdGy': _RdGy_data, + 'RdPu': _RdPu_data, + 'RdYlBu': _RdYlBu_data, + 'RdYlGn': _RdYlGn_data, + 'Reds': _Reds_data, + 'Spectral': _Spectral_data, + 'Wistia': _wistia_data, + 'YlGn': _YlGn_data, + 'YlGnBu': _YlGnBu_data, + 'YlOrBr': _YlOrBr_data, + 'YlOrRd': _YlOrRd_data, + 'afmhot': _afmhot_data, + 'autumn': _autumn_data, + 
'binary': _binary_data, + 'bone': _bone_data, + 'brg': _brg_data, + 'bwr': _bwr_data, + 'cool': _cool_data, + 'coolwarm': _coolwarm_data, + 'copper': _copper_data, + 'cubehelix': _cubehelix_data, + 'flag': _flag_data, + 'gist_earth': _gist_earth_data, + 'gist_gray': _gist_gray_data, + 'gist_heat': _gist_heat_data, + 'gist_ncar': _gist_ncar_data, + 'gist_rainbow': _gist_rainbow_data, + 'gist_stern': _gist_stern_data, + 'gist_yarg': _gist_yarg_data, + 'gnuplot': _gnuplot_data, + 'gnuplot2': _gnuplot2_data, + 'gray': _gray_data, + 'hot': _hot_data, + 'hsv': _hsv_data, + 'jet': _jet_data, + 'nipy_spectral': _nipy_spectral_data, + 'ocean': _ocean_data, + 'pink': _pink_data, + 'prism': _prism_data, + 'rainbow': _rainbow_data, + 'seismic': _seismic_data, + 'spring': _spring_data, + 'summer': _summer_data, + 'terrain': _terrain_data, + 'winter': _winter_data, + # Qualitative + 'Accent': {'listed': _Accent_data}, + 'Dark2': {'listed': _Dark2_data}, + 'Paired': {'listed': _Paired_data}, + 'Pastel1': {'listed': _Pastel1_data}, + 'Pastel2': {'listed': _Pastel2_data}, + 'Set1': {'listed': _Set1_data}, + 'Set2': {'listed': _Set2_data}, + 'Set3': {'listed': _Set3_data}, + 'tab10': {'listed': _tab10_data}, + 'tab20': {'listed': _tab20_data}, + 'tab20b': {'listed': _tab20b_data}, + 'tab20c': {'listed': _tab20c_data}, +} diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_cm_listed.py b/.venv/lib/python3.9/site-packages/matplotlib/_cm_listed.py new file mode 100644 index 00000000..a331ad74 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/matplotlib/_cm_listed.py @@ -0,0 +1,2071 @@ +from .colors import ListedColormap + +_magma_data = [[0.001462, 0.000466, 0.013866], + [0.002258, 0.001295, 0.018331], + [0.003279, 0.002305, 0.023708], + [0.004512, 0.003490, 0.029965], + [0.005950, 0.004843, 0.037130], + [0.007588, 0.006356, 0.044973], + [0.009426, 0.008022, 0.052844], + [0.011465, 0.009828, 0.060750], + [0.013708, 0.011771, 0.068667], + [0.016156, 0.013840, 0.076603], + 
[0.018815, 0.016026, 0.084584], + [0.021692, 0.018320, 0.092610], + [0.024792, 0.020715, 0.100676], + [0.028123, 0.023201, 0.108787], + [0.031696, 0.025765, 0.116965], + [0.035520, 0.028397, 0.125209], + [0.039608, 0.031090, 0.133515], + [0.043830, 0.033830, 0.141886], + [0.048062, 0.036607, 0.150327], + [0.052320, 0.039407, 0.158841], + [0.056615, 0.042160, 0.167446], + [0.060949, 0.044794, 0.176129], + [0.065330, 0.047318, 0.184892], + [0.069764, 0.049726, 0.193735], + [0.074257, 0.052017, 0.202660], + [0.078815, 0.054184, 0.211667], + [0.083446, 0.056225, 0.220755], + [0.088155, 0.058133, 0.229922], + [0.092949, 0.059904, 0.239164], + [0.097833, 0.061531, 0.248477], + [0.102815, 0.063010, 0.257854], + [0.107899, 0.064335, 0.267289], + [0.113094, 0.065492, 0.276784], + [0.118405, 0.066479, 0.286321], + [0.123833, 0.067295, 0.295879], + [0.129380, 0.067935, 0.305443], + [0.135053, 0.068391, 0.315000], + [0.140858, 0.068654, 0.324538], + [0.146785, 0.068738, 0.334011], + [0.152839, 0.068637, 0.343404], + [0.159018, 0.068354, 0.352688], + [0.165308, 0.067911, 0.361816], + [0.171713, 0.067305, 0.370771], + [0.178212, 0.066576, 0.379497], + [0.184801, 0.065732, 0.387973], + [0.191460, 0.064818, 0.396152], + [0.198177, 0.063862, 0.404009], + [0.204935, 0.062907, 0.411514], + [0.211718, 0.061992, 0.418647], + [0.218512, 0.061158, 0.425392], + [0.225302, 0.060445, 0.431742], + [0.232077, 0.059889, 0.437695], + [0.238826, 0.059517, 0.443256], + [0.245543, 0.059352, 0.448436], + [0.252220, 0.059415, 0.453248], + [0.258857, 0.059706, 0.457710], + [0.265447, 0.060237, 0.461840], + [0.271994, 0.060994, 0.465660], + [0.278493, 0.061978, 0.469190], + [0.284951, 0.063168, 0.472451], + [0.291366, 0.064553, 0.475462], + [0.297740, 0.066117, 0.478243], + [0.304081, 0.067835, 0.480812], + [0.310382, 0.069702, 0.483186], + [0.316654, 0.071690, 0.485380], + [0.322899, 0.073782, 0.487408], + [0.329114, 0.075972, 0.489287], + [0.335308, 0.078236, 0.491024], + [0.341482, 0.080564, 
0.492631], + [0.347636, 0.082946, 0.494121], + [0.353773, 0.085373, 0.495501], + [0.359898, 0.087831, 0.496778], + [0.366012, 0.090314, 0.497960], + [0.372116, 0.092816, 0.499053], + [0.378211, 0.095332, 0.500067], + [0.384299, 0.097855, 0.501002], + [0.390384, 0.100379, 0.501864], + [0.396467, 0.102902, 0.502658], + [0.402548, 0.105420, 0.503386], + [0.408629, 0.107930, 0.504052], + [0.414709, 0.110431, 0.504662], + [0.420791, 0.112920, 0.505215], + [0.426877, 0.115395, 0.505714], + [0.432967, 0.117855, 0.506160], + [0.439062, 0.120298, 0.506555], + [0.445163, 0.122724, 0.506901], + [0.451271, 0.125132, 0.507198], + [0.457386, 0.127522, 0.507448], + [0.463508, 0.129893, 0.507652], + [0.469640, 0.132245, 0.507809], + [0.475780, 0.134577, 0.507921], + [0.481929, 0.136891, 0.507989], + [0.488088, 0.139186, 0.508011], + [0.494258, 0.141462, 0.507988], + [0.500438, 0.143719, 0.507920], + [0.506629, 0.145958, 0.507806], + [0.512831, 0.148179, 0.507648], + [0.519045, 0.150383, 0.507443], + [0.525270, 0.152569, 0.507192], + [0.531507, 0.154739, 0.506895], + [0.537755, 0.156894, 0.506551], + [0.544015, 0.159033, 0.506159], + [0.550287, 0.161158, 0.505719], + [0.556571, 0.163269, 0.505230], + [0.562866, 0.165368, 0.504692], + [0.569172, 0.167454, 0.504105], + [0.575490, 0.169530, 0.503466], + [0.581819, 0.171596, 0.502777], + [0.588158, 0.173652, 0.502035], + [0.594508, 0.175701, 0.501241], + [0.600868, 0.177743, 0.500394], + [0.607238, 0.179779, 0.499492], + [0.613617, 0.181811, 0.498536], + [0.620005, 0.183840, 0.497524], + [0.626401, 0.185867, 0.496456], + [0.632805, 0.187893, 0.495332], + [0.639216, 0.189921, 0.494150], + [0.645633, 0.191952, 0.492910], + [0.652056, 0.193986, 0.491611], + [0.658483, 0.196027, 0.490253], + [0.664915, 0.198075, 0.488836], + [0.671349, 0.200133, 0.487358], + [0.677786, 0.202203, 0.485819], + [0.684224, 0.204286, 0.484219], + [0.690661, 0.206384, 0.482558], + [0.697098, 0.208501, 0.480835], + [0.703532, 0.210638, 0.479049], + [0.709962, 
0.212797, 0.477201], + [0.716387, 0.214982, 0.475290], + [0.722805, 0.217194, 0.473316], + [0.729216, 0.219437, 0.471279], + [0.735616, 0.221713, 0.469180], + [0.742004, 0.224025, 0.467018], + [0.748378, 0.226377, 0.464794], + [0.754737, 0.228772, 0.462509], + [0.761077, 0.231214, 0.460162], + [0.767398, 0.233705, 0.457755], + [0.773695, 0.236249, 0.455289], + [0.779968, 0.238851, 0.452765], + [0.786212, 0.241514, 0.450184], + [0.792427, 0.244242, 0.447543], + [0.798608, 0.247040, 0.444848], + [0.804752, 0.249911, 0.442102], + [0.810855, 0.252861, 0.439305], + [0.816914, 0.255895, 0.436461], + [0.822926, 0.259016, 0.433573], + [0.828886, 0.262229, 0.430644], + [0.834791, 0.265540, 0.427671], + [0.840636, 0.268953, 0.424666], + [0.846416, 0.272473, 0.421631], + [0.852126, 0.276106, 0.418573], + [0.857763, 0.279857, 0.415496], + [0.863320, 0.283729, 0.412403], + [0.868793, 0.287728, 0.409303], + [0.874176, 0.291859, 0.406205], + [0.879464, 0.296125, 0.403118], + [0.884651, 0.300530, 0.400047], + [0.889731, 0.305079, 0.397002], + [0.894700, 0.309773, 0.393995], + [0.899552, 0.314616, 0.391037], + [0.904281, 0.319610, 0.388137], + [0.908884, 0.324755, 0.385308], + [0.913354, 0.330052, 0.382563], + [0.917689, 0.335500, 0.379915], + [0.921884, 0.341098, 0.377376], + [0.925937, 0.346844, 0.374959], + [0.929845, 0.352734, 0.372677], + [0.933606, 0.358764, 0.370541], + [0.937221, 0.364929, 0.368567], + [0.940687, 0.371224, 0.366762], + [0.944006, 0.377643, 0.365136], + [0.947180, 0.384178, 0.363701], + [0.950210, 0.390820, 0.362468], + [0.953099, 0.397563, 0.361438], + [0.955849, 0.404400, 0.360619], + [0.958464, 0.411324, 0.360014], + [0.960949, 0.418323, 0.359630], + [0.963310, 0.425390, 0.359469], + [0.965549, 0.432519, 0.359529], + [0.967671, 0.439703, 0.359810], + [0.969680, 0.446936, 0.360311], + [0.971582, 0.454210, 0.361030], + [0.973381, 0.461520, 0.361965], + [0.975082, 0.468861, 0.363111], + [0.976690, 0.476226, 0.364466], + [0.978210, 0.483612, 0.366025], + 
[0.979645, 0.491014, 0.367783], + [0.981000, 0.498428, 0.369734], + [0.982279, 0.505851, 0.371874], + [0.983485, 0.513280, 0.374198], + [0.984622, 0.520713, 0.376698], + [0.985693, 0.528148, 0.379371], + [0.986700, 0.535582, 0.382210], + [0.987646, 0.543015, 0.385210], + [0.988533, 0.550446, 0.388365], + [0.989363, 0.557873, 0.391671], + [0.990138, 0.565296, 0.395122], + [0.990871, 0.572706, 0.398714], + [0.991558, 0.580107, 0.402441], + [0.992196, 0.587502, 0.406299], + [0.992785, 0.594891, 0.410283], + [0.993326, 0.602275, 0.414390], + [0.993834, 0.609644, 0.418613], + [0.994309, 0.616999, 0.422950], + [0.994738, 0.624350, 0.427397], + [0.995122, 0.631696, 0.431951], + [0.995480, 0.639027, 0.436607], + [0.995810, 0.646344, 0.441361], + [0.996096, 0.653659, 0.446213], + [0.996341, 0.660969, 0.451160], + [0.996580, 0.668256, 0.456192], + [0.996775, 0.675541, 0.461314], + [0.996925, 0.682828, 0.466526], + [0.997077, 0.690088, 0.471811], + [0.997186, 0.697349, 0.477182], + [0.997254, 0.704611, 0.482635], + [0.997325, 0.711848, 0.488154], + [0.997351, 0.719089, 0.493755], + [0.997351, 0.726324, 0.499428], + [0.997341, 0.733545, 0.505167], + [0.997285, 0.740772, 0.510983], + [0.997228, 0.747981, 0.516859], + [0.997138, 0.755190, 0.522806], + [0.997019, 0.762398, 0.528821], + [0.996898, 0.769591, 0.534892], + [0.996727, 0.776795, 0.541039], + [0.996571, 0.783977, 0.547233], + [0.996369, 0.791167, 0.553499], + [0.996162, 0.798348, 0.559820], + [0.995932, 0.805527, 0.566202], + [0.995680, 0.812706, 0.572645], + [0.995424, 0.819875, 0.579140], + [0.995131, 0.827052, 0.585701], + [0.994851, 0.834213, 0.592307], + [0.994524, 0.841387, 0.598983], + [0.994222, 0.848540, 0.605696], + [0.993866, 0.855711, 0.612482], + [0.993545, 0.862859, 0.619299], + [0.993170, 0.870024, 0.626189], + [0.992831, 0.877168, 0.633109], + [0.992440, 0.884330, 0.640099], + [0.992089, 0.891470, 0.647116], + [0.991688, 0.898627, 0.654202], + [0.991332, 0.905763, 0.661309], + [0.990930, 0.912915, 
0.668481], + [0.990570, 0.920049, 0.675675], + [0.990175, 0.927196, 0.682926], + [0.989815, 0.934329, 0.690198], + [0.989434, 0.941470, 0.697519], + [0.989077, 0.948604, 0.704863], + [0.988717, 0.955742, 0.712242], + [0.988367, 0.962878, 0.719649], + [0.988033, 0.970012, 0.727077], + [0.987691, 0.977154, 0.734536], + [0.987387, 0.984288, 0.742002], + [0.987053, 0.991438, 0.749504]] + +_inferno_data = [[0.001462, 0.000466, 0.013866], + [0.002267, 0.001270, 0.018570], + [0.003299, 0.002249, 0.024239], + [0.004547, 0.003392, 0.030909], + [0.006006, 0.004692, 0.038558], + [0.007676, 0.006136, 0.046836], + [0.009561, 0.007713, 0.055143], + [0.011663, 0.009417, 0.063460], + [0.013995, 0.011225, 0.071862], + [0.016561, 0.013136, 0.080282], + [0.019373, 0.015133, 0.088767], + [0.022447, 0.017199, 0.097327], + [0.025793, 0.019331, 0.105930], + [0.029432, 0.021503, 0.114621], + [0.033385, 0.023702, 0.123397], + [0.037668, 0.025921, 0.132232], + [0.042253, 0.028139, 0.141141], + [0.046915, 0.030324, 0.150164], + [0.051644, 0.032474, 0.159254], + [0.056449, 0.034569, 0.168414], + [0.061340, 0.036590, 0.177642], + [0.066331, 0.038504, 0.186962], + [0.071429, 0.040294, 0.196354], + [0.076637, 0.041905, 0.205799], + [0.081962, 0.043328, 0.215289], + [0.087411, 0.044556, 0.224813], + [0.092990, 0.045583, 0.234358], + [0.098702, 0.046402, 0.243904], + [0.104551, 0.047008, 0.253430], + [0.110536, 0.047399, 0.262912], + [0.116656, 0.047574, 0.272321], + [0.122908, 0.047536, 0.281624], + [0.129285, 0.047293, 0.290788], + [0.135778, 0.046856, 0.299776], + [0.142378, 0.046242, 0.308553], + [0.149073, 0.045468, 0.317085], + [0.155850, 0.044559, 0.325338], + [0.162689, 0.043554, 0.333277], + [0.169575, 0.042489, 0.340874], + [0.176493, 0.041402, 0.348111], + [0.183429, 0.040329, 0.354971], + [0.190367, 0.039309, 0.361447], + [0.197297, 0.038400, 0.367535], + [0.204209, 0.037632, 0.373238], + [0.211095, 0.037030, 0.378563], + [0.217949, 0.036615, 0.383522], + [0.224763, 0.036405, 
0.388129], + [0.231538, 0.036405, 0.392400], + [0.238273, 0.036621, 0.396353], + [0.244967, 0.037055, 0.400007], + [0.251620, 0.037705, 0.403378], + [0.258234, 0.038571, 0.406485], + [0.264810, 0.039647, 0.409345], + [0.271347, 0.040922, 0.411976], + [0.277850, 0.042353, 0.414392], + [0.284321, 0.043933, 0.416608], + [0.290763, 0.045644, 0.418637], + [0.297178, 0.047470, 0.420491], + [0.303568, 0.049396, 0.422182], + [0.309935, 0.051407, 0.423721], + [0.316282, 0.053490, 0.425116], + [0.322610, 0.055634, 0.426377], + [0.328921, 0.057827, 0.427511], + [0.335217, 0.060060, 0.428524], + [0.341500, 0.062325, 0.429425], + [0.347771, 0.064616, 0.430217], + [0.354032, 0.066925, 0.430906], + [0.360284, 0.069247, 0.431497], + [0.366529, 0.071579, 0.431994], + [0.372768, 0.073915, 0.432400], + [0.379001, 0.076253, 0.432719], + [0.385228, 0.078591, 0.432955], + [0.391453, 0.080927, 0.433109], + [0.397674, 0.083257, 0.433183], + [0.403894, 0.085580, 0.433179], + [0.410113, 0.087896, 0.433098], + [0.416331, 0.090203, 0.432943], + [0.422549, 0.092501, 0.432714], + [0.428768, 0.094790, 0.432412], + [0.434987, 0.097069, 0.432039], + [0.441207, 0.099338, 0.431594], + [0.447428, 0.101597, 0.431080], + [0.453651, 0.103848, 0.430498], + [0.459875, 0.106089, 0.429846], + [0.466100, 0.108322, 0.429125], + [0.472328, 0.110547, 0.428334], + [0.478558, 0.112764, 0.427475], + [0.484789, 0.114974, 0.426548], + [0.491022, 0.117179, 0.425552], + [0.497257, 0.119379, 0.424488], + [0.503493, 0.121575, 0.423356], + [0.509730, 0.123769, 0.422156], + [0.515967, 0.125960, 0.420887], + [0.522206, 0.128150, 0.419549], + [0.528444, 0.130341, 0.418142], + [0.534683, 0.132534, 0.416667], + [0.540920, 0.134729, 0.415123], + [0.547157, 0.136929, 0.413511], + [0.553392, 0.139134, 0.411829], + [0.559624, 0.141346, 0.410078], + [0.565854, 0.143567, 0.408258], + [0.572081, 0.145797, 0.406369], + [0.578304, 0.148039, 0.404411], + [0.584521, 0.150294, 0.402385], + [0.590734, 0.152563, 0.400290], + [0.596940, 
0.154848, 0.398125], + [0.603139, 0.157151, 0.395891], + [0.609330, 0.159474, 0.393589], + [0.615513, 0.161817, 0.391219], + [0.621685, 0.164184, 0.388781], + [0.627847, 0.166575, 0.386276], + [0.633998, 0.168992, 0.383704], + [0.640135, 0.171438, 0.381065], + [0.646260, 0.173914, 0.378359], + [0.652369, 0.176421, 0.375586], + [0.658463, 0.178962, 0.372748], + [0.664540, 0.181539, 0.369846], + [0.670599, 0.184153, 0.366879], + [0.676638, 0.186807, 0.363849], + [0.682656, 0.189501, 0.360757], + [0.688653, 0.192239, 0.357603], + [0.694627, 0.195021, 0.354388], + [0.700576, 0.197851, 0.351113], + [0.706500, 0.200728, 0.347777], + [0.712396, 0.203656, 0.344383], + [0.718264, 0.206636, 0.340931], + [0.724103, 0.209670, 0.337424], + [0.729909, 0.212759, 0.333861], + [0.735683, 0.215906, 0.330245], + [0.741423, 0.219112, 0.326576], + [0.747127, 0.222378, 0.322856], + [0.752794, 0.225706, 0.319085], + [0.758422, 0.229097, 0.315266], + [0.764010, 0.232554, 0.311399], + [0.769556, 0.236077, 0.307485], + [0.775059, 0.239667, 0.303526], + [0.780517, 0.243327, 0.299523], + [0.785929, 0.247056, 0.295477], + [0.791293, 0.250856, 0.291390], + [0.796607, 0.254728, 0.287264], + [0.801871, 0.258674, 0.283099], + [0.807082, 0.262692, 0.278898], + [0.812239, 0.266786, 0.274661], + [0.817341, 0.270954, 0.270390], + [0.822386, 0.275197, 0.266085], + [0.827372, 0.279517, 0.261750], + [0.832299, 0.283913, 0.257383], + [0.837165, 0.288385, 0.252988], + [0.841969, 0.292933, 0.248564], + [0.846709, 0.297559, 0.244113], + [0.851384, 0.302260, 0.239636], + [0.855992, 0.307038, 0.235133], + [0.860533, 0.311892, 0.230606], + [0.865006, 0.316822, 0.226055], + [0.869409, 0.321827, 0.221482], + [0.873741, 0.326906, 0.216886], + [0.878001, 0.332060, 0.212268], + [0.882188, 0.337287, 0.207628], + [0.886302, 0.342586, 0.202968], + [0.890341, 0.347957, 0.198286], + [0.894305, 0.353399, 0.193584], + [0.898192, 0.358911, 0.188860], + [0.902003, 0.364492, 0.184116], + [0.905735, 0.370140, 0.179350], + 
[0.909390, 0.375856, 0.174563], + [0.912966, 0.381636, 0.169755], + [0.916462, 0.387481, 0.164924], + [0.919879, 0.393389, 0.160070], + [0.923215, 0.399359, 0.155193], + [0.926470, 0.405389, 0.150292], + [0.929644, 0.411479, 0.145367], + [0.932737, 0.417627, 0.140417], + [0.935747, 0.423831, 0.135440], + [0.938675, 0.430091, 0.130438], + [0.941521, 0.436405, 0.125409], + [0.944285, 0.442772, 0.120354], + [0.946965, 0.449191, 0.115272], + [0.949562, 0.455660, 0.110164], + [0.952075, 0.462178, 0.105031], + [0.954506, 0.468744, 0.099874], + [0.956852, 0.475356, 0.094695], + [0.959114, 0.482014, 0.089499], + [0.961293, 0.488716, 0.084289], + [0.963387, 0.495462, 0.079073], + [0.965397, 0.502249, 0.073859], + [0.967322, 0.509078, 0.068659], + [0.969163, 0.515946, 0.063488], + [0.970919, 0.522853, 0.058367], + [0.972590, 0.529798, 0.053324], + [0.974176, 0.536780, 0.048392], + [0.975677, 0.543798, 0.043618], + [0.977092, 0.550850, 0.039050], + [0.978422, 0.557937, 0.034931], + [0.979666, 0.565057, 0.031409], + [0.980824, 0.572209, 0.028508], + [0.981895, 0.579392, 0.026250], + [0.982881, 0.586606, 0.024661], + [0.983779, 0.593849, 0.023770], + [0.984591, 0.601122, 0.023606], + [0.985315, 0.608422, 0.024202], + [0.985952, 0.615750, 0.025592], + [0.986502, 0.623105, 0.027814], + [0.986964, 0.630485, 0.030908], + [0.987337, 0.637890, 0.034916], + [0.987622, 0.645320, 0.039886], + [0.987819, 0.652773, 0.045581], + [0.987926, 0.660250, 0.051750], + [0.987945, 0.667748, 0.058329], + [0.987874, 0.675267, 0.065257], + [0.987714, 0.682807, 0.072489], + [0.987464, 0.690366, 0.079990], + [0.987124, 0.697944, 0.087731], + [0.986694, 0.705540, 0.095694], + [0.986175, 0.713153, 0.103863], + [0.985566, 0.720782, 0.112229], + [0.984865, 0.728427, 0.120785], + [0.984075, 0.736087, 0.129527], + [0.983196, 0.743758, 0.138453], + [0.982228, 0.751442, 0.147565], + [0.981173, 0.759135, 0.156863], + [0.980032, 0.766837, 0.166353], + [0.978806, 0.774545, 0.176037], + [0.977497, 0.782258, 
0.185923], + [0.976108, 0.789974, 0.196018], + [0.974638, 0.797692, 0.206332], + [0.973088, 0.805409, 0.216877], + [0.971468, 0.813122, 0.227658], + [0.969783, 0.820825, 0.238686], + [0.968041, 0.828515, 0.249972], + [0.966243, 0.836191, 0.261534], + [0.964394, 0.843848, 0.273391], + [0.962517, 0.851476, 0.285546], + [0.960626, 0.859069, 0.298010], + [0.958720, 0.866624, 0.310820], + [0.956834, 0.874129, 0.323974], + [0.954997, 0.881569, 0.337475], + [0.953215, 0.888942, 0.351369], + [0.951546, 0.896226, 0.365627], + [0.950018, 0.903409, 0.380271], + [0.948683, 0.910473, 0.395289], + [0.947594, 0.917399, 0.410665], + [0.946809, 0.924168, 0.426373], + [0.946392, 0.930761, 0.442367], + [0.946403, 0.937159, 0.458592], + [0.946903, 0.943348, 0.474970], + [0.947937, 0.949318, 0.491426], + [0.949545, 0.955063, 0.507860], + [0.951740, 0.960587, 0.524203], + [0.954529, 0.965896, 0.540361], + [0.957896, 0.971003, 0.556275], + [0.961812, 0.975924, 0.571925], + [0.966249, 0.980678, 0.587206], + [0.971162, 0.985282, 0.602154], + [0.976511, 0.989753, 0.616760], + [0.982257, 0.994109, 0.631017], + [0.988362, 0.998364, 0.644924]] + +_plasma_data = [[0.050383, 0.029803, 0.527975], + [0.063536, 0.028426, 0.533124], + [0.075353, 0.027206, 0.538007], + [0.086222, 0.026125, 0.542658], + [0.096379, 0.025165, 0.547103], + [0.105980, 0.024309, 0.551368], + [0.115124, 0.023556, 0.555468], + [0.123903, 0.022878, 0.559423], + [0.132381, 0.022258, 0.563250], + [0.140603, 0.021687, 0.566959], + [0.148607, 0.021154, 0.570562], + [0.156421, 0.020651, 0.574065], + [0.164070, 0.020171, 0.577478], + [0.171574, 0.019706, 0.580806], + [0.178950, 0.019252, 0.584054], + [0.186213, 0.018803, 0.587228], + [0.193374, 0.018354, 0.590330], + [0.200445, 0.017902, 0.593364], + [0.207435, 0.017442, 0.596333], + [0.214350, 0.016973, 0.599239], + [0.221197, 0.016497, 0.602083], + [0.227983, 0.016007, 0.604867], + [0.234715, 0.015502, 0.607592], + [0.241396, 0.014979, 0.610259], + [0.248032, 0.014439, 0.612868], 
+ [0.254627, 0.013882, 0.615419], + [0.261183, 0.013308, 0.617911], + [0.267703, 0.012716, 0.620346], + [0.274191, 0.012109, 0.622722], + [0.280648, 0.011488, 0.625038], + [0.287076, 0.010855, 0.627295], + [0.293478, 0.010213, 0.629490], + [0.299855, 0.009561, 0.631624], + [0.306210, 0.008902, 0.633694], + [0.312543, 0.008239, 0.635700], + [0.318856, 0.007576, 0.637640], + [0.325150, 0.006915, 0.639512], + [0.331426, 0.006261, 0.641316], + [0.337683, 0.005618, 0.643049], + [0.343925, 0.004991, 0.644710], + [0.350150, 0.004382, 0.646298], + [0.356359, 0.003798, 0.647810], + [0.362553, 0.003243, 0.649245], + [0.368733, 0.002724, 0.650601], + [0.374897, 0.002245, 0.651876], + [0.381047, 0.001814, 0.653068], + [0.387183, 0.001434, 0.654177], + [0.393304, 0.001114, 0.655199], + [0.399411, 0.000859, 0.656133], + [0.405503, 0.000678, 0.656977], + [0.411580, 0.000577, 0.657730], + [0.417642, 0.000564, 0.658390], + [0.423689, 0.000646, 0.658956], + [0.429719, 0.000831, 0.659425], + [0.435734, 0.001127, 0.659797], + [0.441732, 0.001540, 0.660069], + [0.447714, 0.002080, 0.660240], + [0.453677, 0.002755, 0.660310], + [0.459623, 0.003574, 0.660277], + [0.465550, 0.004545, 0.660139], + [0.471457, 0.005678, 0.659897], + [0.477344, 0.006980, 0.659549], + [0.483210, 0.008460, 0.659095], + [0.489055, 0.010127, 0.658534], + [0.494877, 0.011990, 0.657865], + [0.500678, 0.014055, 0.657088], + [0.506454, 0.016333, 0.656202], + [0.512206, 0.018833, 0.655209], + [0.517933, 0.021563, 0.654109], + [0.523633, 0.024532, 0.652901], + [0.529306, 0.027747, 0.651586], + [0.534952, 0.031217, 0.650165], + [0.540570, 0.034950, 0.648640], + [0.546157, 0.038954, 0.647010], + [0.551715, 0.043136, 0.645277], + [0.557243, 0.047331, 0.643443], + [0.562738, 0.051545, 0.641509], + [0.568201, 0.055778, 0.639477], + [0.573632, 0.060028, 0.637349], + [0.579029, 0.064296, 0.635126], + [0.584391, 0.068579, 0.632812], + [0.589719, 0.072878, 0.630408], + [0.595011, 0.077190, 0.627917], + [0.600266, 0.081516, 
0.625342], + [0.605485, 0.085854, 0.622686], + [0.610667, 0.090204, 0.619951], + [0.615812, 0.094564, 0.617140], + [0.620919, 0.098934, 0.614257], + [0.625987, 0.103312, 0.611305], + [0.631017, 0.107699, 0.608287], + [0.636008, 0.112092, 0.605205], + [0.640959, 0.116492, 0.602065], + [0.645872, 0.120898, 0.598867], + [0.650746, 0.125309, 0.595617], + [0.655580, 0.129725, 0.592317], + [0.660374, 0.134144, 0.588971], + [0.665129, 0.138566, 0.585582], + [0.669845, 0.142992, 0.582154], + [0.674522, 0.147419, 0.578688], + [0.679160, 0.151848, 0.575189], + [0.683758, 0.156278, 0.571660], + [0.688318, 0.160709, 0.568103], + [0.692840, 0.165141, 0.564522], + [0.697324, 0.169573, 0.560919], + [0.701769, 0.174005, 0.557296], + [0.706178, 0.178437, 0.553657], + [0.710549, 0.182868, 0.550004], + [0.714883, 0.187299, 0.546338], + [0.719181, 0.191729, 0.542663], + [0.723444, 0.196158, 0.538981], + [0.727670, 0.200586, 0.535293], + [0.731862, 0.205013, 0.531601], + [0.736019, 0.209439, 0.527908], + [0.740143, 0.213864, 0.524216], + [0.744232, 0.218288, 0.520524], + [0.748289, 0.222711, 0.516834], + [0.752312, 0.227133, 0.513149], + [0.756304, 0.231555, 0.509468], + [0.760264, 0.235976, 0.505794], + [0.764193, 0.240396, 0.502126], + [0.768090, 0.244817, 0.498465], + [0.771958, 0.249237, 0.494813], + [0.775796, 0.253658, 0.491171], + [0.779604, 0.258078, 0.487539], + [0.783383, 0.262500, 0.483918], + [0.787133, 0.266922, 0.480307], + [0.790855, 0.271345, 0.476706], + [0.794549, 0.275770, 0.473117], + [0.798216, 0.280197, 0.469538], + [0.801855, 0.284626, 0.465971], + [0.805467, 0.289057, 0.462415], + [0.809052, 0.293491, 0.458870], + [0.812612, 0.297928, 0.455338], + [0.816144, 0.302368, 0.451816], + [0.819651, 0.306812, 0.448306], + [0.823132, 0.311261, 0.444806], + [0.826588, 0.315714, 0.441316], + [0.830018, 0.320172, 0.437836], + [0.833422, 0.324635, 0.434366], + [0.836801, 0.329105, 0.430905], + [0.840155, 0.333580, 0.427455], + [0.843484, 0.338062, 0.424013], + [0.846788, 
0.342551, 0.420579], + [0.850066, 0.347048, 0.417153], + [0.853319, 0.351553, 0.413734], + [0.856547, 0.356066, 0.410322], + [0.859750, 0.360588, 0.406917], + [0.862927, 0.365119, 0.403519], + [0.866078, 0.369660, 0.400126], + [0.869203, 0.374212, 0.396738], + [0.872303, 0.378774, 0.393355], + [0.875376, 0.383347, 0.389976], + [0.878423, 0.387932, 0.386600], + [0.881443, 0.392529, 0.383229], + [0.884436, 0.397139, 0.379860], + [0.887402, 0.401762, 0.376494], + [0.890340, 0.406398, 0.373130], + [0.893250, 0.411048, 0.369768], + [0.896131, 0.415712, 0.366407], + [0.898984, 0.420392, 0.363047], + [0.901807, 0.425087, 0.359688], + [0.904601, 0.429797, 0.356329], + [0.907365, 0.434524, 0.352970], + [0.910098, 0.439268, 0.349610], + [0.912800, 0.444029, 0.346251], + [0.915471, 0.448807, 0.342890], + [0.918109, 0.453603, 0.339529], + [0.920714, 0.458417, 0.336166], + [0.923287, 0.463251, 0.332801], + [0.925825, 0.468103, 0.329435], + [0.928329, 0.472975, 0.326067], + [0.930798, 0.477867, 0.322697], + [0.933232, 0.482780, 0.319325], + [0.935630, 0.487712, 0.315952], + [0.937990, 0.492667, 0.312575], + [0.940313, 0.497642, 0.309197], + [0.942598, 0.502639, 0.305816], + [0.944844, 0.507658, 0.302433], + [0.947051, 0.512699, 0.299049], + [0.949217, 0.517763, 0.295662], + [0.951344, 0.522850, 0.292275], + [0.953428, 0.527960, 0.288883], + [0.955470, 0.533093, 0.285490], + [0.957469, 0.538250, 0.282096], + [0.959424, 0.543431, 0.278701], + [0.961336, 0.548636, 0.275305], + [0.963203, 0.553865, 0.271909], + [0.965024, 0.559118, 0.268513], + [0.966798, 0.564396, 0.265118], + [0.968526, 0.569700, 0.261721], + [0.970205, 0.575028, 0.258325], + [0.971835, 0.580382, 0.254931], + [0.973416, 0.585761, 0.251540], + [0.974947, 0.591165, 0.248151], + [0.976428, 0.596595, 0.244767], + [0.977856, 0.602051, 0.241387], + [0.979233, 0.607532, 0.238013], + [0.980556, 0.613039, 0.234646], + [0.981826, 0.618572, 0.231287], + [0.983041, 0.624131, 0.227937], + [0.984199, 0.629718, 0.224595], + 
[0.985301, 0.635330, 0.221265], + [0.986345, 0.640969, 0.217948], + [0.987332, 0.646633, 0.214648], + [0.988260, 0.652325, 0.211364], + [0.989128, 0.658043, 0.208100], + [0.989935, 0.663787, 0.204859], + [0.990681, 0.669558, 0.201642], + [0.991365, 0.675355, 0.198453], + [0.991985, 0.681179, 0.195295], + [0.992541, 0.687030, 0.192170], + [0.993032, 0.692907, 0.189084], + [0.993456, 0.698810, 0.186041], + [0.993814, 0.704741, 0.183043], + [0.994103, 0.710698, 0.180097], + [0.994324, 0.716681, 0.177208], + [0.994474, 0.722691, 0.174381], + [0.994553, 0.728728, 0.171622], + [0.994561, 0.734791, 0.168938], + [0.994495, 0.740880, 0.166335], + [0.994355, 0.746995, 0.163821], + [0.994141, 0.753137, 0.161404], + [0.993851, 0.759304, 0.159092], + [0.993482, 0.765499, 0.156891], + [0.993033, 0.771720, 0.154808], + [0.992505, 0.777967, 0.152855], + [0.991897, 0.784239, 0.151042], + [0.991209, 0.790537, 0.149377], + [0.990439, 0.796859, 0.147870], + [0.989587, 0.803205, 0.146529], + [0.988648, 0.809579, 0.145357], + [0.987621, 0.815978, 0.144363], + [0.986509, 0.822401, 0.143557], + [0.985314, 0.828846, 0.142945], + [0.984031, 0.835315, 0.142528], + [0.982653, 0.841812, 0.142303], + [0.981190, 0.848329, 0.142279], + [0.979644, 0.854866, 0.142453], + [0.977995, 0.861432, 0.142808], + [0.976265, 0.868016, 0.143351], + [0.974443, 0.874622, 0.144061], + [0.972530, 0.881250, 0.144923], + [0.970533, 0.887896, 0.145919], + [0.968443, 0.894564, 0.147014], + [0.966271, 0.901249, 0.148180], + [0.964021, 0.907950, 0.149370], + [0.961681, 0.914672, 0.150520], + [0.959276, 0.921407, 0.151566], + [0.956808, 0.928152, 0.152409], + [0.954287, 0.934908, 0.152921], + [0.951726, 0.941671, 0.152925], + [0.949151, 0.948435, 0.152178], + [0.946602, 0.955190, 0.150328], + [0.944152, 0.961916, 0.146861], + [0.941896, 0.968590, 0.140956], + [0.940015, 0.975158, 0.131326]] + +_viridis_data = [[0.267004, 0.004874, 0.329415], + [0.268510, 0.009605, 0.335427], + [0.269944, 0.014625, 0.341379], + 
[0.271305, 0.019942, 0.347269], + [0.272594, 0.025563, 0.353093], + [0.273809, 0.031497, 0.358853], + [0.274952, 0.037752, 0.364543], + [0.276022, 0.044167, 0.370164], + [0.277018, 0.050344, 0.375715], + [0.277941, 0.056324, 0.381191], + [0.278791, 0.062145, 0.386592], + [0.279566, 0.067836, 0.391917], + [0.280267, 0.073417, 0.397163], + [0.280894, 0.078907, 0.402329], + [0.281446, 0.084320, 0.407414], + [0.281924, 0.089666, 0.412415], + [0.282327, 0.094955, 0.417331], + [0.282656, 0.100196, 0.422160], + [0.282910, 0.105393, 0.426902], + [0.283091, 0.110553, 0.431554], + [0.283197, 0.115680, 0.436115], + [0.283229, 0.120777, 0.440584], + [0.283187, 0.125848, 0.444960], + [0.283072, 0.130895, 0.449241], + [0.282884, 0.135920, 0.453427], + [0.282623, 0.140926, 0.457517], + [0.282290, 0.145912, 0.461510], + [0.281887, 0.150881, 0.465405], + [0.281412, 0.155834, 0.469201], + [0.280868, 0.160771, 0.472899], + [0.280255, 0.165693, 0.476498], + [0.279574, 0.170599, 0.479997], + [0.278826, 0.175490, 0.483397], + [0.278012, 0.180367, 0.486697], + [0.277134, 0.185228, 0.489898], + [0.276194, 0.190074, 0.493001], + [0.275191, 0.194905, 0.496005], + [0.274128, 0.199721, 0.498911], + [0.273006, 0.204520, 0.501721], + [0.271828, 0.209303, 0.504434], + [0.270595, 0.214069, 0.507052], + [0.269308, 0.218818, 0.509577], + [0.267968, 0.223549, 0.512008], + [0.266580, 0.228262, 0.514349], + [0.265145, 0.232956, 0.516599], + [0.263663, 0.237631, 0.518762], + [0.262138, 0.242286, 0.520837], + [0.260571, 0.246922, 0.522828], + [0.258965, 0.251537, 0.524736], + [0.257322, 0.256130, 0.526563], + [0.255645, 0.260703, 0.528312], + [0.253935, 0.265254, 0.529983], + [0.252194, 0.269783, 0.531579], + [0.250425, 0.274290, 0.533103], + [0.248629, 0.278775, 0.534556], + [0.246811, 0.283237, 0.535941], + [0.244972, 0.287675, 0.537260], + [0.243113, 0.292092, 0.538516], + [0.241237, 0.296485, 0.539709], + [0.239346, 0.300855, 0.540844], + [0.237441, 0.305202, 0.541921], + [0.235526, 0.309527, 
0.542944], + [0.233603, 0.313828, 0.543914], + [0.231674, 0.318106, 0.544834], + [0.229739, 0.322361, 0.545706], + [0.227802, 0.326594, 0.546532], + [0.225863, 0.330805, 0.547314], + [0.223925, 0.334994, 0.548053], + [0.221989, 0.339161, 0.548752], + [0.220057, 0.343307, 0.549413], + [0.218130, 0.347432, 0.550038], + [0.216210, 0.351535, 0.550627], + [0.214298, 0.355619, 0.551184], + [0.212395, 0.359683, 0.551710], + [0.210503, 0.363727, 0.552206], + [0.208623, 0.367752, 0.552675], + [0.206756, 0.371758, 0.553117], + [0.204903, 0.375746, 0.553533], + [0.203063, 0.379716, 0.553925], + [0.201239, 0.383670, 0.554294], + [0.199430, 0.387607, 0.554642], + [0.197636, 0.391528, 0.554969], + [0.195860, 0.395433, 0.555276], + [0.194100, 0.399323, 0.555565], + [0.192357, 0.403199, 0.555836], + [0.190631, 0.407061, 0.556089], + [0.188923, 0.410910, 0.556326], + [0.187231, 0.414746, 0.556547], + [0.185556, 0.418570, 0.556753], + [0.183898, 0.422383, 0.556944], + [0.182256, 0.426184, 0.557120], + [0.180629, 0.429975, 0.557282], + [0.179019, 0.433756, 0.557430], + [0.177423, 0.437527, 0.557565], + [0.175841, 0.441290, 0.557685], + [0.174274, 0.445044, 0.557792], + [0.172719, 0.448791, 0.557885], + [0.171176, 0.452530, 0.557965], + [0.169646, 0.456262, 0.558030], + [0.168126, 0.459988, 0.558082], + [0.166617, 0.463708, 0.558119], + [0.165117, 0.467423, 0.558141], + [0.163625, 0.471133, 0.558148], + [0.162142, 0.474838, 0.558140], + [0.160665, 0.478540, 0.558115], + [0.159194, 0.482237, 0.558073], + [0.157729, 0.485932, 0.558013], + [0.156270, 0.489624, 0.557936], + [0.154815, 0.493313, 0.557840], + [0.153364, 0.497000, 0.557724], + [0.151918, 0.500685, 0.557587], + [0.150476, 0.504369, 0.557430], + [0.149039, 0.508051, 0.557250], + [0.147607, 0.511733, 0.557049], + [0.146180, 0.515413, 0.556823], + [0.144759, 0.519093, 0.556572], + [0.143343, 0.522773, 0.556295], + [0.141935, 0.526453, 0.555991], + [0.140536, 0.530132, 0.555659], + [0.139147, 0.533812, 0.555298], + [0.137770, 
0.537492, 0.554906], + [0.136408, 0.541173, 0.554483], + [0.135066, 0.544853, 0.554029], + [0.133743, 0.548535, 0.553541], + [0.132444, 0.552216, 0.553018], + [0.131172, 0.555899, 0.552459], + [0.129933, 0.559582, 0.551864], + [0.128729, 0.563265, 0.551229], + [0.127568, 0.566949, 0.550556], + [0.126453, 0.570633, 0.549841], + [0.125394, 0.574318, 0.549086], + [0.124395, 0.578002, 0.548287], + [0.123463, 0.581687, 0.547445], + [0.122606, 0.585371, 0.546557], + [0.121831, 0.589055, 0.545623], + [0.121148, 0.592739, 0.544641], + [0.120565, 0.596422, 0.543611], + [0.120092, 0.600104, 0.542530], + [0.119738, 0.603785, 0.541400], + [0.119512, 0.607464, 0.540218], + [0.119423, 0.611141, 0.538982], + [0.119483, 0.614817, 0.537692], + [0.119699, 0.618490, 0.536347], + [0.120081, 0.622161, 0.534946], + [0.120638, 0.625828, 0.533488], + [0.121380, 0.629492, 0.531973], + [0.122312, 0.633153, 0.530398], + [0.123444, 0.636809, 0.528763], + [0.124780, 0.640461, 0.527068], + [0.126326, 0.644107, 0.525311], + [0.128087, 0.647749, 0.523491], + [0.130067, 0.651384, 0.521608], + [0.132268, 0.655014, 0.519661], + [0.134692, 0.658636, 0.517649], + [0.137339, 0.662252, 0.515571], + [0.140210, 0.665859, 0.513427], + [0.143303, 0.669459, 0.511215], + [0.146616, 0.673050, 0.508936], + [0.150148, 0.676631, 0.506589], + [0.153894, 0.680203, 0.504172], + [0.157851, 0.683765, 0.501686], + [0.162016, 0.687316, 0.499129], + [0.166383, 0.690856, 0.496502], + [0.170948, 0.694384, 0.493803], + [0.175707, 0.697900, 0.491033], + [0.180653, 0.701402, 0.488189], + [0.185783, 0.704891, 0.485273], + [0.191090, 0.708366, 0.482284], + [0.196571, 0.711827, 0.479221], + [0.202219, 0.715272, 0.476084], + [0.208030, 0.718701, 0.472873], + [0.214000, 0.722114, 0.469588], + [0.220124, 0.725509, 0.466226], + [0.226397, 0.728888, 0.462789], + [0.232815, 0.732247, 0.459277], + [0.239374, 0.735588, 0.455688], + [0.246070, 0.738910, 0.452024], + [0.252899, 0.742211, 0.448284], + [0.259857, 0.745492, 0.444467], + 
[0.266941, 0.748751, 0.440573], + [0.274149, 0.751988, 0.436601], + [0.281477, 0.755203, 0.432552], + [0.288921, 0.758394, 0.428426], + [0.296479, 0.761561, 0.424223], + [0.304148, 0.764704, 0.419943], + [0.311925, 0.767822, 0.415586], + [0.319809, 0.770914, 0.411152], + [0.327796, 0.773980, 0.406640], + [0.335885, 0.777018, 0.402049], + [0.344074, 0.780029, 0.397381], + [0.352360, 0.783011, 0.392636], + [0.360741, 0.785964, 0.387814], + [0.369214, 0.788888, 0.382914], + [0.377779, 0.791781, 0.377939], + [0.386433, 0.794644, 0.372886], + [0.395174, 0.797475, 0.367757], + [0.404001, 0.800275, 0.362552], + [0.412913, 0.803041, 0.357269], + [0.421908, 0.805774, 0.351910], + [0.430983, 0.808473, 0.346476], + [0.440137, 0.811138, 0.340967], + [0.449368, 0.813768, 0.335384], + [0.458674, 0.816363, 0.329727], + [0.468053, 0.818921, 0.323998], + [0.477504, 0.821444, 0.318195], + [0.487026, 0.823929, 0.312321], + [0.496615, 0.826376, 0.306377], + [0.506271, 0.828786, 0.300362], + [0.515992, 0.831158, 0.294279], + [0.525776, 0.833491, 0.288127], + [0.535621, 0.835785, 0.281908], + [0.545524, 0.838039, 0.275626], + [0.555484, 0.840254, 0.269281], + [0.565498, 0.842430, 0.262877], + [0.575563, 0.844566, 0.256415], + [0.585678, 0.846661, 0.249897], + [0.595839, 0.848717, 0.243329], + [0.606045, 0.850733, 0.236712], + [0.616293, 0.852709, 0.230052], + [0.626579, 0.854645, 0.223353], + [0.636902, 0.856542, 0.216620], + [0.647257, 0.858400, 0.209861], + [0.657642, 0.860219, 0.203082], + [0.668054, 0.861999, 0.196293], + [0.678489, 0.863742, 0.189503], + [0.688944, 0.865448, 0.182725], + [0.699415, 0.867117, 0.175971], + [0.709898, 0.868751, 0.169257], + [0.720391, 0.870350, 0.162603], + [0.730889, 0.871916, 0.156029], + [0.741388, 0.873449, 0.149561], + [0.751884, 0.874951, 0.143228], + [0.762373, 0.876424, 0.137064], + [0.772852, 0.877868, 0.131109], + [0.783315, 0.879285, 0.125405], + [0.793760, 0.880678, 0.120005], + [0.804182, 0.882046, 0.114965], + [0.814576, 0.883393, 
0.110347], + [0.824940, 0.884720, 0.106217], + [0.835270, 0.886029, 0.102646], + [0.845561, 0.887322, 0.099702], + [0.855810, 0.888601, 0.097452], + [0.866013, 0.889868, 0.095953], + [0.876168, 0.891125, 0.095250], + [0.886271, 0.892374, 0.095374], + [0.896320, 0.893616, 0.096335], + [0.906311, 0.894855, 0.098125], + [0.916242, 0.896091, 0.100717], + [0.926106, 0.897330, 0.104071], + [0.935904, 0.898570, 0.108131], + [0.945636, 0.899815, 0.112838], + [0.955300, 0.901065, 0.118128], + [0.964894, 0.902323, 0.123941], + [0.974417, 0.903590, 0.130215], + [0.983868, 0.904867, 0.136897], + [0.993248, 0.906157, 0.143936]] + +_cividis_data = [[0.000000, 0.135112, 0.304751], + [0.000000, 0.138068, 0.311105], + [0.000000, 0.141013, 0.317579], + [0.000000, 0.143951, 0.323982], + [0.000000, 0.146877, 0.330479], + [0.000000, 0.149791, 0.337065], + [0.000000, 0.152673, 0.343704], + [0.000000, 0.155377, 0.350500], + [0.000000, 0.157932, 0.357521], + [0.000000, 0.160495, 0.364534], + [0.000000, 0.163058, 0.371608], + [0.000000, 0.165621, 0.378769], + [0.000000, 0.168204, 0.385902], + [0.000000, 0.170800, 0.393100], + [0.000000, 0.173420, 0.400353], + [0.000000, 0.176082, 0.407577], + [0.000000, 0.178802, 0.414764], + [0.000000, 0.181610, 0.421859], + [0.000000, 0.184550, 0.428802], + [0.000000, 0.186915, 0.435532], + [0.000000, 0.188769, 0.439563], + [0.000000, 0.190950, 0.441085], + [0.000000, 0.193366, 0.441561], + [0.003602, 0.195911, 0.441564], + [0.017852, 0.198528, 0.441248], + [0.032110, 0.201199, 0.440785], + [0.046205, 0.203903, 0.440196], + [0.058378, 0.206629, 0.439531], + [0.068968, 0.209372, 0.438863], + [0.078624, 0.212122, 0.438105], + [0.087465, 0.214879, 0.437342], + [0.095645, 0.217643, 0.436593], + [0.103401, 0.220406, 0.435790], + [0.110658, 0.223170, 0.435067], + [0.117612, 0.225935, 0.434308], + [0.124291, 0.228697, 0.433547], + [0.130669, 0.231458, 0.432840], + [0.136830, 0.234216, 0.432148], + [0.142852, 0.236972, 0.431404], + [0.148638, 0.239724, 
0.430752], + [0.154261, 0.242475, 0.430120], + [0.159733, 0.245221, 0.429528], + [0.165113, 0.247965, 0.428908], + [0.170362, 0.250707, 0.428325], + [0.175490, 0.253444, 0.427790], + [0.180503, 0.256180, 0.427299], + [0.185453, 0.258914, 0.426788], + [0.190303, 0.261644, 0.426329], + [0.195057, 0.264372, 0.425924], + [0.199764, 0.267099, 0.425497], + [0.204385, 0.269823, 0.425126], + [0.208926, 0.272546, 0.424809], + [0.213431, 0.275266, 0.424480], + [0.217863, 0.277985, 0.424206], + [0.222264, 0.280702, 0.423914], + [0.226598, 0.283419, 0.423678], + [0.230871, 0.286134, 0.423498], + [0.235120, 0.288848, 0.423304], + [0.239312, 0.291562, 0.423167], + [0.243485, 0.294274, 0.423014], + [0.247605, 0.296986, 0.422917], + [0.251675, 0.299698, 0.422873], + [0.255731, 0.302409, 0.422814], + [0.259740, 0.305120, 0.422810], + [0.263738, 0.307831, 0.422789], + [0.267693, 0.310542, 0.422821], + [0.271639, 0.313253, 0.422837], + [0.275513, 0.315965, 0.422979], + [0.279411, 0.318677, 0.423031], + [0.283240, 0.321390, 0.423211], + [0.287065, 0.324103, 0.423373], + [0.290884, 0.326816, 0.423517], + [0.294669, 0.329531, 0.423716], + [0.298421, 0.332247, 0.423973], + [0.302169, 0.334963, 0.424213], + [0.305886, 0.337681, 0.424512], + [0.309601, 0.340399, 0.424790], + [0.313287, 0.343120, 0.425120], + [0.316941, 0.345842, 0.425512], + [0.320595, 0.348565, 0.425889], + [0.324250, 0.351289, 0.426250], + [0.327875, 0.354016, 0.426670], + [0.331474, 0.356744, 0.427144], + [0.335073, 0.359474, 0.427605], + [0.338673, 0.362206, 0.428053], + [0.342246, 0.364939, 0.428559], + [0.345793, 0.367676, 0.429127], + [0.349341, 0.370414, 0.429685], + [0.352892, 0.373153, 0.430226], + [0.356418, 0.375896, 0.430823], + [0.359916, 0.378641, 0.431501], + [0.363446, 0.381388, 0.432075], + [0.366923, 0.384139, 0.432796], + [0.370430, 0.386890, 0.433428], + [0.373884, 0.389646, 0.434209], + [0.377371, 0.392404, 0.434890], + [0.380830, 0.395164, 0.435653], + [0.384268, 0.397928, 0.436475], + [0.387705, 
0.400694, 0.437305], + [0.391151, 0.403464, 0.438096], + [0.394568, 0.406236, 0.438986], + [0.397991, 0.409011, 0.439848], + [0.401418, 0.411790, 0.440708], + [0.404820, 0.414572, 0.441642], + [0.408226, 0.417357, 0.442570], + [0.411607, 0.420145, 0.443577], + [0.414992, 0.422937, 0.444578], + [0.418383, 0.425733, 0.445560], + [0.421748, 0.428531, 0.446640], + [0.425120, 0.431334, 0.447692], + [0.428462, 0.434140, 0.448864], + [0.431817, 0.436950, 0.449982], + [0.435168, 0.439763, 0.451134], + [0.438504, 0.442580, 0.452341], + [0.441810, 0.445402, 0.453659], + [0.445148, 0.448226, 0.454885], + [0.448447, 0.451053, 0.456264], + [0.451759, 0.453887, 0.457582], + [0.455072, 0.456718, 0.458976], + [0.458366, 0.459552, 0.460457], + [0.461616, 0.462405, 0.461969], + [0.464947, 0.465241, 0.463395], + [0.468254, 0.468083, 0.464908], + [0.471501, 0.470960, 0.466357], + [0.474812, 0.473832, 0.467681], + [0.478186, 0.476699, 0.468845], + [0.481622, 0.479573, 0.469767], + [0.485141, 0.482451, 0.470384], + [0.488697, 0.485318, 0.471008], + [0.492278, 0.488198, 0.471453], + [0.495913, 0.491076, 0.471751], + [0.499552, 0.493960, 0.472032], + [0.503185, 0.496851, 0.472305], + [0.506866, 0.499743, 0.472432], + [0.510540, 0.502643, 0.472550], + [0.514226, 0.505546, 0.472640], + [0.517920, 0.508454, 0.472707], + [0.521643, 0.511367, 0.472639], + [0.525348, 0.514285, 0.472660], + [0.529086, 0.517207, 0.472543], + [0.532829, 0.520135, 0.472401], + [0.536553, 0.523067, 0.472352], + [0.540307, 0.526005, 0.472163], + [0.544069, 0.528948, 0.471947], + [0.547840, 0.531895, 0.471704], + [0.551612, 0.534849, 0.471439], + [0.555393, 0.537807, 0.471147], + [0.559181, 0.540771, 0.470829], + [0.562972, 0.543741, 0.470488], + [0.566802, 0.546715, 0.469988], + [0.570607, 0.549695, 0.469593], + [0.574417, 0.552682, 0.469172], + [0.578236, 0.555673, 0.468724], + [0.582087, 0.558670, 0.468118], + [0.585916, 0.561674, 0.467618], + [0.589753, 0.564682, 0.467090], + [0.593622, 0.567697, 0.466401], + 
[0.597469, 0.570718, 0.465821], + [0.601354, 0.573743, 0.465074], + [0.605211, 0.576777, 0.464441], + [0.609105, 0.579816, 0.463638], + [0.612977, 0.582861, 0.462950], + [0.616852, 0.585913, 0.462237], + [0.620765, 0.588970, 0.461351], + [0.624654, 0.592034, 0.460583], + [0.628576, 0.595104, 0.459641], + [0.632506, 0.598180, 0.458668], + [0.636412, 0.601264, 0.457818], + [0.640352, 0.604354, 0.456791], + [0.644270, 0.607450, 0.455886], + [0.648222, 0.610553, 0.454801], + [0.652178, 0.613664, 0.453689], + [0.656114, 0.616780, 0.452702], + [0.660082, 0.619904, 0.451534], + [0.664055, 0.623034, 0.450338], + [0.668008, 0.626171, 0.449270], + [0.671991, 0.629316, 0.448018], + [0.675981, 0.632468, 0.446736], + [0.679979, 0.635626, 0.445424], + [0.683950, 0.638793, 0.444251], + [0.687957, 0.641966, 0.442886], + [0.691971, 0.645145, 0.441491], + [0.695985, 0.648334, 0.440072], + [0.700008, 0.651529, 0.438624], + [0.704037, 0.654731, 0.437147], + [0.708067, 0.657942, 0.435647], + [0.712105, 0.661160, 0.434117], + [0.716177, 0.664384, 0.432386], + [0.720222, 0.667618, 0.430805], + [0.724274, 0.670859, 0.429194], + [0.728334, 0.674107, 0.427554], + [0.732422, 0.677364, 0.425717], + [0.736488, 0.680629, 0.424028], + [0.740589, 0.683900, 0.422131], + [0.744664, 0.687181, 0.420393], + [0.748772, 0.690470, 0.418448], + [0.752886, 0.693766, 0.416472], + [0.756975, 0.697071, 0.414659], + [0.761096, 0.700384, 0.412638], + [0.765223, 0.703705, 0.410587], + [0.769353, 0.707035, 0.408516], + [0.773486, 0.710373, 0.406422], + [0.777651, 0.713719, 0.404112], + [0.781795, 0.717074, 0.401966], + [0.785965, 0.720438, 0.399613], + [0.790116, 0.723810, 0.397423], + [0.794298, 0.727190, 0.395016], + [0.798480, 0.730580, 0.392597], + [0.802667, 0.733978, 0.390153], + [0.806859, 0.737385, 0.387684], + [0.811054, 0.740801, 0.385198], + [0.815274, 0.744226, 0.382504], + [0.819499, 0.747659, 0.379785], + [0.823729, 0.751101, 0.377043], + [0.827959, 0.754553, 0.374292], + [0.832192, 0.758014, 
0.371529], + [0.836429, 0.761483, 0.368747], + [0.840693, 0.764962, 0.365746], + [0.844957, 0.768450, 0.362741], + [0.849223, 0.771947, 0.359729], + [0.853515, 0.775454, 0.356500], + [0.857809, 0.778969, 0.353259], + [0.862105, 0.782494, 0.350011], + [0.866421, 0.786028, 0.346571], + [0.870717, 0.789572, 0.343333], + [0.875057, 0.793125, 0.339685], + [0.879378, 0.796687, 0.336241], + [0.883720, 0.800258, 0.332599], + [0.888081, 0.803839, 0.328770], + [0.892440, 0.807430, 0.324968], + [0.896818, 0.811030, 0.320982], + [0.901195, 0.814639, 0.317021], + [0.905589, 0.818257, 0.312889], + [0.910000, 0.821885, 0.308594], + [0.914407, 0.825522, 0.304348], + [0.918828, 0.829168, 0.299960], + [0.923279, 0.832822, 0.295244], + [0.927724, 0.836486, 0.290611], + [0.932180, 0.840159, 0.285880], + [0.936660, 0.843841, 0.280876], + [0.941147, 0.847530, 0.275815], + [0.945654, 0.851228, 0.270532], + [0.950178, 0.854933, 0.265085], + [0.954725, 0.858646, 0.259365], + [0.959284, 0.862365, 0.253563], + [0.963872, 0.866089, 0.247445], + [0.968469, 0.869819, 0.241310], + [0.973114, 0.873550, 0.234677], + [0.977780, 0.877281, 0.227954], + [0.982497, 0.881008, 0.220878], + [0.987293, 0.884718, 0.213336], + [0.992218, 0.888385, 0.205468], + [0.994847, 0.892954, 0.203445], + [0.995249, 0.898384, 0.207561], + [0.995503, 0.903866, 0.212370], + [0.995737, 0.909344, 0.217772]] + +_twilight_data = [ + [0.88575015840754434, 0.85000924943067835, 0.8879736506427196], + [0.88378520195539056, 0.85072940540310626, 0.88723222096949894], + [0.88172231059285788, 0.85127594077653468, 0.88638056925514819], + [0.8795410528270573, 0.85165675407495722, 0.8854143767924102], + [0.87724880858965482, 0.85187028338870274, 0.88434120381311432], + [0.87485347508575972, 0.85191526123023187, 0.88316926967613829], + [0.87233134085124076, 0.85180165478080894, 0.88189704355001619], + [0.86970474853509816, 0.85152403004797894, 0.88053883390003362], + [0.86696015505333579, 0.8510896085314068, 0.87909766977173343], + 
[0.86408985081463996, 0.85050391167507788, 0.87757925784892632], + [0.86110245436899846, 0.84976754857001258, 0.87599242923439569], + [0.85798259245670372, 0.84888934810281835, 0.87434038553446281], + [0.85472593189256985, 0.84787488124672816, 0.8726282980930582], + [0.85133714570857189, 0.84672735796116472, 0.87086081657350445], + [0.84780710702577922, 0.8454546229209523, 0.86904036783694438], + [0.8441261828674842, 0.84406482711037389, 0.86716973322690072], + [0.84030420805957784, 0.8425605950855084, 0.865250882410458], + [0.83634031809191178, 0.84094796518951942, 0.86328528001070159], + [0.83222705712934408, 0.83923490627754482, 0.86127563500427884], + [0.82796894316013536, 0.83742600751395202, 0.85922399451306786], + [0.82357429680252847, 0.83552487764795436, 0.85713191328514948], + [0.81904654677937527, 0.8335364929949034, 0.85500206287010105], + [0.81438982121143089, 0.83146558694197847, 0.85283759062147024], + [0.8095999819094809, 0.82931896673505456, 0.85064441601050367], + [0.80469164429814577, 0.82709838780560663, 0.84842449296974021], + [0.79967075421267997, 0.82480781812080928, 0.84618210029578533], + [0.79454305089231114, 0.82245116226304615, 0.84392184786827984], + [0.78931445564608915, 0.82003213188702007, 0.8416486380471222], + [0.78399101042764918, 0.81755426400533426, 0.83936747464036732], + [0.77857892008227592, 0.81502089378742548, 0.8370834463093898], + [0.77308416590170936, 0.81243524735466011, 0.83480172950579679], + [0.76751108504417864, 0.8098007598713145, 0.83252816638059668], + [0.76186907937980286, 0.80711949387647486, 0.830266486168872], + [0.75616443584381976, 0.80439408733477935, 0.82802138994719998], + [0.75040346765406696, 0.80162699008965321, 0.82579737851082424], + [0.74459247771890169, 0.79882047719583249, 0.82359867586156521], + [0.73873771700494939, 0.79597665735031009, 0.82142922780433014], + [0.73284543645523459, 0.79309746468844067, 0.81929263384230377], + [0.72692177512829703, 0.7901846863592763, 0.81719217466726379], + 
[0.72097280665536778, 0.78723995923452639, 0.81513073920879264], + [0.71500403076252128, 0.78426487091581187, 0.81311116559949914], + [0.70902078134539304, 0.78126088716070907, 0.81113591855117928], + [0.7030297722540817, 0.77822904973358131, 0.80920618848056969], + [0.6970365443886174, 0.77517050008066057, 0.80732335380063447], + [0.69104641009309098, 0.77208629460678091, 0.80548841690679074], + [0.68506446154395928, 0.7689774029354699, 0.80370206267176914], + [0.67909554499882152, 0.76584472131395898, 0.8019646617300199], + [0.67314422559426212, 0.76268908733890484, 0.80027628545809526], + [0.66721479803752815, 0.7595112803730375, 0.79863674654537764], + [0.6613112930078745, 0.75631202708719025, 0.7970456043491897], + [0.65543692326454717, 0.75309208756768431, 0.79550271129031047], + [0.64959573004253479, 0.74985201221941766, 0.79400674021499107], + [0.6437910831099849, 0.7465923800833657, 0.79255653201306053], + [0.63802586828545982, 0.74331376714033193, 0.79115100459573173], + [0.6323027138710603, 0.74001672160131404, 0.78978892762640429], + [0.62662402022604591, 0.73670175403699445, 0.78846901316334561], + [0.62099193064817548, 0.73336934798923203, 0.78718994624696581], + [0.61540846411770478, 0.73001995232739691, 0.78595022706750484], + [0.60987543176093062, 0.72665398759758293, 0.78474835732694714], + [0.60439434200274855, 0.7232718614323369, 0.78358295593535587], + [0.5989665814482068, 0.71987394892246725, 0.78245259899346642], + [0.59359335696837223, 0.7164606049658685, 0.78135588237640097], + [0.58827579780555495, 0.71303214646458135, 0.78029141405636515], + [0.58301487036932409, 0.70958887676997473, 0.77925781820476592], + [0.5778116438998202, 0.70613106157153982, 0.77825345121025524], + [0.5726668948158774, 0.7026589535425779, 0.77727702680911992], + [0.56758117853861967, 0.69917279302646274, 0.77632748534275298], + [0.56255515357219343, 0.69567278381629649, 0.77540359142309845], + [0.55758940419605174, 0.69215911458254054, 0.7745041337932782], + 
[0.55268450589347129, 0.68863194515166382, 0.7736279426902245], + [0.54784098153018634, 0.68509142218509878, 0.77277386473440868], + [0.54305932424018233, 0.68153767253065878, 0.77194079697835083], + [0.53834015575176275, 0.67797081129095405, 0.77112734439057717], + [0.53368389147728401, 0.67439093705212727, 0.7703325054879735], + [0.529090861832473, 0.67079812302806219, 0.76955552292313134], + [0.52456151470593582, 0.66719242996142225, 0.76879541714230948], + [0.52009627392235558, 0.66357391434030388, 0.76805119403344102], + [0.5156955988596057, 0.65994260812897998, 0.76732191489596169], + [0.51135992541601927, 0.65629853981831865, 0.76660663780645333], + [0.50708969576451657, 0.65264172403146448, 0.76590445660835849], + [0.5028853540415561, 0.64897216734095264, 0.76521446718174913], + [0.49874733661356069, 0.6452898684900934, 0.76453578734180083], + [0.4946761847863938, 0.64159484119504429, 0.76386719002130909], + [0.49067224938561221, 0.63788704858847078, 0.76320812763163837], + [0.4867359599430568, 0.63416646251100506, 0.76255780085924041], + [0.4828677867260272, 0.6304330455306234, 0.76191537149895305], + [0.47906816236197386, 0.62668676251860134, 0.76128000375662419], + [0.47533752394906287, 0.62292757283835809, 0.76065085571817748], + [0.47167629518877091, 0.61915543242884641, 0.76002709227883047], + [0.46808490970531597, 0.61537028695790286, 0.75940789891092741], + [0.46456376716303932, 0.61157208822864151, 0.75879242623025811], + [0.46111326647023881, 0.607760777169989, 0.75817986436807139], + [0.45773377230160567, 0.60393630046586455, 0.75756936901859162], + [0.45442563977552913, 0.60009859503858665, 0.75696013660606487], + [0.45118918687617743, 0.59624762051353541, 0.75635120643246645], + [0.44802470933589172, 0.59238331452146575, 0.75574176474107924], + [0.44493246854215379, 0.5885055998308617, 0.7551311041857901], + [0.44191271766696399, 0.58461441100175571, 0.75451838884410671], + [0.43896563958048396, 0.58070969241098491, 0.75390276208285945], + 
[0.43609138958356369, 0.57679137998186081, 0.7532834105961016], + [0.43329008867358393, 0.57285941625606673, 0.75265946532566674], + [0.43056179073057571, 0.56891374572457176, 0.75203008099312696], + [0.42790652284925834, 0.5649543060909209, 0.75139443521914839], + [0.42532423665011354, 0.56098104959950301, 0.75075164989005116], + [0.42281485675772662, 0.55699392126996583, 0.75010086988227642], + [0.42037822361396326, 0.55299287158108168, 0.7494412559451894], + [0.41801414079233629, 0.54897785421888889, 0.74877193167001121], + [0.4157223260454232, 0.54494882715350401, 0.74809204459000522], + [0.41350245743314729, 0.54090574771098476, 0.74740073297543086], + [0.41135414697304568, 0.53684857765005933, 0.74669712855065784], + [0.4092768899914751, 0.53277730177130322, 0.74598030635707824], + [0.40727018694219069, 0.52869188011057411, 0.74524942637581271], + [0.40533343789303178, 0.52459228174983119, 0.74450365836708132], + [0.40346600333905397, 0.52047847653840029, 0.74374215223567086], + [0.40166714010896104, 0.51635044969688759, 0.7429640345324835], + [0.39993606933454834, 0.51220818143218516, 0.74216844571317986], + [0.3982719152586337, 0.50805166539276136, 0.74135450918099721], + [0.39667374905665609, 0.50388089053847973, 0.74052138580516735], + [0.39514058808207631, 0.49969585326377758, 0.73966820211715711], + [0.39367135736822567, 0.49549655777451179, 0.738794102296364], + [0.39226494876209317, 0.49128300332899261, 0.73789824784475078], + [0.39092017571994903, 0.48705520251223039, 0.73697977133881254], + [0.38963580160340855, 0.48281316715123496, 0.73603782546932739], + [0.38841053300842432, 0.47855691131792805, 0.73507157641157261], + [0.38724301459330251, 0.47428645933635388, 0.73408016787854391], + [0.38613184178892102, 0.4700018340988123, 0.7330627749243106], + [0.38507556793651387, 0.46570306719930193, 0.73201854033690505], + [0.38407269378943537, 0.46139018782416635, 0.73094665432902683], + [0.38312168084402748, 0.45706323581407199, 0.72984626791353258], + 
[0.38222094988570376, 0.45272225034283325, 0.72871656144003782], + [0.38136887930454161, 0.44836727669277859, 0.72755671317141346], + [0.38056380696565623, 0.44399837208633719, 0.72636587045135315], + [0.37980403744848751, 0.43961558821222629, 0.72514323778761092], + [0.37908789283110761, 0.43521897612544935, 0.72388798691323131], + [0.378413635091359, 0.43080859411413064, 0.72259931993061044], + [0.37777949753513729, 0.4263845142616835, 0.72127639993530235], + [0.37718371844251231, 0.42194680223454828, 0.71991841524475775], + [0.37662448930806297, 0.41749553747893614, 0.71852454736176108], + [0.37610001286385814, 0.41303079952477062, 0.71709396919920232], + [0.37560846919442398, 0.40855267638072096, 0.71562585091587549], + [0.37514802505380473, 0.4040612609993941, 0.7141193695725726], + [0.37471686019302231, 0.3995566498711684, 0.71257368516500463], + [0.37431313199312338, 0.39503894828283309, 0.71098796522377461], + [0.37393499330475782, 0.39050827529375831, 0.70936134293478448], + [0.3735806215098284, 0.38596474386057539, 0.70769297607310577], + [0.37324816143326384, 0.38140848555753937, 0.70598200974806036], + [0.37293578646665032, 0.37683963835219841, 0.70422755780589941], + [0.37264166757849604, 0.37225835004836849, 0.7024287314570723], + [0.37236397858465387, 0.36766477862108266, 0.70058463496520773], + [0.37210089702443822, 0.36305909736982378, 0.69869434615073722], + [0.3718506155898596, 0.35844148285875221, 0.69675695810256544], + [0.37161133234400479, 0.3538121372967869, 0.69477149919380887], + [0.37138124223736607, 0.34917126878479027, 0.69273703471928827], + [0.37115856636209105, 0.34451911410230168, 0.69065253586464992], + [0.37094151551337329, 0.33985591488818123, 0.68851703379505125], + [0.37072833279422668, 0.33518193808489577, 0.68632948169606767], + [0.37051738634484427, 0.33049741244307851, 0.68408888788857214], + [0.37030682071842685, 0.32580269697872455, 0.68179411684486679], + [0.37009487130772695, 0.3210981375964933, 0.67944405399056851], + 
[0.36987980329025361, 0.31638410101153364, 0.67703755438090574], + [0.36965987626565955, 0.31166098762951971, 0.67457344743419545], + [0.36943334591276228, 0.30692923551862339, 0.67205052849120617], + [0.36919847837592484, 0.30218932176507068, 0.66946754331614522], + [0.36895355306596778, 0.29744175492366276, 0.66682322089824264], + [0.36869682231895268, 0.29268709856150099, 0.66411625298236909], + [0.36842655638020444, 0.28792596437778462, 0.66134526910944602], + [0.36814101479899719, 0.28315901221182987, 0.65850888806972308], + [0.36783843696531082, 0.27838697181297761, 0.65560566838453704], + [0.36751707094367697, 0.27361063317090978, 0.65263411711618635], + [0.36717513650699446, 0.26883085667326956, 0.64959272297892245], + [0.36681085540107988, 0.26404857724525643, 0.64647991652908243], + [0.36642243251550632, 0.25926481158628106, 0.64329409140765537], + [0.36600853966739794, 0.25448043878086224, 0.64003361803368586], + [0.36556698373538982, 0.24969683475296395, 0.63669675187488584], + [0.36509579845886808, 0.24491536803550484, 0.63328173520055586], + [0.36459308890125008, 0.24013747024823828, 0.62978680155026101], + [0.36405693022088509, 0.23536470386204195, 0.62621013451953023], + [0.36348537610385145, 0.23059876218396419, 0.62254988622392882], + [0.36287643560041027, 0.22584149293287031, 0.61880417410823019], + [0.36222809558295926, 0.22109488427338303, 0.61497112346096128], + [0.36153829010998356, 0.21636111429594002, 0.61104880679640927], + [0.36080493826624654, 0.21164251793458128, 0.60703532172064711], + [0.36002681809096376, 0.20694122817889948, 0.60292845431916875], + [0.35920088560930186, 0.20226037920758122, 0.5987265295935138], + [0.35832489966617809, 0.197602942459778, 0.59442768517501066], + [0.35739663292915563, 0.19297208197842461, 0.59003011251063131], + [0.35641381143126327, 0.18837119869242164, 0.5855320765920552], + [0.35537415306906722, 0.18380392577704466, 0.58093191431832802], + [0.35427534960663759, 0.17927413271618647, 
0.57622809660668717], + [0.35311574421123737, 0.17478570377561287, 0.57141871523555288], + [0.35189248608873791, 0.17034320478524959, 0.56650284911216653], + [0.35060304441931012, 0.16595129984720861, 0.56147964703993225], + [0.34924513554955644, 0.16161477763045118, 0.55634837474163779], + [0.34781653238777782, 0.15733863511152979, 0.55110853452703257], + [0.34631507175793091, 0.15312802296627787, 0.5457599924248665], + [0.34473901574536375, 0.14898820589826409, 0.54030245920406539], + [0.34308600291572294, 0.14492465359918028, 0.53473704282067103], + [0.34135411074506483, 0.1409427920655632, 0.52906500940336754], + [0.33954168752669694, 0.13704801896718169, 0.52328797535085236], + [0.33764732090671112, 0.13324562282438077, 0.51740807573979475], + [0.33566978565015315, 0.12954074251271822, 0.51142807215168951], + [0.33360804901486002, 0.12593818301005921, 0.50535164796654897], + [0.33146154891145124, 0.12244245263391232, 0.49918274588431072], + [0.32923005203231409, 0.11905764321981127, 0.49292595612342666], + [0.3269137124539796, 0.1157873496841953, 0.48658646495697461], + [0.32451307931207785, 0.11263459791730848, 0.48017007211645196], + [0.32202882276069322, 0.10960114111258401, 0.47368494725726878], + [0.31946262395497965, 0.10668879882392659, 0.46713728801395243], + [0.31681648089023501, 0.10389861387653518, 0.46053414662739794], + [0.31409278414755532, 0.10123077676403242, 0.45388335612058467], + [0.31129434479712365, 0.098684771934052201, 0.44719313715161618], + [0.30842444457210105, 0.096259385340577736, 0.44047194882050544], + [0.30548675819945936, 0.093952764840823738, 0.43372849999361113], + [0.30248536364574252, 0.091761187397303601, 0.42697404043749887], + [0.29942483960214772, 0.089682253716750038, 0.42021619665853854], + [0.29631000388905288, 0.087713250960463951, 0.41346259134143476], + [0.29314593096985248, 0.085850656889620708, 0.40672178082365834], + [0.28993792445176608, 0.08409078829085731, 0.40000214725256295], + [0.28669151388283165, 
0.082429873848480689, 0.39331182532243375], + [0.28341239797185225, 0.080864153365499375, 0.38665868550105914], + [0.28010638576975472, 0.079389994802261526, 0.38005028528138707], + [0.27677939615815589, 0.078003941033788216, 0.37349382846504675], + [0.27343739342450812, 0.076702800237496066, 0.36699616136347685], + [0.27008637749114051, 0.075483675584275545, 0.36056376228111864], + [0.26673233211995284, 0.074344018028546205, 0.35420276066240958], + [0.26338121807151404, 0.073281657939897077, 0.34791888996380105], + [0.26003895187439957, 0.072294781043362205, 0.3417175669546984], + [0.25671191651083902, 0.071380106242082242, 0.33560648984600089], + [0.25340685873736807, 0.070533582926851829, 0.3295945757321303], + [0.25012845306199383, 0.069758206429106989, 0.32368100685760637], + [0.24688226237958999, 0.069053639449204451, 0.31786993834254956], + [0.24367372557466271, 0.068419855150922693, 0.31216524050888372], + [0.24050813332295939, 0.067857103814855602, 0.30657054493678321], + [0.23739062429054825, 0.067365888050555517, 0.30108922184065873], + [0.23433055727563878, 0.066935599661639394, 0.29574009929867601], + [0.23132955273021344, 0.066576186939090592, 0.29051361067988485], + [0.2283917709422868, 0.06628997924139618, 0.28541074411068496], + [0.22552164337737857, 0.066078173119395595, 0.28043398847505197], + [0.22272706739121817, 0.065933790675651943, 0.27559714652053702], + [0.22001251100779617, 0.065857918918907604, 0.27090279994325861], + [0.21737845072382705, 0.065859661233562045, 0.26634209349669508], + [0.21482843531473683, 0.065940385613778491, 0.26191675992376573], + [0.21237411048541005, 0.066085024661758446, 0.25765165093569542], + [0.21001214221188125, 0.066308573918947178, 0.2535289048041211], + [0.2077442377448806, 0.06661453200418091, 0.24954644291943817], + [0.20558051999470117, 0.066990462397868739, 0.24572497420147632], + [0.20352007949514977, 0.067444179612424215, 0.24205576625191821], + [0.20156133764129841, 0.067983271026200248, 
0.23852974228695395], + [0.19971571438603364, 0.068592710553704722, 0.23517094067076993], + [0.19794834061899208, 0.069314066071660657, 0.23194647381302336], + [0.1960826032659409, 0.070321227242423623, 0.22874673279569585], + [0.19410351363791453, 0.071608304856891569, 0.22558727307410353], + [0.19199449184606268, 0.073182830649273306, 0.22243385243433622], + [0.18975853639094634, 0.075019861862143766, 0.2193005075652994], + [0.18739228342697645, 0.077102096899588329, 0.21618875376309582], + [0.18488035509396164, 0.079425730279723883, 0.21307651648984993], + [0.18774482037046955, 0.077251588468039312, 0.21387448578597812], + [0.19049578401722037, 0.075311278416787641, 0.2146562337112265], + [0.1931548636579131, 0.073606819040117955, 0.21542362939081539], + [0.19571853588267552, 0.072157781039602742, 0.21617499187076789], + [0.19819343656336558, 0.070974625252738788, 0.21690975060032436], + [0.20058760685133747, 0.070064576149984209, 0.21762721310371608], + [0.20290365333558247, 0.069435248580458964, 0.21833167885096033], + [0.20531725273301316, 0.068919592266397572, 0.21911516689288835], + [0.20785704662965598, 0.068484398797025281, 0.22000133917653536], + [0.21052882914958676, 0.06812195249816172, 0.22098759107715404], + [0.2133313859647627, 0.067830148426026665, 0.22207043213024291], + [0.21625279838647882, 0.067616330270516389, 0.22324568672294431], + [0.21930503925136402, 0.067465786362940039, 0.22451023616807558], + [0.22247308588973624, 0.067388214053092838, 0.22585960379408354], + [0.2257539681670791, 0.067382132300147474, 0.22728984778098055], + [0.22915620278592841, 0.067434730871152565, 0.22879681433956656], + [0.23266299920501882, 0.067557104388479783, 0.23037617493752832], + [0.23627495835774248, 0.06774359820987802, 0.23202360805926608], + [0.23999586188690308, 0.067985029964779953, 0.23373434258507808], + [0.24381149720247919, 0.068289851529011875, 0.23550427698321885], + [0.24772092990501099, 0.068653337909486523, 0.2373288009471749], + 
[0.25172899728289466, 0.069064630826035506, 0.23920260612763083], + [0.25582135547481771, 0.06953231029187984, 0.24112190491594204], + [0.25999463887892144, 0.070053855603861875, 0.24308218808684579], + [0.26425512207060942, 0.070616595622995437, 0.24507758869355967], + [0.26859095948172862, 0.071226716277922458, 0.24710443563450618], + [0.27299701518897301, 0.071883555446163511, 0.24915847093232929], + [0.27747150809142801, 0.072582969899254779, 0.25123493995942769], + [0.28201746297366942, 0.073315693214040967, 0.25332800295084507], + [0.28662309235899847, 0.074088460826808866, 0.25543478673717029], + [0.29128515387578635, 0.074899049847466703, 0.25755101595750435], + [0.2960004726065818, 0.075745336000958424, 0.25967245030364566], + [0.30077276812918691, 0.076617824336164764, 0.26179294097819672], + [0.30559226007249934, 0.077521963107537312, 0.26391006692119662], + [0.31045520848595526, 0.078456871676182177, 0.2660200572779356], + [0.31535870009205808, 0.079420997315243186, 0.26811904076941961], + [0.32029986557994061, 0.080412994737554838, 0.27020322893039511], + [0.32527888860401261, 0.081428390076546092, 0.27226772884656186], + [0.33029174471181438, 0.08246763389003825, 0.27430929404579435], + [0.33533353224455448, 0.083532434119003962, 0.27632534356790039], + [0.34040164359597463, 0.084622236191702671, 0.27831254595259397], + [0.34549355713871799, 0.085736654965126335, 0.28026769921081435], + [0.35060678246032478, 0.08687555176033529, 0.28218770540182386], + [0.35573889947341125, 0.088038974350243354, 0.2840695897279818], + [0.36088752387578377, 0.089227194362745205, 0.28591050458531014], + [0.36605031412464006, 0.090440685427697898, 0.2877077458811747], + [0.37122508431309342, 0.091679997480262732, 0.28945865397633169], + [0.3764103053221462, 0.092945198093777909, 0.29116024157313919], + [0.38160247377467543, 0.094238731263712183, 0.29281107506269488], + [0.38679939079544168, 0.09556181960083443, 0.29440901248173756], + [0.39199887556812907, 
0.09691583650296684, 0.29595212005509081], + [0.39719876876325577, 0.098302320968278623, 0.29743856476285779], + [0.40239692379737496, 0.099722930314950553, 0.29886674369733968], + [0.40759120392688708, 0.10117945586419633, 0.30023519507728602], + [0.41277985630360303, 0.1026734006932461, 0.30154226437468967], + [0.41796105205173684, 0.10420644885760968, 0.30278652039631843], + [0.42313214269556043, 0.10578120994917611, 0.3039675809469457], + [0.42829101315789753, 0.1073997763055258, 0.30508479060294547], + [0.4334355841041439, 0.1090642347484701, 0.30613767928289148], + [0.43856378187931538, 0.11077667828375456, 0.30712600062348083], + [0.44367358645071275, 0.11253912421257944, 0.30804973095465449], + [0.44876299173174822, 0.11435355574622549, 0.30890905921943196], + [0.45383005086999889, 0.11622183788331528, 0.30970441249844921], + [0.45887288947308297, 0.11814571137706886, 0.31043636979038808], + [0.46389102840284874, 0.12012561256850712, 0.31110343446582983], + [0.46888111384598413, 0.12216445576414045, 0.31170911458932665], + [0.473841437035254, 0.12426354237989065, 0.31225470169927194], + [0.47877034239726296, 0.12642401401409453, 0.31274172735821959], + [0.48366628618847957, 0.12864679022013889, 0.31317188565991266], + [0.48852847371852987, 0.13093210934893723, 0.31354553695453014], + [0.49335504375145617, 0.13328091630401023, 0.31386561956734976], + [0.49814435462074153, 0.13569380302451714, 0.314135190862664], + [0.50289524974970612, 0.13817086581280427, 0.31435662153833671], + [0.50760681181053691, 0.14071192654913128, 0.31453200120082569], + [0.51227835105321762, 0.14331656120063752, 0.3146630922831542], + [0.51690848800544464, 0.14598463068714407, 0.31475407592280041], + [0.52149652863229956, 0.14871544765633712, 0.31480767954534428], + [0.52604189625477482, 0.15150818660835483, 0.31482653406646727], + [0.53054420489856446, 0.15436183633886777, 0.31481299789187128], + [0.5350027976174474, 0.15727540775107324, 0.31477085207396532], + 
[0.53941736649199057, 0.16024769309971934, 0.31470295028655965], + [0.54378771313608565, 0.16327738551419116, 0.31461204226295625], + [0.54811370033467621, 0.1663630904279047, 0.31450102990914708], + [0.55239521572711914, 0.16950338809328983, 0.31437291554615371], + [0.55663229034969341, 0.17269677158182117, 0.31423043195101424], + [0.56082499039117173, 0.17594170887918095, 0.31407639883970623], + [0.56497343529017696, 0.17923664950367169, 0.3139136046337036], + [0.56907784784011428, 0.18258004462335425, 0.31374440956796529], + [0.57313845754107873, 0.18597036007065024, 0.31357126868520002], + [0.57715550812992045, 0.18940601489760422, 0.31339704333572083], + [0.58112932761586555, 0.19288548904692518, 0.31322399394183942], + [0.58506024396466882, 0.19640737049066315, 0.31305401163732732], + [0.58894861935544707, 0.19997020971775276, 0.31288922211590126], + [0.59279480536520257, 0.20357251410079796, 0.31273234839304942], + [0.59659918109122367, 0.207212956082026, 0.31258523031121233], + [0.60036213010411577, 0.21089030138947745, 0.31244934410414688], + [0.60408401696732739, 0.21460331490206347, 0.31232652641170694], + [0.60776523994818654, 0.21835070166659282, 0.31221903291870201], + [0.6114062072731884, 0.22213124697023234, 0.31212881396435238], + [0.61500723236391375, 0.22594402043981826, 0.31205680685765741], + [0.61856865258877192, 0.22978799249179921, 0.31200463838728931], + [0.62209079821082613, 0.2336621873300741, 0.31197383273627388], + [0.62557416500434959, 0.23756535071152696, 0.31196698314912269], + [0.62901892016985872, 0.24149689191922535, 0.31198447195645718], + [0.63242534854210275, 0.24545598775548677, 0.31202765974624452], + [0.6357937104834237, 0.24944185818822678, 0.31209793953300591], + [0.6391243387840212, 0.25345365461983138, 0.31219689612063978], + [0.642417577481186, 0.257490519876798, 0.31232631707560987], + [0.64567349382645434, 0.26155203161615281, 0.31248673753935263], + [0.64889230169458245, 0.26563755336209077, 0.31267941819570189], + 
[0.65207417290277303, 0.26974650525236699, 0.31290560605819168], + [0.65521932609327127, 0.27387826652410152, 0.3131666792687211], + [0.6583280801134499, 0.27803210957665631, 0.3134643447952643], + [0.66140037532601781, 0.28220778870555907, 0.31379912926498488], + [0.66443632469878844, 0.28640483614256179, 0.31417223403606975], + [0.66743603766369131, 0.29062280081258873, 0.31458483752056837], + [0.67039959547676198, 0.29486126309253047, 0.31503813956872212], + [0.67332725564817331, 0.29911962764489264, 0.31553372323982209], + [0.67621897924409746, 0.30339762792450425, 0.3160724937230589], + [0.67907474028157344, 0.30769497879760166, 0.31665545668946665], + [0.68189457150944521, 0.31201133280550686, 0.31728380489244951], + [0.68467850942494535, 0.31634634821222207, 0.31795870784057567], + [0.68742656435169625, 0.32069970535138104, 0.31868137622277692], + [0.6901389321505248, 0.32507091815606004, 0.31945332332898302], + [0.69281544846764931, 0.32945984647042675, 0.3202754315314667], + [0.69545608346891119, 0.33386622163232865, 0.32114884306985791], + [0.6980608153581771, 0.33828976326048621, 0.32207478855218091], + [0.70062962477242097, 0.34273019305341756, 0.32305449047765694], + [0.70316249458814151, 0.34718723719597999, 0.32408913679491225], + [0.70565951122610093, 0.35166052978120937, 0.32518014084085567], + [0.70812059568420482, 0.35614985523380299, 0.32632861885644465], + [0.7105456546582587, 0.36065500290840113, 0.32753574162788762], + [0.71293466839773467, 0.36517570519856757, 0.3288027427038317], + [0.71528760614847287, 0.36971170225223449, 0.3301308728723546], + [0.71760444908133847, 0.37426272710686193, 0.33152138620958932], + [0.71988521490549851, 0.37882848839337313, 0.33297555200245399], + [0.7221299918421461, 0.38340864508963057, 0.33449469983585844], + [0.72433865647781592, 0.38800301593162145, 0.33607995965691828], + [0.72651122900227549, 0.3926113126792577, 0.3377325942005665], + [0.72864773856716547, 0.39723324476747235, 0.33945384341064017], + 
[0.73074820754845171, 0.401868526884681, 0.3412449533046818], + [0.73281270506268747, 0.4065168468778026, 0.34310715173410822], + [0.73484133598564938, 0.41117787004519513, 0.34504169470809071], + [0.73683422173585866, 0.41585125850290111, 0.34704978520758401], + [0.73879140024599266, 0.42053672992315327, 0.34913260148542435], + [0.74071301619506091, 0.4252339389526239, 0.35129130890802607], + [0.7425992159973317, 0.42994254036133867, 0.35352709245374592], + [0.74445018676570673, 0.43466217184617112, 0.35584108091122535], + [0.74626615789163442, 0.43939245044973502, 0.35823439142300639], + [0.74804739275559562, 0.44413297780351974, 0.36070813602540136], + [0.74979420547170472, 0.44888333481548809, 0.36326337558360278], + [0.75150685045891663, 0.45364314496866825, 0.36590112443835765], + [0.75318566369046569, 0.45841199172949604, 0.36862236642234769], + [0.75483105066959544, 0.46318942799460555, 0.3714280448394211], + [0.75644341577140706, 0.46797501437948458, 0.37431909037543515], + [0.75802325538455839, 0.4727682731566229, 0.37729635531096678], + [0.75957111105340058, 0.47756871222057079, 0.380360657784311], + [0.7610876378057071, 0.48237579130289127, 0.38351275723852291], + [0.76257333554052609, 0.48718906673415824, 0.38675335037837993], + [0.76402885609288662, 0.49200802533379656, 0.39008308392311997], + [0.76545492593330511, 0.49683212909727231, 0.39350254000115381], + [0.76685228950643891, 0.5016608471009063, 0.39701221751773474], + [0.76822176599735303, 0.50649362371287909, 0.40061257089416885], + [0.7695642334401418, 0.5113298901696085, 0.40430398069682483], + [0.77088091962302474, 0.51616892643469103, 0.40808667584648967], + [0.77217257229605551, 0.5210102658711383, 0.41196089987122869], + [0.77344021829889886, 0.52585332093451564, 0.41592679539764366], + [0.77468494746063199, 0.53069749384776732, 0.41998440356963762], + [0.77590790730685699, 0.53554217882461186, 0.42413367909988375], + [0.7771103295521099, 0.54038674910561235, 0.42837450371258479], + 
[0.77829345807633121, 0.54523059488426595, 0.432706647838971], + [0.77945862731506643, 0.55007308413977274, 0.43712979856444761], + [0.78060774749483774, 0.55491335744890613, 0.44164332426364639], + [0.78174180478981836, 0.55975098052594863, 0.44624687186865436], + [0.78286225264440912, 0.56458533111166875, 0.45093985823706345], + [0.78397060836414478, 0.56941578326710418, 0.45572154742892063], + [0.78506845019606841, 0.5742417003617839, 0.46059116206904965], + [0.78615737132332963, 0.5790624629815756, 0.46554778281918402], + [0.78723904108188347, 0.58387743744557208, 0.47059039582133383], + [0.78831514045623963, 0.58868600173562435, 0.47571791879076081], + [0.78938737766251943, 0.5934875421745599, 0.48092913815357724], + [0.79045776847727878, 0.59828134277062461, 0.48622257801969754], + [0.79152832843475607, 0.60306670593147205, 0.49159667021646397], + [0.79260034304237448, 0.60784322087037024, 0.49705020621532009], + [0.79367559698664958, 0.61261029334072192, 0.50258161291269432], + [0.79475585972654039, 0.61736734400220705, 0.50818921213102985], + [0.79584292379583765, 0.62211378808451145, 0.51387124091909786], + [0.79693854719951607, 0.62684905679296699, 0.5196258425240281], + [0.79804447815136637, 0.63157258225089552, 0.52545108144834785], + [0.7991624518501963, 0.63628379372029187, 0.53134495942561433], + [0.80029415389753977, 0.64098213306749863, 0.53730535185141037], + [0.80144124292560048, 0.64566703459218766, 0.5433300863249918], + [0.80260531146112946, 0.65033793748103852, 0.54941691584603647], + [0.80378792531077625, 0.65499426549472628, 0.55556350867083815], + [0.80499054790810298, 0.65963545027564163, 0.56176745110546977], + [0.80621460526927058, 0.66426089585282289, 0.56802629178649788], + [0.8074614045096935, 0.6688700095398864, 0.57433746373459582], + [0.80873219170089694, 0.67346216702194517, 0.58069834805576737], + [0.81002809466520687, 0.67803672673971815, 0.58710626908082753], + [0.81135014011763329, 0.68259301546243389, 0.59355848909050757], + 
[0.81269922039881493, 0.68713033714618876, 0.60005214820435104], + [0.81407611046993344, 0.69164794791482131, 0.6065843782630862], + [0.81548146627279483, 0.69614505508308089, 0.61315221209322646], + [0.81691575775055891, 0.70062083014783982, 0.61975260637257923], + [0.81837931164498223, 0.70507438189635097, 0.62638245478933297], + [0.81987230650455289, 0.70950474978787481, 0.63303857040067113], + [0.8213947205565636, 0.7139109141951604, 0.63971766697672761], + [0.82294635110428427, 0.71829177331290062, 0.6464164243818421], + [0.8245268129450285, 0.72264614312088882, 0.65313137915422603], + [0.82613549710580259, 0.72697275518238258, 0.65985900156216504], + [0.8277716072353446, 0.73127023324078089, 0.66659570204682972], + [0.82943407816481474, 0.7355371221572935, 0.67333772009301907], + [0.83112163529096306, 0.73977184647638616, 0.68008125203631464], + [0.83283277185777982, 0.74397271817459876, 0.68682235874648545], + [0.8345656905566583, 0.7481379479992134, 0.69355697649863846], + [0.83631898844737929, 0.75226548952875261, 0.70027999028864962], + [0.83809123476131964, 0.75635314860808633, 0.70698561390212977], + [0.83987839884120874, 0.76039907199779677, 0.71367147811129228], + [0.84167750766845151, 0.76440101200982946, 0.72033299387284622], + [0.84348529222933699, 0.76835660399870176, 0.72696536998972039], + [0.84529810731955113, 0.77226338601044719, 0.73356368240541492], + [0.84711195507965098, 0.77611880236047159, 0.74012275762807056], + [0.84892245563117641, 0.77992021407650147, 0.74663719293664366], + [0.85072697023178789, 0.78366457342383888, 0.7530974636118285], + [0.85251907207708444, 0.78734936133548439, 0.7594994148789691], + [0.85429219611470464, 0.79097196777091994, 0.76583801477914104], + [0.85604022314725403, 0.79452963601550608, 0.77210610037674143], + [0.85775662943504905, 0.79801963142713928, 0.77829571667247499], + [0.8594346370300241, 0.8014392309950078, 0.78439788751383921], + [0.86107117027565516, 0.80478517909812231, 0.79039529663736285], + 
[0.86265601051127572, 0.80805523804261525, 0.796282666437655], + [0.86418343723941027, 0.81124644224653542, 0.80204612696863953], + [0.86564934325605325, 0.81435544067514909, 0.80766972324164554], + [0.86705314907048503, 0.81737804041911244, 0.81313419626911398], + [0.86839954695818633, 0.82030875512181523, 0.81841638963128993], + [0.86969131502613806, 0.82314158859569164, 0.82350476683173168], + [0.87093846717297507, 0.82586857889438514, 0.82838497261149613], + [0.87215331978454325, 0.82848052823709672, 0.8330486712880828], + [0.87335171360916275, 0.83096715251272624, 0.83748851001197089], + [0.87453793320260187, 0.83331972948645461, 0.84171925358069011], + [0.87571458709961403, 0.8355302318472394, 0.84575537519027078], + [0.87687848451614692, 0.83759238071186537, 0.84961373549150254], + [0.87802298436649007, 0.83950165618540074, 0.85330645352458923], + [0.87913244240792765, 0.84125554884475906, 0.85685572291039636], + [0.88019293315695812, 0.84285224824778615, 0.86027399927156634], + [0.88119169871341951, 0.84429066717717349, 0.86356595168669881], + [0.88211542489401606, 0.84557007254559347, 0.86673765046233331], + [0.88295168595448525, 0.84668970275699273, 0.86979617048190971], + [0.88369127145898041, 0.84764891761519268, 0.87274147101441557], + [0.88432713054113543, 0.84844741572055415, 0.87556785228242973], + [0.88485138159908572, 0.84908426422893801, 0.87828235285372469], + [0.88525897972630474, 0.84955892810989209, 0.88088414794024839], + [0.88554714811952384, 0.84987174283631584, 0.88336206121170946], + [0.88571155122845646, 0.85002186115856315, 0.88572538990087124]] + +_twilight_shifted_data = (_twilight_data[len(_twilight_data)//2:] + + _twilight_data[:len(_twilight_data)//2]) +_twilight_shifted_data.reverse() +_turbo_data = [[0.18995, 0.07176, 0.23217], + [0.19483, 0.08339, 0.26149], + [0.19956, 0.09498, 0.29024], + [0.20415, 0.10652, 0.31844], + [0.20860, 0.11802, 0.34607], + [0.21291, 0.12947, 0.37314], + [0.21708, 0.14087, 0.39964], + [0.22111, 
0.15223, 0.42558], + [0.22500, 0.16354, 0.45096], + [0.22875, 0.17481, 0.47578], + [0.23236, 0.18603, 0.50004], + [0.23582, 0.19720, 0.52373], + [0.23915, 0.20833, 0.54686], + [0.24234, 0.21941, 0.56942], + [0.24539, 0.23044, 0.59142], + [0.24830, 0.24143, 0.61286], + [0.25107, 0.25237, 0.63374], + [0.25369, 0.26327, 0.65406], + [0.25618, 0.27412, 0.67381], + [0.25853, 0.28492, 0.69300], + [0.26074, 0.29568, 0.71162], + [0.26280, 0.30639, 0.72968], + [0.26473, 0.31706, 0.74718], + [0.26652, 0.32768, 0.76412], + [0.26816, 0.33825, 0.78050], + [0.26967, 0.34878, 0.79631], + [0.27103, 0.35926, 0.81156], + [0.27226, 0.36970, 0.82624], + [0.27334, 0.38008, 0.84037], + [0.27429, 0.39043, 0.85393], + [0.27509, 0.40072, 0.86692], + [0.27576, 0.41097, 0.87936], + [0.27628, 0.42118, 0.89123], + [0.27667, 0.43134, 0.90254], + [0.27691, 0.44145, 0.91328], + [0.27701, 0.45152, 0.92347], + [0.27698, 0.46153, 0.93309], + [0.27680, 0.47151, 0.94214], + [0.27648, 0.48144, 0.95064], + [0.27603, 0.49132, 0.95857], + [0.27543, 0.50115, 0.96594], + [0.27469, 0.51094, 0.97275], + [0.27381, 0.52069, 0.97899], + [0.27273, 0.53040, 0.98461], + [0.27106, 0.54015, 0.98930], + [0.26878, 0.54995, 0.99303], + [0.26592, 0.55979, 0.99583], + [0.26252, 0.56967, 0.99773], + [0.25862, 0.57958, 0.99876], + [0.25425, 0.58950, 0.99896], + [0.24946, 0.59943, 0.99835], + [0.24427, 0.60937, 0.99697], + [0.23874, 0.61931, 0.99485], + [0.23288, 0.62923, 0.99202], + [0.22676, 0.63913, 0.98851], + [0.22039, 0.64901, 0.98436], + [0.21382, 0.65886, 0.97959], + [0.20708, 0.66866, 0.97423], + [0.20021, 0.67842, 0.96833], + [0.19326, 0.68812, 0.96190], + [0.18625, 0.69775, 0.95498], + [0.17923, 0.70732, 0.94761], + [0.17223, 0.71680, 0.93981], + [0.16529, 0.72620, 0.93161], + [0.15844, 0.73551, 0.92305], + [0.15173, 0.74472, 0.91416], + [0.14519, 0.75381, 0.90496], + [0.13886, 0.76279, 0.89550], + [0.13278, 0.77165, 0.88580], + [0.12698, 0.78037, 0.87590], + [0.12151, 0.78896, 0.86581], + [0.11639, 0.79740, 
0.85559], + [0.11167, 0.80569, 0.84525], + [0.10738, 0.81381, 0.83484], + [0.10357, 0.82177, 0.82437], + [0.10026, 0.82955, 0.81389], + [0.09750, 0.83714, 0.80342], + [0.09532, 0.84455, 0.79299], + [0.09377, 0.85175, 0.78264], + [0.09287, 0.85875, 0.77240], + [0.09267, 0.86554, 0.76230], + [0.09320, 0.87211, 0.75237], + [0.09451, 0.87844, 0.74265], + [0.09662, 0.88454, 0.73316], + [0.09958, 0.89040, 0.72393], + [0.10342, 0.89600, 0.71500], + [0.10815, 0.90142, 0.70599], + [0.11374, 0.90673, 0.69651], + [0.12014, 0.91193, 0.68660], + [0.12733, 0.91701, 0.67627], + [0.13526, 0.92197, 0.66556], + [0.14391, 0.92680, 0.65448], + [0.15323, 0.93151, 0.64308], + [0.16319, 0.93609, 0.63137], + [0.17377, 0.94053, 0.61938], + [0.18491, 0.94484, 0.60713], + [0.19659, 0.94901, 0.59466], + [0.20877, 0.95304, 0.58199], + [0.22142, 0.95692, 0.56914], + [0.23449, 0.96065, 0.55614], + [0.24797, 0.96423, 0.54303], + [0.26180, 0.96765, 0.52981], + [0.27597, 0.97092, 0.51653], + [0.29042, 0.97403, 0.50321], + [0.30513, 0.97697, 0.48987], + [0.32006, 0.97974, 0.47654], + [0.33517, 0.98234, 0.46325], + [0.35043, 0.98477, 0.45002], + [0.36581, 0.98702, 0.43688], + [0.38127, 0.98909, 0.42386], + [0.39678, 0.99098, 0.41098], + [0.41229, 0.99268, 0.39826], + [0.42778, 0.99419, 0.38575], + [0.44321, 0.99551, 0.37345], + [0.45854, 0.99663, 0.36140], + [0.47375, 0.99755, 0.34963], + [0.48879, 0.99828, 0.33816], + [0.50362, 0.99879, 0.32701], + [0.51822, 0.99910, 0.31622], + [0.53255, 0.99919, 0.30581], + [0.54658, 0.99907, 0.29581], + [0.56026, 0.99873, 0.28623], + [0.57357, 0.99817, 0.27712], + [0.58646, 0.99739, 0.26849], + [0.59891, 0.99638, 0.26038], + [0.61088, 0.99514, 0.25280], + [0.62233, 0.99366, 0.24579], + [0.63323, 0.99195, 0.23937], + [0.64362, 0.98999, 0.23356], + [0.65394, 0.98775, 0.22835], + [0.66428, 0.98524, 0.22370], + [0.67462, 0.98246, 0.21960], + [0.68494, 0.97941, 0.21602], + [0.69525, 0.97610, 0.21294], + [0.70553, 0.97255, 0.21032], + [0.71577, 0.96875, 0.20815], + 
[0.72596, 0.96470, 0.20640], + [0.73610, 0.96043, 0.20504], + [0.74617, 0.95593, 0.20406], + [0.75617, 0.95121, 0.20343], + [0.76608, 0.94627, 0.20311], + [0.77591, 0.94113, 0.20310], + [0.78563, 0.93579, 0.20336], + [0.79524, 0.93025, 0.20386], + [0.80473, 0.92452, 0.20459], + [0.81410, 0.91861, 0.20552], + [0.82333, 0.91253, 0.20663], + [0.83241, 0.90627, 0.20788], + [0.84133, 0.89986, 0.20926], + [0.85010, 0.89328, 0.21074], + [0.85868, 0.88655, 0.21230], + [0.86709, 0.87968, 0.21391], + [0.87530, 0.87267, 0.21555], + [0.88331, 0.86553, 0.21719], + [0.89112, 0.85826, 0.21880], + [0.89870, 0.85087, 0.22038], + [0.90605, 0.84337, 0.22188], + [0.91317, 0.83576, 0.22328], + [0.92004, 0.82806, 0.22456], + [0.92666, 0.82025, 0.22570], + [0.93301, 0.81236, 0.22667], + [0.93909, 0.80439, 0.22744], + [0.94489, 0.79634, 0.22800], + [0.95039, 0.78823, 0.22831], + [0.95560, 0.78005, 0.22836], + [0.96049, 0.77181, 0.22811], + [0.96507, 0.76352, 0.22754], + [0.96931, 0.75519, 0.22663], + [0.97323, 0.74682, 0.22536], + [0.97679, 0.73842, 0.22369], + [0.98000, 0.73000, 0.22161], + [0.98289, 0.72140, 0.21918], + [0.98549, 0.71250, 0.21650], + [0.98781, 0.70330, 0.21358], + [0.98986, 0.69382, 0.21043], + [0.99163, 0.68408, 0.20706], + [0.99314, 0.67408, 0.20348], + [0.99438, 0.66386, 0.19971], + [0.99535, 0.65341, 0.19577], + [0.99607, 0.64277, 0.19165], + [0.99654, 0.63193, 0.18738], + [0.99675, 0.62093, 0.18297], + [0.99672, 0.60977, 0.17842], + [0.99644, 0.59846, 0.17376], + [0.99593, 0.58703, 0.16899], + [0.99517, 0.57549, 0.16412], + [0.99419, 0.56386, 0.15918], + [0.99297, 0.55214, 0.15417], + [0.99153, 0.54036, 0.14910], + [0.98987, 0.52854, 0.14398], + [0.98799, 0.51667, 0.13883], + [0.98590, 0.50479, 0.13367], + [0.98360, 0.49291, 0.12849], + [0.98108, 0.48104, 0.12332], + [0.97837, 0.46920, 0.11817], + [0.97545, 0.45740, 0.11305], + [0.97234, 0.44565, 0.10797], + [0.96904, 0.43399, 0.10294], + [0.96555, 0.42241, 0.09798], + [0.96187, 0.41093, 0.09310], + [0.95801, 
0.39958, 0.08831], + [0.95398, 0.38836, 0.08362], + [0.94977, 0.37729, 0.07905], + [0.94538, 0.36638, 0.07461], + [0.94084, 0.35566, 0.07031], + [0.93612, 0.34513, 0.06616], + [0.93125, 0.33482, 0.06218], + [0.92623, 0.32473, 0.05837], + [0.92105, 0.31489, 0.05475], + [0.91572, 0.30530, 0.05134], + [0.91024, 0.29599, 0.04814], + [0.90463, 0.28696, 0.04516], + [0.89888, 0.27824, 0.04243], + [0.89298, 0.26981, 0.03993], + [0.88691, 0.26152, 0.03753], + [0.88066, 0.25334, 0.03521], + [0.87422, 0.24526, 0.03297], + [0.86760, 0.23730, 0.03082], + [0.86079, 0.22945, 0.02875], + [0.85380, 0.22170, 0.02677], + [0.84662, 0.21407, 0.02487], + [0.83926, 0.20654, 0.02305], + [0.83172, 0.19912, 0.02131], + [0.82399, 0.19182, 0.01966], + [0.81608, 0.18462, 0.01809], + [0.80799, 0.17753, 0.01660], + [0.79971, 0.17055, 0.01520], + [0.79125, 0.16368, 0.01387], + [0.78260, 0.15693, 0.01264], + [0.77377, 0.15028, 0.01148], + [0.76476, 0.14374, 0.01041], + [0.75556, 0.13731, 0.00942], + [0.74617, 0.13098, 0.00851], + [0.73661, 0.12477, 0.00769], + [0.72686, 0.11867, 0.00695], + [0.71692, 0.11268, 0.00629], + [0.70680, 0.10680, 0.00571], + [0.69650, 0.10102, 0.00522], + [0.68602, 0.09536, 0.00481], + [0.67535, 0.08980, 0.00449], + [0.66449, 0.08436, 0.00424], + [0.65345, 0.07902, 0.00408], + [0.64223, 0.07380, 0.00401], + [0.63082, 0.06868, 0.00401], + [0.61923, 0.06367, 0.00410], + [0.60746, 0.05878, 0.00427], + [0.59550, 0.05399, 0.00453], + [0.58336, 0.04931, 0.00486], + [0.57103, 0.04474, 0.00529], + [0.55852, 0.04028, 0.00579], + [0.54583, 0.03593, 0.00638], + [0.53295, 0.03169, 0.00705], + [0.51989, 0.02756, 0.00780], + [0.50664, 0.02354, 0.00863], + [0.49321, 0.01963, 0.00955], + [0.47960, 0.01583, 0.01055]] + + +cmaps = { + name: ListedColormap(data, name=name) for name, data in [ + ('magma', _magma_data), + ('inferno', _inferno_data), + ('plasma', _plasma_data), + ('viridis', _viridis_data), + ('cividis', _cividis_data), + ('twilight', _twilight_data), + ('twilight_shifted', 
_twilight_shifted_data), + ('turbo', _turbo_data), + ]} diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_color_data.py b/.venv/lib/python3.9/site-packages/matplotlib/_color_data.py new file mode 100644 index 00000000..44f97adb --- /dev/null +++ b/.venv/lib/python3.9/site-packages/matplotlib/_color_data.py @@ -0,0 +1,1141 @@ +BASE_COLORS = { + 'b': (0, 0, 1), # blue + 'g': (0, 0.5, 0), # green + 'r': (1, 0, 0), # red + 'c': (0, 0.75, 0.75), # cyan + 'm': (0.75, 0, 0.75), # magenta + 'y': (0.75, 0.75, 0), # yellow + 'k': (0, 0, 0), # black + 'w': (1, 1, 1), # white +} + + +# These colors are from Tableau +TABLEAU_COLORS = { + 'tab:blue': '#1f77b4', + 'tab:orange': '#ff7f0e', + 'tab:green': '#2ca02c', + 'tab:red': '#d62728', + 'tab:purple': '#9467bd', + 'tab:brown': '#8c564b', + 'tab:pink': '#e377c2', + 'tab:gray': '#7f7f7f', + 'tab:olive': '#bcbd22', + 'tab:cyan': '#17becf', +} + + +# This mapping of color names -> hex values is taken from +# a survey run by Randall Munroe see: +# https://blog.xkcd.com/2010/05/03/color-survey-results/ +# for more details. 
The results are hosted at +# https://xkcd.com/color/rgb/ +# and also available as a text file at +# https://xkcd.com/color/rgb.txt +# +# License: https://creativecommons.org/publicdomain/zero/1.0/ +XKCD_COLORS = { + 'cloudy blue': '#acc2d9', + 'dark pastel green': '#56ae57', + 'dust': '#b2996e', + 'electric lime': '#a8ff04', + 'fresh green': '#69d84f', + 'light eggplant': '#894585', + 'nasty green': '#70b23f', + 'really light blue': '#d4ffff', + 'tea': '#65ab7c', + 'warm purple': '#952e8f', + 'yellowish tan': '#fcfc81', + 'cement': '#a5a391', + 'dark grass green': '#388004', + 'dusty teal': '#4c9085', + 'grey teal': '#5e9b8a', + 'macaroni and cheese': '#efb435', + 'pinkish tan': '#d99b82', + 'spruce': '#0a5f38', + 'strong blue': '#0c06f7', + 'toxic green': '#61de2a', + 'windows blue': '#3778bf', + 'blue blue': '#2242c7', + 'blue with a hint of purple': '#533cc6', + 'booger': '#9bb53c', + 'bright sea green': '#05ffa6', + 'dark green blue': '#1f6357', + 'deep turquoise': '#017374', + 'green teal': '#0cb577', + 'strong pink': '#ff0789', + 'bland': '#afa88b', + 'deep aqua': '#08787f', + 'lavender pink': '#dd85d7', + 'light moss green': '#a6c875', + 'light seafoam green': '#a7ffb5', + 'olive yellow': '#c2b709', + 'pig pink': '#e78ea5', + 'deep lilac': '#966ebd', + 'desert': '#ccad60', + 'dusty lavender': '#ac86a8', + 'purpley grey': '#947e94', + 'purply': '#983fb2', + 'candy pink': '#ff63e9', + 'light pastel green': '#b2fba5', + 'boring green': '#63b365', + 'kiwi green': '#8ee53f', + 'light grey green': '#b7e1a1', + 'orange pink': '#ff6f52', + 'tea green': '#bdf8a3', + 'very light brown': '#d3b683', + 'egg shell': '#fffcc4', + 'eggplant purple': '#430541', + 'powder pink': '#ffb2d0', + 'reddish grey': '#997570', + 'baby shit brown': '#ad900d', + 'liliac': '#c48efd', + 'stormy blue': '#507b9c', + 'ugly brown': '#7d7103', + 'custard': '#fffd78', + 'darkish pink': '#da467d', + 'deep brown': '#410200', + 'greenish beige': '#c9d179', + 'manilla': '#fffa86', + 'off blue': 
'#5684ae', + 'battleship grey': '#6b7c85', + 'browny green': '#6f6c0a', + 'bruise': '#7e4071', + 'kelley green': '#009337', + 'sickly yellow': '#d0e429', + 'sunny yellow': '#fff917', + 'azul': '#1d5dec', + 'darkgreen': '#054907', + 'green/yellow': '#b5ce08', + 'lichen': '#8fb67b', + 'light light green': '#c8ffb0', + 'pale gold': '#fdde6c', + 'sun yellow': '#ffdf22', + 'tan green': '#a9be70', + 'burple': '#6832e3', + 'butterscotch': '#fdb147', + 'toupe': '#c7ac7d', + 'dark cream': '#fff39a', + 'indian red': '#850e04', + 'light lavendar': '#efc0fe', + 'poison green': '#40fd14', + 'baby puke green': '#b6c406', + 'bright yellow green': '#9dff00', + 'charcoal grey': '#3c4142', + 'squash': '#f2ab15', + 'cinnamon': '#ac4f06', + 'light pea green': '#c4fe82', + 'radioactive green': '#2cfa1f', + 'raw sienna': '#9a6200', + 'baby purple': '#ca9bf7', + 'cocoa': '#875f42', + 'light royal blue': '#3a2efe', + 'orangeish': '#fd8d49', + 'rust brown': '#8b3103', + 'sand brown': '#cba560', + 'swamp': '#698339', + 'tealish green': '#0cdc73', + 'burnt siena': '#b75203', + 'camo': '#7f8f4e', + 'dusk blue': '#26538d', + 'fern': '#63a950', + 'old rose': '#c87f89', + 'pale light green': '#b1fc99', + 'peachy pink': '#ff9a8a', + 'rosy pink': '#f6688e', + 'light bluish green': '#76fda8', + 'light bright green': '#53fe5c', + 'light neon green': '#4efd54', + 'light seafoam': '#a0febf', + 'tiffany blue': '#7bf2da', + 'washed out green': '#bcf5a6', + 'browny orange': '#ca6b02', + 'nice blue': '#107ab0', + 'sapphire': '#2138ab', + 'greyish teal': '#719f91', + 'orangey yellow': '#fdb915', + 'parchment': '#fefcaf', + 'straw': '#fcf679', + 'very dark brown': '#1d0200', + 'terracota': '#cb6843', + 'ugly blue': '#31668a', + 'clear blue': '#247afd', + 'creme': '#ffffb6', + 'foam green': '#90fda9', + 'grey/green': '#86a17d', + 'light gold': '#fddc5c', + 'seafoam blue': '#78d1b6', + 'topaz': '#13bbaf', + 'violet pink': '#fb5ffc', + 'wintergreen': '#20f986', + 'yellow tan': '#ffe36e', + 'dark fuchsia': 
'#9d0759', + 'indigo blue': '#3a18b1', + 'light yellowish green': '#c2ff89', + 'pale magenta': '#d767ad', + 'rich purple': '#720058', + 'sunflower yellow': '#ffda03', + 'green/blue': '#01c08d', + 'leather': '#ac7434', + 'racing green': '#014600', + 'vivid purple': '#9900fa', + 'dark royal blue': '#02066f', + 'hazel': '#8e7618', + 'muted pink': '#d1768f', + 'booger green': '#96b403', + 'canary': '#fdff63', + 'cool grey': '#95a3a6', + 'dark taupe': '#7f684e', + 'darkish purple': '#751973', + 'true green': '#089404', + 'coral pink': '#ff6163', + 'dark sage': '#598556', + 'dark slate blue': '#214761', + 'flat blue': '#3c73a8', + 'mushroom': '#ba9e88', + 'rich blue': '#021bf9', + 'dirty purple': '#734a65', + 'greenblue': '#23c48b', + 'icky green': '#8fae22', + 'light khaki': '#e6f2a2', + 'warm blue': '#4b57db', + 'dark hot pink': '#d90166', + 'deep sea blue': '#015482', + 'carmine': '#9d0216', + 'dark yellow green': '#728f02', + 'pale peach': '#ffe5ad', + 'plum purple': '#4e0550', + 'golden rod': '#f9bc08', + 'neon red': '#ff073a', + 'old pink': '#c77986', + 'very pale blue': '#d6fffe', + 'blood orange': '#fe4b03', + 'grapefruit': '#fd5956', + 'sand yellow': '#fce166', + 'clay brown': '#b2713d', + 'dark blue grey': '#1f3b4d', + 'flat green': '#699d4c', + 'light green blue': '#56fca2', + 'warm pink': '#fb5581', + 'dodger blue': '#3e82fc', + 'gross green': '#a0bf16', + 'ice': '#d6fffa', + 'metallic blue': '#4f738e', + 'pale salmon': '#ffb19a', + 'sap green': '#5c8b15', + 'algae': '#54ac68', + 'bluey grey': '#89a0b0', + 'greeny grey': '#7ea07a', + 'highlighter green': '#1bfc06', + 'light light blue': '#cafffb', + 'light mint': '#b6ffbb', + 'raw umber': '#a75e09', + 'vivid blue': '#152eff', + 'deep lavender': '#8d5eb7', + 'dull teal': '#5f9e8f', + 'light greenish blue': '#63f7b4', + 'mud green': '#606602', + 'pinky': '#fc86aa', + 'red wine': '#8c0034', + 'shit green': '#758000', + 'tan brown': '#ab7e4c', + 'darkblue': '#030764', + 'rosa': '#fe86a4', + 'lipstick': '#d5174e', 
+ 'pale mauve': '#fed0fc', + 'claret': '#680018', + 'dandelion': '#fedf08', + 'orangered': '#fe420f', + 'poop green': '#6f7c00', + 'ruby': '#ca0147', + 'dark': '#1b2431', + 'greenish turquoise': '#00fbb0', + 'pastel red': '#db5856', + 'piss yellow': '#ddd618', + 'bright cyan': '#41fdfe', + 'dark coral': '#cf524e', + 'algae green': '#21c36f', + 'darkish red': '#a90308', + 'reddy brown': '#6e1005', + 'blush pink': '#fe828c', + 'camouflage green': '#4b6113', + 'lawn green': '#4da409', + 'putty': '#beae8a', + 'vibrant blue': '#0339f8', + 'dark sand': '#a88f59', + 'purple/blue': '#5d21d0', + 'saffron': '#feb209', + 'twilight': '#4e518b', + 'warm brown': '#964e02', + 'bluegrey': '#85a3b2', + 'bubble gum pink': '#ff69af', + 'duck egg blue': '#c3fbf4', + 'greenish cyan': '#2afeb7', + 'petrol': '#005f6a', + 'royal': '#0c1793', + 'butter': '#ffff81', + 'dusty orange': '#f0833a', + 'off yellow': '#f1f33f', + 'pale olive green': '#b1d27b', + 'orangish': '#fc824a', + 'leaf': '#71aa34', + 'light blue grey': '#b7c9e2', + 'dried blood': '#4b0101', + 'lightish purple': '#a552e6', + 'rusty red': '#af2f0d', + 'lavender blue': '#8b88f8', + 'light grass green': '#9af764', + 'light mint green': '#a6fbb2', + 'sunflower': '#ffc512', + 'velvet': '#750851', + 'brick orange': '#c14a09', + 'lightish red': '#fe2f4a', + 'pure blue': '#0203e2', + 'twilight blue': '#0a437a', + 'violet red': '#a50055', + 'yellowy brown': '#ae8b0c', + 'carnation': '#fd798f', + 'muddy yellow': '#bfac05', + 'dark seafoam green': '#3eaf76', + 'deep rose': '#c74767', + 'dusty red': '#b9484e', + 'grey/blue': '#647d8e', + 'lemon lime': '#bffe28', + 'purple/pink': '#d725de', + 'brown yellow': '#b29705', + 'purple brown': '#673a3f', + 'wisteria': '#a87dc2', + 'banana yellow': '#fafe4b', + 'lipstick red': '#c0022f', + 'water blue': '#0e87cc', + 'brown grey': '#8d8468', + 'vibrant purple': '#ad03de', + 'baby green': '#8cff9e', + 'barf green': '#94ac02', + 'eggshell blue': '#c4fff7', + 'sandy yellow': '#fdee73', + 'cool 
green': '#33b864', + 'pale': '#fff9d0', + 'blue/grey': '#758da3', + 'hot magenta': '#f504c9', + 'greyblue': '#77a1b5', + 'purpley': '#8756e4', + 'baby shit green': '#889717', + 'brownish pink': '#c27e79', + 'dark aquamarine': '#017371', + 'diarrhea': '#9f8303', + 'light mustard': '#f7d560', + 'pale sky blue': '#bdf6fe', + 'turtle green': '#75b84f', + 'bright olive': '#9cbb04', + 'dark grey blue': '#29465b', + 'greeny brown': '#696006', + 'lemon green': '#adf802', + 'light periwinkle': '#c1c6fc', + 'seaweed green': '#35ad6b', + 'sunshine yellow': '#fffd37', + 'ugly purple': '#a442a0', + 'medium pink': '#f36196', + 'puke brown': '#947706', + 'very light pink': '#fff4f2', + 'viridian': '#1e9167', + 'bile': '#b5c306', + 'faded yellow': '#feff7f', + 'very pale green': '#cffdbc', + 'vibrant green': '#0add08', + 'bright lime': '#87fd05', + 'spearmint': '#1ef876', + 'light aquamarine': '#7bfdc7', + 'light sage': '#bcecac', + 'yellowgreen': '#bbf90f', + 'baby poo': '#ab9004', + 'dark seafoam': '#1fb57a', + 'deep teal': '#00555a', + 'heather': '#a484ac', + 'rust orange': '#c45508', + 'dirty blue': '#3f829d', + 'fern green': '#548d44', + 'bright lilac': '#c95efb', + 'weird green': '#3ae57f', + 'peacock blue': '#016795', + 'avocado green': '#87a922', + 'faded orange': '#f0944d', + 'grape purple': '#5d1451', + 'hot green': '#25ff29', + 'lime yellow': '#d0fe1d', + 'mango': '#ffa62b', + 'shamrock': '#01b44c', + 'bubblegum': '#ff6cb5', + 'purplish brown': '#6b4247', + 'vomit yellow': '#c7c10c', + 'pale cyan': '#b7fffa', + 'key lime': '#aeff6e', + 'tomato red': '#ec2d01', + 'lightgreen': '#76ff7b', + 'merlot': '#730039', + 'night blue': '#040348', + 'purpleish pink': '#df4ec8', + 'apple': '#6ecb3c', + 'baby poop green': '#8f9805', + 'green apple': '#5edc1f', + 'heliotrope': '#d94ff5', + 'yellow/green': '#c8fd3d', + 'almost black': '#070d0d', + 'cool blue': '#4984b8', + 'leafy green': '#51b73b', + 'mustard brown': '#ac7e04', + 'dusk': '#4e5481', + 'dull brown': '#876e4b', + 'frog 
green': '#58bc08', + 'vivid green': '#2fef10', + 'bright light green': '#2dfe54', + 'fluro green': '#0aff02', + 'kiwi': '#9cef43', + 'seaweed': '#18d17b', + 'navy green': '#35530a', + 'ultramarine blue': '#1805db', + 'iris': '#6258c4', + 'pastel orange': '#ff964f', + 'yellowish orange': '#ffab0f', + 'perrywinkle': '#8f8ce7', + 'tealish': '#24bca8', + 'dark plum': '#3f012c', + 'pear': '#cbf85f', + 'pinkish orange': '#ff724c', + 'midnight purple': '#280137', + 'light urple': '#b36ff6', + 'dark mint': '#48c072', + 'greenish tan': '#bccb7a', + 'light burgundy': '#a8415b', + 'turquoise blue': '#06b1c4', + 'ugly pink': '#cd7584', + 'sandy': '#f1da7a', + 'electric pink': '#ff0490', + 'muted purple': '#805b87', + 'mid green': '#50a747', + 'greyish': '#a8a495', + 'neon yellow': '#cfff04', + 'banana': '#ffff7e', + 'carnation pink': '#ff7fa7', + 'tomato': '#ef4026', + 'sea': '#3c9992', + 'muddy brown': '#886806', + 'turquoise green': '#04f489', + 'buff': '#fef69e', + 'fawn': '#cfaf7b', + 'muted blue': '#3b719f', + 'pale rose': '#fdc1c5', + 'dark mint green': '#20c073', + 'amethyst': '#9b5fc0', + 'blue/green': '#0f9b8e', + 'chestnut': '#742802', + 'sick green': '#9db92c', + 'pea': '#a4bf20', + 'rusty orange': '#cd5909', + 'stone': '#ada587', + 'rose red': '#be013c', + 'pale aqua': '#b8ffeb', + 'deep orange': '#dc4d01', + 'earth': '#a2653e', + 'mossy green': '#638b27', + 'grassy green': '#419c03', + 'pale lime green': '#b1ff65', + 'light grey blue': '#9dbcd4', + 'pale grey': '#fdfdfe', + 'asparagus': '#77ab56', + 'blueberry': '#464196', + 'purple red': '#990147', + 'pale lime': '#befd73', + 'greenish teal': '#32bf84', + 'caramel': '#af6f09', + 'deep magenta': '#a0025c', + 'light peach': '#ffd8b1', + 'milk chocolate': '#7f4e1e', + 'ocher': '#bf9b0c', + 'off green': '#6ba353', + 'purply pink': '#f075e6', + 'lightblue': '#7bc8f6', + 'dusky blue': '#475f94', + 'golden': '#f5bf03', + 'light beige': '#fffeb6', + 'butter yellow': '#fffd74', + 'dusky purple': '#895b7b', + 'french 
blue': '#436bad', + 'ugly yellow': '#d0c101', + 'greeny yellow': '#c6f808', + 'orangish red': '#f43605', + 'shamrock green': '#02c14d', + 'orangish brown': '#b25f03', + 'tree green': '#2a7e19', + 'deep violet': '#490648', + 'gunmetal': '#536267', + 'blue/purple': '#5a06ef', + 'cherry': '#cf0234', + 'sandy brown': '#c4a661', + 'warm grey': '#978a84', + 'dark indigo': '#1f0954', + 'midnight': '#03012d', + 'bluey green': '#2bb179', + 'grey pink': '#c3909b', + 'soft purple': '#a66fb5', + 'blood': '#770001', + 'brown red': '#922b05', + 'medium grey': '#7d7f7c', + 'berry': '#990f4b', + 'poo': '#8f7303', + 'purpley pink': '#c83cb9', + 'light salmon': '#fea993', + 'snot': '#acbb0d', + 'easter purple': '#c071fe', + 'light yellow green': '#ccfd7f', + 'dark navy blue': '#00022e', + 'drab': '#828344', + 'light rose': '#ffc5cb', + 'rouge': '#ab1239', + 'purplish red': '#b0054b', + 'slime green': '#99cc04', + 'baby poop': '#937c00', + 'irish green': '#019529', + 'pink/purple': '#ef1de7', + 'dark navy': '#000435', + 'greeny blue': '#42b395', + 'light plum': '#9d5783', + 'pinkish grey': '#c8aca9', + 'dirty orange': '#c87606', + 'rust red': '#aa2704', + 'pale lilac': '#e4cbff', + 'orangey red': '#fa4224', + 'primary blue': '#0804f9', + 'kermit green': '#5cb200', + 'brownish purple': '#76424e', + 'murky green': '#6c7a0e', + 'wheat': '#fbdd7e', + 'very dark purple': '#2a0134', + 'bottle green': '#044a05', + 'watermelon': '#fd4659', + 'deep sky blue': '#0d75f8', + 'fire engine red': '#fe0002', + 'yellow ochre': '#cb9d06', + 'pumpkin orange': '#fb7d07', + 'pale olive': '#b9cc81', + 'light lilac': '#edc8ff', + 'lightish green': '#61e160', + 'carolina blue': '#8ab8fe', + 'mulberry': '#920a4e', + 'shocking pink': '#fe02a2', + 'auburn': '#9a3001', + 'bright lime green': '#65fe08', + 'celadon': '#befdb7', + 'pinkish brown': '#b17261', + 'poo brown': '#885f01', + 'bright sky blue': '#02ccfe', + 'celery': '#c1fd95', + 'dirt brown': '#836539', + 'strawberry': '#fb2943', + 'dark lime': 
'#84b701', + 'copper': '#b66325', + 'medium brown': '#7f5112', + 'muted green': '#5fa052', + "robin's egg": '#6dedfd', + 'bright aqua': '#0bf9ea', + 'bright lavender': '#c760ff', + 'ivory': '#ffffcb', + 'very light purple': '#f6cefc', + 'light navy': '#155084', + 'pink red': '#f5054f', + 'olive brown': '#645403', + 'poop brown': '#7a5901', + 'mustard green': '#a8b504', + 'ocean green': '#3d9973', + 'very dark blue': '#000133', + 'dusty green': '#76a973', + 'light navy blue': '#2e5a88', + 'minty green': '#0bf77d', + 'adobe': '#bd6c48', + 'barney': '#ac1db8', + 'jade green': '#2baf6a', + 'bright light blue': '#26f7fd', + 'light lime': '#aefd6c', + 'dark khaki': '#9b8f55', + 'orange yellow': '#ffad01', + 'ocre': '#c69c04', + 'maize': '#f4d054', + 'faded pink': '#de9dac', + 'british racing green': '#05480d', + 'sandstone': '#c9ae74', + 'mud brown': '#60460f', + 'light sea green': '#98f6b0', + 'robin egg blue': '#8af1fe', + 'aqua marine': '#2ee8bb', + 'dark sea green': '#11875d', + 'soft pink': '#fdb0c0', + 'orangey brown': '#b16002', + 'cherry red': '#f7022a', + 'burnt yellow': '#d5ab09', + 'brownish grey': '#86775f', + 'camel': '#c69f59', + 'purplish grey': '#7a687f', + 'marine': '#042e60', + 'greyish pink': '#c88d94', + 'pale turquoise': '#a5fbd5', + 'pastel yellow': '#fffe71', + 'bluey purple': '#6241c7', + 'canary yellow': '#fffe40', + 'faded red': '#d3494e', + 'sepia': '#985e2b', + 'coffee': '#a6814c', + 'bright magenta': '#ff08e8', + 'mocha': '#9d7651', + 'ecru': '#feffca', + 'purpleish': '#98568d', + 'cranberry': '#9e003a', + 'darkish green': '#287c37', + 'brown orange': '#b96902', + 'dusky rose': '#ba6873', + 'melon': '#ff7855', + 'sickly green': '#94b21c', + 'silver': '#c5c9c7', + 'purply blue': '#661aee', + 'purpleish blue': '#6140ef', + 'hospital green': '#9be5aa', + 'shit brown': '#7b5804', + 'mid blue': '#276ab3', + 'amber': '#feb308', + 'easter green': '#8cfd7e', + 'soft blue': '#6488ea', + 'cerulean blue': '#056eee', + 'golden brown': '#b27a01', + 
'bright turquoise': '#0ffef9', + 'red pink': '#fa2a55', + 'red purple': '#820747', + 'greyish brown': '#7a6a4f', + 'vermillion': '#f4320c', + 'russet': '#a13905', + 'steel grey': '#6f828a', + 'lighter purple': '#a55af4', + 'bright violet': '#ad0afd', + 'prussian blue': '#004577', + 'slate green': '#658d6d', + 'dirty pink': '#ca7b80', + 'dark blue green': '#005249', + 'pine': '#2b5d34', + 'yellowy green': '#bff128', + 'dark gold': '#b59410', + 'bluish': '#2976bb', + 'darkish blue': '#014182', + 'dull red': '#bb3f3f', + 'pinky red': '#fc2647', + 'bronze': '#a87900', + 'pale teal': '#82cbb2', + 'military green': '#667c3e', + 'barbie pink': '#fe46a5', + 'bubblegum pink': '#fe83cc', + 'pea soup green': '#94a617', + 'dark mustard': '#a88905', + 'shit': '#7f5f00', + 'medium purple': '#9e43a2', + 'very dark green': '#062e03', + 'dirt': '#8a6e45', + 'dusky pink': '#cc7a8b', + 'red violet': '#9e0168', + 'lemon yellow': '#fdff38', + 'pistachio': '#c0fa8b', + 'dull yellow': '#eedc5b', + 'dark lime green': '#7ebd01', + 'denim blue': '#3b5b92', + 'teal blue': '#01889f', + 'lightish blue': '#3d7afd', + 'purpley blue': '#5f34e7', + 'light indigo': '#6d5acf', + 'swamp green': '#748500', + 'brown green': '#706c11', + 'dark maroon': '#3c0008', + 'hot purple': '#cb00f5', + 'dark forest green': '#002d04', + 'faded blue': '#658cbb', + 'drab green': '#749551', + 'light lime green': '#b9ff66', + 'snot green': '#9dc100', + 'yellowish': '#faee66', + 'light blue green': '#7efbb3', + 'bordeaux': '#7b002c', + 'light mauve': '#c292a1', + 'ocean': '#017b92', + 'marigold': '#fcc006', + 'muddy green': '#657432', + 'dull orange': '#d8863b', + 'steel': '#738595', + 'electric purple': '#aa23ff', + 'fluorescent green': '#08ff08', + 'yellowish brown': '#9b7a01', + 'blush': '#f29e8e', + 'soft green': '#6fc276', + 'bright orange': '#ff5b00', + 'lemon': '#fdff52', + 'purple grey': '#866f85', + 'acid green': '#8ffe09', + 'pale lavender': '#eecffe', + 'violet blue': '#510ac9', + 'light forest green': 
'#4f9153', + 'burnt red': '#9f2305', + 'khaki green': '#728639', + 'cerise': '#de0c62', + 'faded purple': '#916e99', + 'apricot': '#ffb16d', + 'dark olive green': '#3c4d03', + 'grey brown': '#7f7053', + 'green grey': '#77926f', + 'true blue': '#010fcc', + 'pale violet': '#ceaefa', + 'periwinkle blue': '#8f99fb', + 'light sky blue': '#c6fcff', + 'blurple': '#5539cc', + 'green brown': '#544e03', + 'bluegreen': '#017a79', + 'bright teal': '#01f9c6', + 'brownish yellow': '#c9b003', + 'pea soup': '#929901', + 'forest': '#0b5509', + 'barney purple': '#a00498', + 'ultramarine': '#2000b1', + 'purplish': '#94568c', + 'puke yellow': '#c2be0e', + 'bluish grey': '#748b97', + 'dark periwinkle': '#665fd1', + 'dark lilac': '#9c6da5', + 'reddish': '#c44240', + 'light maroon': '#a24857', + 'dusty purple': '#825f87', + 'terra cotta': '#c9643b', + 'avocado': '#90b134', + 'marine blue': '#01386a', + 'teal green': '#25a36f', + 'slate grey': '#59656d', + 'lighter green': '#75fd63', + 'electric green': '#21fc0d', + 'dusty blue': '#5a86ad', + 'golden yellow': '#fec615', + 'bright yellow': '#fffd01', + 'light lavender': '#dfc5fe', + 'umber': '#b26400', + 'poop': '#7f5e00', + 'dark peach': '#de7e5d', + 'jungle green': '#048243', + 'eggshell': '#ffffd4', + 'denim': '#3b638c', + 'yellow brown': '#b79400', + 'dull purple': '#84597e', + 'chocolate brown': '#411900', + 'wine red': '#7b0323', + 'neon blue': '#04d9ff', + 'dirty green': '#667e2c', + 'light tan': '#fbeeac', + 'ice blue': '#d7fffe', + 'cadet blue': '#4e7496', + 'dark mauve': '#874c62', + 'very light blue': '#d5ffff', + 'grey purple': '#826d8c', + 'pastel pink': '#ffbacd', + 'very light green': '#d1ffbd', + 'dark sky blue': '#448ee4', + 'evergreen': '#05472a', + 'dull pink': '#d5869d', + 'aubergine': '#3d0734', + 'mahogany': '#4a0100', + 'reddish orange': '#f8481c', + 'deep green': '#02590f', + 'vomit green': '#89a203', + 'purple pink': '#e03fd8', + 'dusty pink': '#d58a94', + 'faded green': '#7bb274', + 'camo green': '#526525', + 
'pinky purple': '#c94cbe', + 'pink purple': '#db4bda', + 'brownish red': '#9e3623', + 'dark rose': '#b5485d', + 'mud': '#735c12', + 'brownish': '#9c6d57', + 'emerald green': '#028f1e', + 'pale brown': '#b1916e', + 'dull blue': '#49759c', + 'burnt umber': '#a0450e', + 'medium green': '#39ad48', + 'clay': '#b66a50', + 'light aqua': '#8cffdb', + 'light olive green': '#a4be5c', + 'brownish orange': '#cb7723', + 'dark aqua': '#05696b', + 'purplish pink': '#ce5dae', + 'dark salmon': '#c85a53', + 'greenish grey': '#96ae8d', + 'jade': '#1fa774', + 'ugly green': '#7a9703', + 'dark beige': '#ac9362', + 'emerald': '#01a049', + 'pale red': '#d9544d', + 'light magenta': '#fa5ff7', + 'sky': '#82cafc', + 'light cyan': '#acfffc', + 'yellow orange': '#fcb001', + 'reddish purple': '#910951', + 'reddish pink': '#fe2c54', + 'orchid': '#c875c4', + 'dirty yellow': '#cdc50a', + 'orange red': '#fd411e', + 'deep red': '#9a0200', + 'orange brown': '#be6400', + 'cobalt blue': '#030aa7', + 'neon pink': '#fe019a', + 'rose pink': '#f7879a', + 'greyish purple': '#887191', + 'raspberry': '#b00149', + 'aqua green': '#12e193', + 'salmon pink': '#fe7b7c', + 'tangerine': '#ff9408', + 'brownish green': '#6a6e09', + 'red brown': '#8b2e16', + 'greenish brown': '#696112', + 'pumpkin': '#e17701', + 'pine green': '#0a481e', + 'charcoal': '#343837', + 'baby pink': '#ffb7ce', + 'cornflower': '#6a79f7', + 'blue violet': '#5d06e9', + 'chocolate': '#3d1c02', + 'greyish green': '#82a67d', + 'scarlet': '#be0119', + 'green yellow': '#c9ff27', + 'dark olive': '#373e02', + 'sienna': '#a9561e', + 'pastel purple': '#caa0ff', + 'terracotta': '#ca6641', + 'aqua blue': '#02d8e9', + 'sage green': '#88b378', + 'blood red': '#980002', + 'deep pink': '#cb0162', + 'grass': '#5cac2d', + 'moss': '#769958', + 'pastel blue': '#a2bffe', + 'bluish green': '#10a674', + 'green blue': '#06b48b', + 'dark tan': '#af884a', + 'greenish blue': '#0b8b87', + 'pale orange': '#ffa756', + 'vomit': '#a2a415', + 'forrest green': '#154406', + 
'dark lavender': '#856798', + 'dark violet': '#34013f', + 'purple blue': '#632de9', + 'dark cyan': '#0a888a', + 'olive drab': '#6f7632', + 'pinkish': '#d46a7e', + 'cobalt': '#1e488f', + 'neon purple': '#bc13fe', + 'light turquoise': '#7ef4cc', + 'apple green': '#76cd26', + 'dull green': '#74a662', + 'wine': '#80013f', + 'powder blue': '#b1d1fc', + 'off white': '#ffffe4', + 'electric blue': '#0652ff', + 'dark turquoise': '#045c5a', + 'blue purple': '#5729ce', + 'azure': '#069af3', + 'bright red': '#ff000d', + 'pinkish red': '#f10c45', + 'cornflower blue': '#5170d7', + 'light olive': '#acbf69', + 'grape': '#6c3461', + 'greyish blue': '#5e819d', + 'purplish blue': '#601ef9', + 'yellowish green': '#b0dd16', + 'greenish yellow': '#cdfd02', + 'medium blue': '#2c6fbb', + 'dusty rose': '#c0737a', + 'light violet': '#d6b4fc', + 'midnight blue': '#020035', + 'bluish purple': '#703be7', + 'red orange': '#fd3c06', + 'dark magenta': '#960056', + 'greenish': '#40a368', + 'ocean blue': '#03719c', + 'coral': '#fc5a50', + 'cream': '#ffffc2', + 'reddish brown': '#7f2b0a', + 'burnt sienna': '#b04e0f', + 'brick': '#a03623', + 'sage': '#87ae73', + 'grey green': '#789b73', + 'white': '#ffffff', + "robin's egg blue": '#98eff9', + 'moss green': '#658b38', + 'steel blue': '#5a7d9a', + 'eggplant': '#380835', + 'light yellow': '#fffe7a', + 'leaf green': '#5ca904', + 'light grey': '#d8dcd6', + 'puke': '#a5a502', + 'pinkish purple': '#d648d7', + 'sea blue': '#047495', + 'pale purple': '#b790d4', + 'slate blue': '#5b7c99', + 'blue grey': '#607c8e', + 'hunter green': '#0b4008', + 'fuchsia': '#ed0dd9', + 'crimson': '#8c000f', + 'pale yellow': '#ffff84', + 'ochre': '#bf9005', + 'mustard yellow': '#d2bd0a', + 'light red': '#ff474c', + 'cerulean': '#0485d1', + 'pale pink': '#ffcfdc', + 'deep blue': '#040273', + 'rust': '#a83c09', + 'light teal': '#90e4c1', + 'slate': '#516572', + 'goldenrod': '#fac205', + 'dark yellow': '#d5b60a', + 'dark grey': '#363737', + 'army green': '#4b5d16', + 'grey blue': 
'#6b8ba4', + 'seafoam': '#80f9ad', + 'puce': '#a57e52', + 'spring green': '#a9f971', + 'dark orange': '#c65102', + 'sand': '#e2ca76', + 'pastel green': '#b0ff9d', + 'mint': '#9ffeb0', + 'light orange': '#fdaa48', + 'bright pink': '#fe01b1', + 'chartreuse': '#c1f80a', + 'deep purple': '#36013f', + 'dark brown': '#341c02', + 'taupe': '#b9a281', + 'pea green': '#8eab12', + 'puke green': '#9aae07', + 'kelly green': '#02ab2e', + 'seafoam green': '#7af9ab', + 'blue green': '#137e6d', + 'khaki': '#aaa662', + 'burgundy': '#610023', + 'dark teal': '#014d4e', + 'brick red': '#8f1402', + 'royal purple': '#4b006e', + 'plum': '#580f41', + 'mint green': '#8fff9f', + 'gold': '#dbb40c', + 'baby blue': '#a2cffe', + 'yellow green': '#c0fb2d', + 'bright purple': '#be03fd', + 'dark red': '#840000', + 'pale blue': '#d0fefe', + 'grass green': '#3f9b0b', + 'navy': '#01153e', + 'aquamarine': '#04d8b2', + 'burnt orange': '#c04e01', + 'neon green': '#0cff0c', + 'bright blue': '#0165fc', + 'rose': '#cf6275', + 'light pink': '#ffd1df', + 'mustard': '#ceb301', + 'indigo': '#380282', + 'lime': '#aaff32', + 'sea green': '#53fca1', + 'periwinkle': '#8e82fe', + 'dark pink': '#cb416b', + 'olive green': '#677a04', + 'peach': '#ffb07c', + 'pale green': '#c7fdb5', + 'light brown': '#ad8150', + 'hot pink': '#ff028d', + 'black': '#000000', + 'lilac': '#cea2fd', + 'navy blue': '#001146', + 'royal blue': '#0504aa', + 'beige': '#e6daa6', + 'salmon': '#ff796c', + 'olive': '#6e750e', + 'maroon': '#650021', + 'bright green': '#01ff07', + 'dark purple': '#35063e', + 'mauve': '#ae7181', + 'forest green': '#06470c', + 'aqua': '#13eac9', + 'cyan': '#00ffff', + 'tan': '#d1b26f', + 'dark blue': '#00035b', + 'lavender': '#c79fef', + 'turquoise': '#06c2ac', + 'dark green': '#033500', + 'violet': '#9a0eea', + 'light purple': '#bf77f6', + 'lime green': '#89fe05', + 'grey': '#929591', + 'sky blue': '#75bbfd', + 'yellow': '#ffff14', + 'magenta': '#c20078', + 'light green': '#96f97b', + 'orange': '#f97306', + 'teal': 
'#029386', + 'light blue': '#95d0fc', + 'red': '#e50000', + 'brown': '#653700', + 'pink': '#ff81c0', + 'blue': '#0343df', + 'green': '#15b01a', + 'purple': '#7e1e9c'} + +# Normalize name to "xkcd:" to avoid name collisions. +XKCD_COLORS = {'xkcd:' + name: value for name, value in XKCD_COLORS.items()} + + +# https://drafts.csswg.org/css-color-4/#named-colors +CSS4_COLORS = { + 'aliceblue': '#F0F8FF', + 'antiquewhite': '#FAEBD7', + 'aqua': '#00FFFF', + 'aquamarine': '#7FFFD4', + 'azure': '#F0FFFF', + 'beige': '#F5F5DC', + 'bisque': '#FFE4C4', + 'black': '#000000', + 'blanchedalmond': '#FFEBCD', + 'blue': '#0000FF', + 'blueviolet': '#8A2BE2', + 'brown': '#A52A2A', + 'burlywood': '#DEB887', + 'cadetblue': '#5F9EA0', + 'chartreuse': '#7FFF00', + 'chocolate': '#D2691E', + 'coral': '#FF7F50', + 'cornflowerblue': '#6495ED', + 'cornsilk': '#FFF8DC', + 'crimson': '#DC143C', + 'cyan': '#00FFFF', + 'darkblue': '#00008B', + 'darkcyan': '#008B8B', + 'darkgoldenrod': '#B8860B', + 'darkgray': '#A9A9A9', + 'darkgreen': '#006400', + 'darkgrey': '#A9A9A9', + 'darkkhaki': '#BDB76B', + 'darkmagenta': '#8B008B', + 'darkolivegreen': '#556B2F', + 'darkorange': '#FF8C00', + 'darkorchid': '#9932CC', + 'darkred': '#8B0000', + 'darksalmon': '#E9967A', + 'darkseagreen': '#8FBC8F', + 'darkslateblue': '#483D8B', + 'darkslategray': '#2F4F4F', + 'darkslategrey': '#2F4F4F', + 'darkturquoise': '#00CED1', + 'darkviolet': '#9400D3', + 'deeppink': '#FF1493', + 'deepskyblue': '#00BFFF', + 'dimgray': '#696969', + 'dimgrey': '#696969', + 'dodgerblue': '#1E90FF', + 'firebrick': '#B22222', + 'floralwhite': '#FFFAF0', + 'forestgreen': '#228B22', + 'fuchsia': '#FF00FF', + 'gainsboro': '#DCDCDC', + 'ghostwhite': '#F8F8FF', + 'gold': '#FFD700', + 'goldenrod': '#DAA520', + 'gray': '#808080', + 'green': '#008000', + 'greenyellow': '#ADFF2F', + 'grey': '#808080', + 'honeydew': '#F0FFF0', + 'hotpink': '#FF69B4', + 'indianred': '#CD5C5C', + 'indigo': '#4B0082', + 'ivory': '#FFFFF0', + 'khaki': '#F0E68C', + 
'lavender': '#E6E6FA', + 'lavenderblush': '#FFF0F5', + 'lawngreen': '#7CFC00', + 'lemonchiffon': '#FFFACD', + 'lightblue': '#ADD8E6', + 'lightcoral': '#F08080', + 'lightcyan': '#E0FFFF', + 'lightgoldenrodyellow': '#FAFAD2', + 'lightgray': '#D3D3D3', + 'lightgreen': '#90EE90', + 'lightgrey': '#D3D3D3', + 'lightpink': '#FFB6C1', + 'lightsalmon': '#FFA07A', + 'lightseagreen': '#20B2AA', + 'lightskyblue': '#87CEFA', + 'lightslategray': '#778899', + 'lightslategrey': '#778899', + 'lightsteelblue': '#B0C4DE', + 'lightyellow': '#FFFFE0', + 'lime': '#00FF00', + 'limegreen': '#32CD32', + 'linen': '#FAF0E6', + 'magenta': '#FF00FF', + 'maroon': '#800000', + 'mediumaquamarine': '#66CDAA', + 'mediumblue': '#0000CD', + 'mediumorchid': '#BA55D3', + 'mediumpurple': '#9370DB', + 'mediumseagreen': '#3CB371', + 'mediumslateblue': '#7B68EE', + 'mediumspringgreen': '#00FA9A', + 'mediumturquoise': '#48D1CC', + 'mediumvioletred': '#C71585', + 'midnightblue': '#191970', + 'mintcream': '#F5FFFA', + 'mistyrose': '#FFE4E1', + 'moccasin': '#FFE4B5', + 'navajowhite': '#FFDEAD', + 'navy': '#000080', + 'oldlace': '#FDF5E6', + 'olive': '#808000', + 'olivedrab': '#6B8E23', + 'orange': '#FFA500', + 'orangered': '#FF4500', + 'orchid': '#DA70D6', + 'palegoldenrod': '#EEE8AA', + 'palegreen': '#98FB98', + 'paleturquoise': '#AFEEEE', + 'palevioletred': '#DB7093', + 'papayawhip': '#FFEFD5', + 'peachpuff': '#FFDAB9', + 'peru': '#CD853F', + 'pink': '#FFC0CB', + 'plum': '#DDA0DD', + 'powderblue': '#B0E0E6', + 'purple': '#800080', + 'rebeccapurple': '#663399', + 'red': '#FF0000', + 'rosybrown': '#BC8F8F', + 'royalblue': '#4169E1', + 'saddlebrown': '#8B4513', + 'salmon': '#FA8072', + 'sandybrown': '#F4A460', + 'seagreen': '#2E8B57', + 'seashell': '#FFF5EE', + 'sienna': '#A0522D', + 'silver': '#C0C0C0', + 'skyblue': '#87CEEB', + 'slateblue': '#6A5ACD', + 'slategray': '#708090', + 'slategrey': '#708090', + 'snow': '#FFFAFA', + 'springgreen': '#00FF7F', + 'steelblue': '#4682B4', + 'tan': '#D2B48C', + 'teal': 
'#008080', + 'thistle': '#D8BFD8', + 'tomato': '#FF6347', + 'turquoise': '#40E0D0', + 'violet': '#EE82EE', + 'wheat': '#F5DEB3', + 'white': '#FFFFFF', + 'whitesmoke': '#F5F5F5', + 'yellow': '#FFFF00', + 'yellowgreen': '#9ACD32'} diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_constrained_layout.py b/.venv/lib/python3.9/site-packages/matplotlib/_constrained_layout.py new file mode 100644 index 00000000..d360eb66 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/matplotlib/_constrained_layout.py @@ -0,0 +1,713 @@ +""" +Adjust subplot layouts so that there are no overlapping axes or axes +decorations. All axes decorations are dealt with (labels, ticks, titles, +ticklabels) and some dependent artists are also dealt with (colorbar, +suptitle). + +Layout is done via `~matplotlib.gridspec`, with one constraint per gridspec, +so it is possible to have overlapping axes if the gridspecs overlap (i.e. +using `~matplotlib.gridspec.GridSpecFromSubplotSpec`). Axes placed using +``figure.subplots()`` or ``figure.add_subplots()`` will participate in the +layout. Axes manually placed via ``figure.add_axes()`` will not. + +See Tutorial: :doc:`/tutorials/intermediate/constrainedlayout_guide` +""" + +import logging + +import numpy as np + +from matplotlib import _api +import matplotlib.transforms as mtransforms +import matplotlib._layoutgrid as mlayoutgrid + + +_log = logging.getLogger(__name__) + +""" +General idea: +------------- + +First, a figure has a gridspec that divides the figure into nrows and ncols, +with heights and widths set by ``height_ratios`` and ``width_ratios``, +often just set to 1 for an equal grid. + +Subplotspecs that are derived from this gridspec can contain either a +``SubPanel``, a ``GridSpecFromSubplotSpec``, or an axes. The ``SubPanel`` and +``GridSpecFromSubplotSpec`` are dealt with recursively and each contain an +analogous layout. + +Each ``GridSpec`` has a ``_layoutgrid`` attached to it. 
The ``_layoutgrid`` +has the same logical layout as the ``GridSpec``. Each row of the grid spec +has a top and bottom "margin" and each column has a left and right "margin". +The "inner" height of each row is constrained to be the same (or as modified +by ``height_ratio``), and the "inner" width of each column is +constrained to be the same (as modified by ``width_ratio``), where "inner" +is the width or height of each column/row minus the size of the margins. + +Then the size of the margins for each row and column are determined as the +max width of the decorators on each axes that has decorators in that margin. +For instance, a normal axes would have a left margin that includes the +left ticklabels, and the ylabel if it exists. The right margin may include a +colorbar, the bottom margin the xaxis decorations, and the top margin the +title. + +With these constraints, the solver then finds appropriate bounds for the +columns and rows. It's possible that the margins take up the whole figure, +in which case the algorithm is not applied and a warning is raised. + +See the tutorial doc:`/tutorials/intermediate/constrainedlayout_guide` +for more discussion of the algorithm with examples. +""" + + +###################################################### +def do_constrained_layout(fig, renderer, h_pad, w_pad, + hspace=None, wspace=None): + """ + Do the constrained_layout. Called at draw time in + ``figure.constrained_layout()`` + + Parameters + ---------- + fig : Figure + ``Figure`` instance to do the layout in. + + renderer : Renderer + Renderer to use. + + h_pad, w_pad : float + Padding around the axes elements in figure-normalized units. + + hspace, wspace : float + Fraction of the figure to dedicate to space between the + axes. These are evenly spread between the gaps between the axes. + A value of 0.2 for a three-column layout would have a space + of 0.1 of the figure width between each column. + If h/wspace < h/w_pad, then the pads are used instead. 
+ + Returns + ------- + layoutgrid : private debugging structure + """ + + # make layoutgrid tree... + layoutgrids = make_layoutgrids(fig, None) + if not layoutgrids['hasgrids']: + _api.warn_external('There are no gridspecs with layoutgrids. ' + 'Possibly did not call parent GridSpec with the' + ' "figure" keyword') + return + + for _ in range(2): + # do the algorithm twice. This has to be done because decorations + # change size after the first re-position (i.e. x/yticklabels get + # larger/smaller). This second reposition tends to be much milder, + # so doing twice makes things work OK. + + # make margins for all the axes and subfigures in the + # figure. Add margins for colorbars... + make_layout_margins(layoutgrids, fig, renderer, h_pad=h_pad, + w_pad=w_pad, hspace=hspace, wspace=wspace) + make_margin_suptitles(layoutgrids, fig, renderer, h_pad=h_pad, + w_pad=w_pad) + + # if a layout is such that a columns (or rows) margin has no + # constraints, we need to make all such instances in the grid + # match in margin size. + match_submerged_margins(layoutgrids, fig) + + # update all the variables in the layout. + layoutgrids[fig].update_variables() + + if check_no_collapsed_axes(layoutgrids, fig): + reposition_axes(layoutgrids, fig, renderer, h_pad=h_pad, + w_pad=w_pad, hspace=hspace, wspace=wspace) + else: + _api.warn_external('constrained_layout not applied because ' + 'axes sizes collapsed to zero. Try making ' + 'figure larger or axes decorations smaller.') + reset_margins(layoutgrids, fig) + return layoutgrids + + +def make_layoutgrids(fig, layoutgrids): + """ + Make the layoutgrid tree. + + (Sub)Figures get a layoutgrid so we can have figure margins. + + Gridspecs that are attached to axes get a layoutgrid so axes + can have margins. 
+ """ + + if layoutgrids is None: + layoutgrids = dict() + layoutgrids['hasgrids'] = False + if not hasattr(fig, '_parent'): + # top figure + layoutgrids[fig] = mlayoutgrid.LayoutGrid(parent=None, name='figlb') + else: + # subfigure + gs = fig._subplotspec.get_gridspec() + # it is possible the gridspec containing this subfigure hasn't + # been added to the tree yet: + layoutgrids = make_layoutgrids_gs(layoutgrids, gs) + # add the layoutgrid for the subfigure: + parentlb = layoutgrids[gs] + layoutgrids[fig] = mlayoutgrid.LayoutGrid( + parent=parentlb, + name='panellb', + parent_inner=True, + nrows=1, ncols=1, + parent_pos=(fig._subplotspec.rowspan, + fig._subplotspec.colspan)) + # recursively do all subfigures in this figure... + for sfig in fig.subfigs: + layoutgrids = make_layoutgrids(sfig, layoutgrids) + + # for each axes at the local level add its gridspec: + for ax in fig._localaxes.as_list(): + if hasattr(ax, 'get_subplotspec'): + gs = ax.get_subplotspec().get_gridspec() + layoutgrids = make_layoutgrids_gs(layoutgrids, gs) + + return layoutgrids + + +def make_layoutgrids_gs(layoutgrids, gs): + """ + Make the layoutgrid for a gridspec (and anything nested in the gridspec) + """ + + if gs in layoutgrids or gs.figure is None: + return layoutgrids + # in order to do constrained_layout there has to be at least *one* + # gridspec in the tree: + layoutgrids['hasgrids'] = True + if not hasattr(gs, '_subplot_spec'): + # normal gridspec + parent = layoutgrids[gs.figure] + layoutgrids[gs] = mlayoutgrid.LayoutGrid( + parent=parent, + parent_inner=True, + name='gridspec', + ncols=gs._ncols, nrows=gs._nrows, + width_ratios=gs.get_width_ratios(), + height_ratios=gs.get_height_ratios()) + else: + # this is a gridspecfromsubplotspec: + subplot_spec = gs._subplot_spec + parentgs = subplot_spec.get_gridspec() + # if a nested gridspec it is possible the parent is not in there yet: + if parentgs not in layoutgrids: + layoutgrids = make_layoutgrids_gs(layoutgrids, parentgs) + 
subspeclb = layoutgrids[parentgs] + # gridspecfromsubplotspec need an outer container: + if f'{gs}top' not in layoutgrids: + layoutgrids[f'{gs}top'] = mlayoutgrid.LayoutGrid( + parent=subspeclb, + name='top', + nrows=1, ncols=1, + parent_pos=(subplot_spec.rowspan, subplot_spec.colspan)) + layoutgrids[gs] = mlayoutgrid.LayoutGrid( + parent=layoutgrids[f'{gs}top'], + name='gridspec', + nrows=gs._nrows, ncols=gs._ncols, + width_ratios=gs.get_width_ratios(), + height_ratios=gs.get_height_ratios()) + return layoutgrids + + +def check_no_collapsed_axes(layoutgrids, fig): + """ + Check that no axes have collapsed to zero size. + """ + for sfig in fig.subfigs: + ok = check_no_collapsed_axes(layoutgrids, sfig) + if not ok: + return False + + for ax in fig.axes: + if hasattr(ax, 'get_subplotspec'): + gs = ax.get_subplotspec().get_gridspec() + if gs in layoutgrids: + lg = layoutgrids[gs] + for i in range(gs.nrows): + for j in range(gs.ncols): + bb = lg.get_inner_bbox(i, j) + if bb.width <= 0 or bb.height <= 0: + return False + return True + + +def get_margin_from_padding(obj, *, w_pad=0, h_pad=0, + hspace=0, wspace=0): + + ss = obj._subplotspec + gs = ss.get_gridspec() + + if hasattr(gs, 'hspace'): + _hspace = (gs.hspace if gs.hspace is not None else hspace) + _wspace = (gs.wspace if gs.wspace is not None else wspace) + else: + _hspace = (gs._hspace if gs._hspace is not None else hspace) + _wspace = (gs._wspace if gs._wspace is not None else wspace) + + _wspace = _wspace / 2 + _hspace = _hspace / 2 + + nrows, ncols = gs.get_geometry() + # there are two margins for each direction. The "cb" + # margins are for pads and colorbars, the non-"cb" are + # for the axes decorations (labels etc). 
+ margin = {'leftcb': w_pad, 'rightcb': w_pad, + 'bottomcb': h_pad, 'topcb': h_pad, + 'left': 0, 'right': 0, + 'top': 0, 'bottom': 0} + if _wspace / ncols > w_pad: + if ss.colspan.start > 0: + margin['leftcb'] = _wspace / ncols + if ss.colspan.stop < ncols: + margin['rightcb'] = _wspace / ncols + if _hspace / nrows > h_pad: + if ss.rowspan.stop < nrows: + margin['bottomcb'] = _hspace / nrows + if ss.rowspan.start > 0: + margin['topcb'] = _hspace / nrows + + return margin + + +def make_layout_margins(layoutgrids, fig, renderer, *, w_pad=0, h_pad=0, + hspace=0, wspace=0): + """ + For each axes, make a margin between the *pos* layoutbox and the + *axes* layoutbox be a minimum size that can accommodate the + decorations on the axis. + + Then make room for colorbars. + """ + for sfig in fig.subfigs: # recursively make child panel margins + ss = sfig._subplotspec + make_layout_margins(layoutgrids, sfig, renderer, + w_pad=w_pad, h_pad=h_pad, + hspace=hspace, wspace=wspace) + + margins = get_margin_from_padding(sfig, w_pad=0, h_pad=0, + hspace=hspace, wspace=wspace) + layoutgrids[sfig].parent.edit_outer_margin_mins(margins, ss) + + for ax in fig._localaxes.as_list(): + if not hasattr(ax, 'get_subplotspec') or not ax.get_in_layout(): + continue + + ss = ax.get_subplotspec() + gs = ss.get_gridspec() + + if gs not in layoutgrids: + return + + margin = get_margin_from_padding(ax, w_pad=w_pad, h_pad=h_pad, + hspace=hspace, wspace=wspace) + pos, bbox = get_pos_and_bbox(ax, renderer) + # the margin is the distance between the bounding box of the axes + # and its position (plus the padding from above) + margin['left'] += pos.x0 - bbox.x0 + margin['right'] += bbox.x1 - pos.x1 + # remember that rows are ordered from top: + margin['bottom'] += pos.y0 - bbox.y0 + margin['top'] += bbox.y1 - pos.y1 + + # make margin for colorbars. These margins go in the + # padding margin, versus the margin for axes decorators. 
+ for cbax in ax._colorbars: + # note pad is a fraction of the parent width... + pad = colorbar_get_pad(layoutgrids, cbax) + # colorbars can be child of more than one subplot spec: + cbp_rspan, cbp_cspan = get_cb_parent_spans(cbax) + loc = cbax._colorbar_info['location'] + cbpos, cbbbox = get_pos_and_bbox(cbax, renderer) + if loc == 'right': + if cbp_cspan.stop == ss.colspan.stop: + # only increase if the colorbar is on the right edge + margin['rightcb'] += cbbbox.width + pad + elif loc == 'left': + if cbp_cspan.start == ss.colspan.start: + # only increase if the colorbar is on the left edge + margin['leftcb'] += cbbbox.width + pad + elif loc == 'top': + if cbp_rspan.start == ss.rowspan.start: + margin['topcb'] += cbbbox.height + pad + else: + if cbp_rspan.stop == ss.rowspan.stop: + margin['bottomcb'] += cbbbox.height + pad + # If the colorbars are wider than the parent box in the + # cross direction + if loc in ['top', 'bottom']: + if (cbp_cspan.start == ss.colspan.start and + cbbbox.x0 < bbox.x0): + margin['left'] += bbox.x0 - cbbbox.x0 + if (cbp_cspan.stop == ss.colspan.stop and + cbbbox.x1 > bbox.x1): + margin['right'] += cbbbox.x1 - bbox.x1 + # or taller: + if loc in ['left', 'right']: + if (cbp_rspan.stop == ss.rowspan.stop and + cbbbox.y0 < bbox.y0): + margin['bottom'] += bbox.y0 - cbbbox.y0 + if (cbp_rspan.start == ss.rowspan.start and + cbbbox.y1 > bbox.y1): + margin['top'] += cbbbox.y1 - bbox.y1 + # pass the new margins down to the layout grid for the solution... + layoutgrids[gs].edit_outer_margin_mins(margin, ss) + + +def make_margin_suptitles(layoutgrids, fig, renderer, *, w_pad=0, h_pad=0): + # Figure out how large the suptitle is and make the + # top level figure margin larger. 
+ + inv_trans_fig = fig.transFigure.inverted().transform_bbox + # get the h_pad and w_pad as distances in the local subfigure coordinates: + padbox = mtransforms.Bbox([[0, 0], [w_pad, h_pad]]) + padbox = (fig.transFigure - + fig.transSubfigure).transform_bbox(padbox) + h_pad_local = padbox.height + w_pad_local = padbox.width + + for sfig in fig.subfigs: + make_margin_suptitles(layoutgrids, sfig, renderer, + w_pad=w_pad, h_pad=h_pad) + + if fig._suptitle is not None and fig._suptitle.get_in_layout(): + p = fig._suptitle.get_position() + if getattr(fig._suptitle, '_autopos', False): + fig._suptitle.set_position((p[0], 1 - h_pad_local)) + bbox = inv_trans_fig(fig._suptitle.get_tightbbox(renderer)) + layoutgrids[fig].edit_margin_min('top', bbox.height + 2 * h_pad) + + if fig._supxlabel is not None and fig._supxlabel.get_in_layout(): + p = fig._supxlabel.get_position() + if getattr(fig._supxlabel, '_autopos', False): + fig._supxlabel.set_position((p[0], h_pad_local)) + bbox = inv_trans_fig(fig._supxlabel.get_tightbbox(renderer)) + layoutgrids[fig].edit_margin_min('bottom', + bbox.height + 2 * h_pad) + + if fig._supylabel is not None and fig._supylabel.get_in_layout(): + p = fig._supylabel.get_position() + if getattr(fig._supylabel, '_autopos', False): + fig._supylabel.set_position((w_pad_local, p[1])) + bbox = inv_trans_fig(fig._supylabel.get_tightbbox(renderer)) + layoutgrids[fig].edit_margin_min('left', bbox.width + 2 * w_pad) + + +def match_submerged_margins(layoutgrids, fig): + """ + Make the margins that are submerged inside an Axes the same size. + + This allows axes that span two columns (or rows) that are offset + from one another to have the same size. 
+ + This gives the proper layout for something like:: + fig = plt.figure(constrained_layout=True) + axs = fig.subplot_mosaic("AAAB\nCCDD") + + Without this routine, the axes D will be wider than C, because the + margin width between the two columns in C has no width by default, + whereas the margins between the two columns of D are set by the + width of the margin between A and B. However, obviously the user would + like C and D to be the same size, so we need to add constraints to these + "submerged" margins. + + This routine makes all the interior margins the same, and the spacing + between the three columns in A and the two column in C are all set to the + margins between the two columns of D. + + See test_constrained_layout::test_constrained_layout12 for an example. + """ + + for sfig in fig.subfigs: + match_submerged_margins(layoutgrids, sfig) + + axs = [a for a in fig.get_axes() if (hasattr(a, 'get_subplotspec') + and a.get_in_layout())] + + for ax1 in axs: + ss1 = ax1.get_subplotspec() + if ss1.get_gridspec() not in layoutgrids: + axs.remove(ax1) + continue + lg1 = layoutgrids[ss1.get_gridspec()] + + # interior columns: + if len(ss1.colspan) > 1: + maxsubl = np.max( + lg1.margin_vals['left'][ss1.colspan[1:]] + + lg1.margin_vals['leftcb'][ss1.colspan[1:]] + ) + maxsubr = np.max( + lg1.margin_vals['right'][ss1.colspan[:-1]] + + lg1.margin_vals['rightcb'][ss1.colspan[:-1]] + ) + for ax2 in axs: + ss2 = ax2.get_subplotspec() + lg2 = layoutgrids[ss2.get_gridspec()] + if lg2 is not None and len(ss2.colspan) > 1: + maxsubl2 = np.max( + lg2.margin_vals['left'][ss2.colspan[1:]] + + lg2.margin_vals['leftcb'][ss2.colspan[1:]]) + if maxsubl2 > maxsubl: + maxsubl = maxsubl2 + maxsubr2 = np.max( + lg2.margin_vals['right'][ss2.colspan[:-1]] + + lg2.margin_vals['rightcb'][ss2.colspan[:-1]]) + if maxsubr2 > maxsubr: + maxsubr = maxsubr2 + for i in ss1.colspan[1:]: + lg1.edit_margin_min('left', maxsubl, cell=i) + for i in ss1.colspan[:-1]: + lg1.edit_margin_min('right', 
maxsubr, cell=i) + + # interior rows: + if len(ss1.rowspan) > 1: + maxsubt = np.max( + lg1.margin_vals['top'][ss1.rowspan[1:]] + + lg1.margin_vals['topcb'][ss1.rowspan[1:]] + ) + maxsubb = np.max( + lg1.margin_vals['bottom'][ss1.rowspan[:-1]] + + lg1.margin_vals['bottomcb'][ss1.rowspan[:-1]] + ) + + for ax2 in axs: + ss2 = ax2.get_subplotspec() + lg2 = layoutgrids[ss2.get_gridspec()] + if lg2 is not None: + if len(ss2.rowspan) > 1: + maxsubt = np.max([np.max( + lg2.margin_vals['top'][ss2.rowspan[1:]] + + lg2.margin_vals['topcb'][ss2.rowspan[1:]] + ), maxsubt]) + maxsubb = np.max([np.max( + lg2.margin_vals['bottom'][ss2.rowspan[:-1]] + + lg2.margin_vals['bottomcb'][ss2.rowspan[:-1]] + ), maxsubb]) + for i in ss1.rowspan[1:]: + lg1.edit_margin_min('top', maxsubt, cell=i) + for i in ss1.rowspan[:-1]: + lg1.edit_margin_min('bottom', maxsubb, cell=i) + + +def get_cb_parent_spans(cbax): + """ + Figure out which subplotspecs this colorbar belongs to: + """ + rowstart = np.inf + rowstop = -np.inf + colstart = np.inf + colstop = -np.inf + for parent in cbax._colorbar_info['parents']: + ss = parent.get_subplotspec() + rowstart = min(ss.rowspan.start, rowstart) + rowstop = max(ss.rowspan.stop, rowstop) + colstart = min(ss.colspan.start, colstart) + colstop = max(ss.colspan.stop, colstop) + + rowspan = range(rowstart, rowstop) + colspan = range(colstart, colstop) + return rowspan, colspan + + +def get_pos_and_bbox(ax, renderer): + """ + Get the position and the bbox for the axes. + + Parameters + ---------- + ax + renderer + + Returns + ------- + pos : Bbox + Position in figure coordinates. + bbox : Bbox + Tight bounding box in figure coordinates. 
+ + """ + fig = ax.figure + pos = ax.get_position(original=True) + # pos is in panel co-ords, but we need in figure for the layout + pos = pos.transformed(fig.transSubfigure - fig.transFigure) + try: + tightbbox = ax.get_tightbbox(renderer=renderer, for_layout_only=True) + except TypeError: + tightbbox = ax.get_tightbbox(renderer=renderer) + + if tightbbox is None: + bbox = pos + else: + bbox = tightbbox.transformed(fig.transFigure.inverted()) + return pos, bbox + + +def reposition_axes(layoutgrids, fig, renderer, *, + w_pad=0, h_pad=0, hspace=0, wspace=0): + """ + Reposition all the axes based on the new inner bounding box. + """ + trans_fig_to_subfig = fig.transFigure - fig.transSubfigure + for sfig in fig.subfigs: + bbox = layoutgrids[sfig].get_outer_bbox() + sfig._redo_transform_rel_fig( + bbox=bbox.transformed(trans_fig_to_subfig)) + reposition_axes(layoutgrids, sfig, renderer, + w_pad=w_pad, h_pad=h_pad, + wspace=wspace, hspace=hspace) + + for ax in fig._localaxes.as_list(): + if not hasattr(ax, 'get_subplotspec') or not ax.get_in_layout(): + continue + + # grid bbox is in Figure coordinates, but we specify in panel + # coordinates... + ss = ax.get_subplotspec() + gs = ss.get_gridspec() + nrows, ncols = gs.get_geometry() + if gs not in layoutgrids: + return + + bbox = layoutgrids[gs].get_inner_bbox(rows=ss.rowspan, + cols=ss.colspan) + + # transform from figure to panel for set_position: + newbbox = trans_fig_to_subfig.transform_bbox(bbox) + ax._set_position(newbbox) + + # move the colorbars: + # we need to keep track of oldw and oldh if there is more than + # one colorbar: + offset = {'left': 0, 'right': 0, 'bottom': 0, 'top': 0} + for nn, cbax in enumerate(ax._colorbars[::-1]): + if ax == cbax._colorbar_info['parents'][0]: + reposition_colorbar(layoutgrids, cbax, renderer, + offset=offset) + + +def reposition_colorbar(layoutgrids, cbax, renderer, *, offset=None): + """ + Place the colorbar in its new place. 
+ + Parameters + ---------- + cbax : Axes + Axes for the colorbar + + renderer : + w_pad, h_pad : float + width and height padding (in fraction of figure) + hspace, wspace : float + width and height padding as fraction of figure size divided by + number of columns or rows + margin : array-like + offset the colorbar needs to be pushed to in order to + account for multiple colorbars + """ + + parents = cbax._colorbar_info['parents'] + gs = parents[0].get_gridspec() + fig = cbax.figure + trans_fig_to_subfig = fig.transFigure - fig.transSubfigure + + cb_rspans, cb_cspans = get_cb_parent_spans(cbax) + bboxparent = layoutgrids[gs].get_bbox_for_cb(rows=cb_rspans, + cols=cb_cspans) + pb = layoutgrids[gs].get_inner_bbox(rows=cb_rspans, cols=cb_cspans) + + location = cbax._colorbar_info['location'] + anchor = cbax._colorbar_info['anchor'] + fraction = cbax._colorbar_info['fraction'] + aspect = cbax._colorbar_info['aspect'] + shrink = cbax._colorbar_info['shrink'] + + cbpos, cbbbox = get_pos_and_bbox(cbax, renderer) + + # Colorbar gets put at extreme edge of outer bbox of the subplotspec + # It needs to be moved in by: 1) a pad 2) its "margin" 3) by + # any colorbars already added at this location: + cbpad = colorbar_get_pad(layoutgrids, cbax) + if location in ('left', 'right'): + # fraction and shrink are fractions of parent + pbcb = pb.shrunk(fraction, shrink).anchored(anchor, pb) + # The colorbar is at the left side of the parent. 
Need + # to translate to right (or left) + if location == 'right': + lmargin = cbpos.x0 - cbbbox.x0 + dx = bboxparent.x1 - pbcb.x0 + offset['right'] + dx += cbpad + lmargin + offset['right'] += cbbbox.width + cbpad + pbcb = pbcb.translated(dx, 0) + else: + lmargin = cbpos.x0 - cbbbox.x0 + dx = bboxparent.x0 - pbcb.x0 # edge of parent + dx += -cbbbox.width - cbpad + lmargin - offset['left'] + offset['left'] += cbbbox.width + cbpad + pbcb = pbcb.translated(dx, 0) + else: # horizontal axes: + pbcb = pb.shrunk(shrink, fraction).anchored(anchor, pb) + if location == 'top': + bmargin = cbpos.y0 - cbbbox.y0 + dy = bboxparent.y1 - pbcb.y0 + offset['top'] + dy += cbpad + bmargin + offset['top'] += cbbbox.height + cbpad + pbcb = pbcb.translated(0, dy) + else: + bmargin = cbpos.y0 - cbbbox.y0 + dy = bboxparent.y0 - pbcb.y0 + dy += -cbbbox.height - cbpad + bmargin - offset['bottom'] + offset['bottom'] += cbbbox.height + cbpad + pbcb = pbcb.translated(0, dy) + + pbcb = trans_fig_to_subfig.transform_bbox(pbcb) + cbax.set_transform(fig.transSubfigure) + cbax._set_position(pbcb) + cbax.set_anchor(anchor) + if location in ['bottom', 'top']: + aspect = 1 / aspect + cbax.set_box_aspect(aspect) + cbax.set_aspect('auto') + return offset + + +def reset_margins(layoutgrids, fig): + """ + Reset the margins in the layoutboxes of fig. + + Margins are usually set as a minimum, so if the figure gets smaller + the minimum needs to be zero in order for it to grow again. 
+ """ + for sfig in fig.subfigs: + reset_margins(layoutgrids, sfig) + for ax in fig.axes: + if hasattr(ax, 'get_subplotspec') and ax.get_in_layout(): + ss = ax.get_subplotspec() + gs = ss.get_gridspec() + if gs in layoutgrids: + layoutgrids[gs].reset_margins() + layoutgrids[fig].reset_margins() + + +def colorbar_get_pad(layoutgrids, cax): + parents = cax._colorbar_info['parents'] + gs = parents[0].get_gridspec() + + cb_rspans, cb_cspans = get_cb_parent_spans(cax) + bboxouter = layoutgrids[gs].get_inner_bbox(rows=cb_rspans, cols=cb_cspans) + + if cax._colorbar_info['location'] in ['right', 'left']: + size = bboxouter.width + else: + size = bboxouter.height + + return cax._colorbar_info['pad'] * size diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_contour.cpython-39-x86_64-linux-gnu.so b/.venv/lib/python3.9/site-packages/matplotlib/_contour.cpython-39-x86_64-linux-gnu.so new file mode 100644 index 00000000..b9b8463c Binary files /dev/null and b/.venv/lib/python3.9/site-packages/matplotlib/_contour.cpython-39-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_enums.py b/.venv/lib/python3.9/site-packages/matplotlib/_enums.py new file mode 100644 index 00000000..3e4393e7 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/matplotlib/_enums.py @@ -0,0 +1,183 @@ +""" +Enums representing sets of strings that Matplotlib uses as input parameters. + +Matplotlib often uses simple data types like strings or tuples to define a +concept; e.g. the line capstyle can be specified as one of 'butt', 'round', +or 'projecting'. The classes in this module are used internally and serve to +document these concepts formally. + +As an end-user you will not use these classes directly, but only the values +they define. 
+""" + +from enum import Enum, auto +from matplotlib import docstring + + +class _AutoStringNameEnum(Enum): + """Automate the ``name = 'name'`` part of making a (str, Enum).""" + + def _generate_next_value_(name, start, count, last_values): + return name + + def __hash__(self): + return str(self).__hash__() + + +class JoinStyle(str, _AutoStringNameEnum): + """ + Define how the connection between two line segments is drawn. + + For a visual impression of each *JoinStyle*, `view these docs online + `, or run `JoinStyle.demo`. + + Lines in Matplotlib are typically defined by a 1D `~.path.Path` and a + finite ``linewidth``, where the underlying 1D `~.path.Path` represents the + center of the stroked line. + + By default, `~.backend_bases.GraphicsContextBase` defines the boundaries of + a stroked line to simply be every point within some radius, + ``linewidth/2``, away from any point of the center line. However, this + results in corners appearing "rounded", which may not be the desired + behavior if you are drawing, for example, a polygon or pointed star. + + **Supported values:** + + .. rst-class:: value-list + + 'miter' + the "arrow-tip" style. Each boundary of the filled-in area will + extend in a straight line parallel to the tangent vector of the + centerline at the point it meets the corner, until they meet in a + sharp point. + 'round' + stokes every point within a radius of ``linewidth/2`` of the center + lines. + 'bevel' + the "squared-off" style. It can be thought of as a rounded corner + where the "circular" part of the corner has been cut off. + + .. note:: + + Very long miter tips are cut off (to form a *bevel*) after a + backend-dependent limit called the "miter limit", which specifies the + maximum allowed ratio of miter length to line width. 
For example, the + PDF backend uses the default value of 10 specified by the PDF standard, + while the SVG backend does not even specify the miter limit, resulting + in a default value of 4 per the SVG specification. Matplotlib does not + currently allow the user to adjust this parameter. + + A more detailed description of the effect of a miter limit can be found + in the `Mozilla Developer Docs + `_ + + .. plot:: + :alt: Demo of possible JoinStyle's + + from matplotlib._enums import JoinStyle + JoinStyle.demo() + + """ + + miter = auto() + round = auto() + bevel = auto() + + @staticmethod + def demo(): + """Demonstrate how each JoinStyle looks for various join angles.""" + import numpy as np + import matplotlib.pyplot as plt + + def plot_angle(ax, x, y, angle, style): + phi = np.radians(angle) + xx = [x + .5, x, x + .5*np.cos(phi)] + yy = [y, y, y + .5*np.sin(phi)] + ax.plot(xx, yy, lw=12, color='tab:blue', solid_joinstyle=style) + ax.plot(xx, yy, lw=1, color='black') + ax.plot(xx[1], yy[1], 'o', color='tab:red', markersize=3) + + fig, ax = plt.subplots(figsize=(5, 4), constrained_layout=True) + ax.set_title('Join style') + for x, style in enumerate(['miter', 'round', 'bevel']): + ax.text(x, 5, style) + for y, angle in enumerate([20, 45, 60, 90, 120]): + plot_angle(ax, x, y, angle, style) + if x == 0: + ax.text(-1.3, y, f'{angle} degrees') + ax.set_xlim(-1.5, 2.75) + ax.set_ylim(-.5, 5.5) + ax.set_axis_off() + fig.show() + + +JoinStyle.input_description = "{" \ + + ", ".join([f"'{js.name}'" for js in JoinStyle]) \ + + "}" + + +class CapStyle(str, _AutoStringNameEnum): + r""" + Define how the two endpoints (caps) of an unclosed line are drawn. + + How to draw the start and end points of lines that represent a closed curve + (i.e. that end in a `~.path.Path.CLOSEPOLY`) is controlled by the line's + `JoinStyle`. For all other lines, how the start and end points are drawn is + controlled by the *CapStyle*. 
+ + For a visual impression of each *CapStyle*, `view these docs online + ` or run `CapStyle.demo`. + + **Supported values:** + + .. rst-class:: value-list + + 'butt' + the line is squared off at its endpoint. + 'projecting' + the line is squared off as in *butt*, but the filled in area + extends beyond the endpoint a distance of ``linewidth/2``. + 'round' + like *butt*, but a semicircular cap is added to the end of the + line, of radius ``linewidth/2``. + + .. plot:: + :alt: Demo of possible CapStyle's + + from matplotlib._enums import CapStyle + CapStyle.demo() + + """ + butt = 'butt' + projecting = 'projecting' + round = 'round' + + @staticmethod + def demo(): + """Demonstrate how each CapStyle looks for a thick line segment.""" + import matplotlib.pyplot as plt + + fig = plt.figure(figsize=(4, 1.2)) + ax = fig.add_axes([0, 0, 1, 0.8]) + ax.set_title('Cap style') + + for x, style in enumerate(['butt', 'round', 'projecting']): + ax.text(x+0.25, 0.85, style, ha='center') + xx = [x, x+0.5] + yy = [0, 0] + ax.plot(xx, yy, lw=12, color='tab:blue', solid_capstyle=style) + ax.plot(xx, yy, lw=1, color='black') + ax.plot(xx, yy, 'o', color='tab:red', markersize=3) + ax.text(2.25, 0.55, '(default)', ha='center') + + ax.set_ylim(-.5, 1.5) + ax.set_axis_off() + fig.show() + + +CapStyle.input_description = "{" \ + + ", ".join([f"'{cs.name}'" for cs in CapStyle]) \ + + "}" + +docstring.interpd.update({'JoinStyle': JoinStyle.input_description, + 'CapStyle': CapStyle.input_description}) diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_image.cpython-39-x86_64-linux-gnu.so b/.venv/lib/python3.9/site-packages/matplotlib/_image.cpython-39-x86_64-linux-gnu.so new file mode 100644 index 00000000..396d0e85 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/matplotlib/_image.cpython-39-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_internal_utils.py b/.venv/lib/python3.9/site-packages/matplotlib/_internal_utils.py new file 
mode 100644 index 00000000..0223aa59 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/matplotlib/_internal_utils.py @@ -0,0 +1,64 @@ +""" +Internal debugging utilities, that are not expected to be used in the rest of +the codebase. + +WARNING: Code in this module may change without prior notice! +""" + +from io import StringIO +from pathlib import Path +import subprocess + +from matplotlib.transforms import TransformNode + + +def graphviz_dump_transform(transform, dest, *, highlight=None): + """ + Generate a graphical representation of the transform tree for *transform* + using the :program:`dot` program (which this function depends on). The + output format (png, dot, etc.) is determined from the suffix of *dest*. + + Parameters + ---------- + transform : `~matplotlib.transform.Transform` + The represented transform. + dest : str + Output filename. The extension must be one of the formats supported + by :program:`dot`, e.g. png, svg, dot, ... + (see https://www.graphviz.org/doc/info/output.html). + highlight : list of `~matplotlib.transform.Transform` or None + The transforms in the tree to be drawn in bold. + If *None*, *transform* is highlighted. 
+ """ + + if highlight is None: + highlight = [transform] + seen = set() + + def recurse(root, buf): + if id(root) in seen: + return + seen.add(id(root)) + props = {} + label = type(root).__name__ + if root._invalid: + label = f'[{label}]' + if root in highlight: + props['style'] = 'bold' + props['shape'] = 'box' + props['label'] = '"%s"' % label + props = ' '.join(map('{0[0]}={0[1]}'.format, props.items())) + buf.write(f'{id(root)} [{props}];\n') + for key, val in vars(root).items(): + if isinstance(val, TransformNode) and id(root) in val._parents: + buf.write(f'"{id(root)}" -> "{id(val)}" ' + f'[label="{key}", fontsize=10];\n') + recurse(val, buf) + + buf = StringIO() + buf.write('digraph G {\n') + recurse(transform, buf) + buf.write('}\n') + subprocess.run( + ['dot', '-T', Path(dest).suffix[1:], '-o', dest], + input=buf.getvalue().encode('utf-8'), check=True) diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_layoutgrid.py b/.venv/lib/python3.9/site-packages/matplotlib/_layoutgrid.py new file mode 100644 index 00000000..8b7b140f --- /dev/null +++ b/.venv/lib/python3.9/site-packages/matplotlib/_layoutgrid.py @@ -0,0 +1,563 @@ +""" +A layoutgrid is a nrows by ncols set of boxes, meant to be used by +`._constrained_layout`, each box is analogous to a subplotspec element of +a gridspec. + +Each box is defined by left[ncols], right[ncols], bottom[nrows] and top[nrows], +and by two editable margins for each side. The main margin gets its value +set by the size of ticklabels, titles, etc on each axes that is in the figure. +The outer margin is the padding around the axes, and space for any +colorbars. + +The "inner" widths and heights of these boxes are then constrained to be the +same (relative the values of `width_ratios[ncols]` and `height_ratios[nrows]`). + +The layoutgrid is then constrained to be contained within a parent layoutgrid, +its column(s) and row(s) specified when it is created. 
+""" + +import itertools +import kiwisolver as kiwi +import logging +import numpy as np +from matplotlib.transforms import Bbox + +_log = logging.getLogger(__name__) + + +class LayoutGrid: + """ + Analogous to a gridspec, and contained in another LayoutGrid. + """ + + def __init__(self, parent=None, parent_pos=(0, 0), + parent_inner=False, name='', ncols=1, nrows=1, + h_pad=None, w_pad=None, width_ratios=None, + height_ratios=None): + Variable = kiwi.Variable + self.parent = parent + self.parent_pos = parent_pos + self.parent_inner = parent_inner + self.name = name + seq_id() + if parent is not None: + self.name = f'{parent.name}.{self.name}' + self.nrows = nrows + self.ncols = ncols + self.height_ratios = np.atleast_1d(height_ratios) + if height_ratios is None: + self.height_ratios = np.ones(nrows) + self.width_ratios = np.atleast_1d(width_ratios) + if width_ratios is None: + self.width_ratios = np.ones(ncols) + + sn = self.name + '_' + if parent is None: + self.parent = None + self.solver = kiwi.Solver() + else: + self.parent = parent + parent.add_child(self, *parent_pos) + self.solver = self.parent.solver + # keep track of artist associated w/ this layout. Can be none + self.artists = np.empty((nrows, ncols), dtype=object) + self.children = np.empty((nrows, ncols), dtype=object) + + self.margins = {} + self.margin_vals = {} + # all the boxes in each column share the same left/right margins: + for todo in ['left', 'right', 'leftcb', 'rightcb']: + # track the value so we can change only if a margin is larger + # than the current value + self.margin_vals[todo] = np.zeros(ncols) + + sol = self.solver + + # These are redundant, but make life easier if + # we define them all. 
All that is really + # needed is left/right, margin['left'], and margin['right'] + self.widths = [Variable(f'{sn}widths[{i}]') for i in range(ncols)] + self.lefts = [Variable(f'{sn}lefts[{i}]') for i in range(ncols)] + self.rights = [Variable(f'{sn}rights[{i}]') for i in range(ncols)] + self.inner_widths = [Variable(f'{sn}inner_widths[{i}]') + for i in range(ncols)] + for todo in ['left', 'right', 'leftcb', 'rightcb']: + self.margins[todo] = [Variable(f'{sn}margins[{todo}][{i}]') + for i in range(ncols)] + for i in range(ncols): + sol.addEditVariable(self.margins[todo][i], 'strong') + + for todo in ['bottom', 'top', 'bottomcb', 'topcb']: + self.margins[todo] = np.empty((nrows), dtype=object) + self.margin_vals[todo] = np.zeros(nrows) + + self.heights = [Variable(f'{sn}heights[{i}]') for i in range(nrows)] + self.inner_heights = [Variable(f'{sn}inner_heights[{i}]') + for i in range(nrows)] + self.bottoms = [Variable(f'{sn}bottoms[{i}]') for i in range(nrows)] + self.tops = [Variable(f'{sn}tops[{i}]') for i in range(nrows)] + for todo in ['bottom', 'top', 'bottomcb', 'topcb']: + self.margins[todo] = [Variable(f'{sn}margins[{todo}][{i}]') + for i in range(nrows)] + for i in range(nrows): + sol.addEditVariable(self.margins[todo][i], 'strong') + + # set these margins to zero by default. They will be edited as + # children are filled. 
+ self.reset_margins() + self.add_constraints() + + self.h_pad = h_pad + self.w_pad = w_pad + + def __repr__(self): + str = f'LayoutBox: {self.name:25s} {self.nrows}x{self.ncols},\n' + for i in range(self.nrows): + for j in range(self.ncols): + str += f'{i}, {j}: '\ + f'L({self.lefts[j].value():1.3f}, ' \ + f'B{self.bottoms[i].value():1.3f}, ' \ + f'W{self.widths[j].value():1.3f}, ' \ + f'H{self.heights[i].value():1.3f}, ' \ + f'innerW{self.inner_widths[j].value():1.3f}, ' \ + f'innerH{self.inner_heights[i].value():1.3f}, ' \ + f'ML{self.margins["left"][j].value():1.3f}, ' \ + f'MR{self.margins["right"][j].value():1.3f}, \n' + return str + + def reset_margins(self): + """ + Reset all the margins to zero. Must do this after changing + figure size, for instance, because the relative size of the + axes labels etc changes. + """ + for todo in ['left', 'right', 'bottom', 'top', + 'leftcb', 'rightcb', 'bottomcb', 'topcb']: + self.edit_margins(todo, 0.0) + + def add_constraints(self): + # define self-consistent constraints + self.hard_constraints() + # define relationship with parent layoutgrid: + self.parent_constraints() + # define relative widths of the grid cells to each other + # and stack horizontally and vertically. + self.grid_constraints() + + def hard_constraints(self): + """ + These are the redundant constraints, plus ones that make the + rest of the code easier. 
+ """ + for i in range(self.ncols): + hc = [self.rights[i] >= self.lefts[i], + (self.rights[i] - self.margins['right'][i] - + self.margins['rightcb'][i] >= + self.lefts[i] - self.margins['left'][i] - + self.margins['leftcb'][i]) + ] + for c in hc: + self.solver.addConstraint(c | 'required') + + for i in range(self.nrows): + hc = [self.tops[i] >= self.bottoms[i], + (self.tops[i] - self.margins['top'][i] - + self.margins['topcb'][i] >= + self.bottoms[i] - self.margins['bottom'][i] - + self.margins['bottomcb'][i]) + ] + for c in hc: + self.solver.addConstraint(c | 'required') + + def add_child(self, child, i=0, j=0): + # np.ix_ returns the cross product of i and j indices + self.children[np.ix_(np.atleast_1d(i), np.atleast_1d(j))] = child + + def parent_constraints(self): + # constraints that are due to the parent... + # i.e. the first column's left is equal to the + # parent's left, the last column right equal to the + # parent's right... + parent = self.parent + if parent is None: + hc = [self.lefts[0] == 0, + self.rights[-1] == 1, + # top and bottom reversed order... + self.tops[0] == 1, + self.bottoms[-1] == 0] + else: + rows, cols = self.parent_pos + rows = np.atleast_1d(rows) + cols = np.atleast_1d(cols) + + left = parent.lefts[cols[0]] + right = parent.rights[cols[-1]] + top = parent.tops[rows[0]] + bottom = parent.bottoms[rows[-1]] + if self.parent_inner: + # the layout grid is contained inside the inner + # grid of the parent. 
+ left += parent.margins['left'][cols[0]] + left += parent.margins['leftcb'][cols[0]] + right -= parent.margins['right'][cols[-1]] + right -= parent.margins['rightcb'][cols[-1]] + top -= parent.margins['top'][rows[0]] + top -= parent.margins['topcb'][rows[0]] + bottom += parent.margins['bottom'][rows[-1]] + bottom += parent.margins['bottomcb'][rows[-1]] + hc = [self.lefts[0] == left, + self.rights[-1] == right, + # from top to bottom + self.tops[0] == top, + self.bottoms[-1] == bottom] + for c in hc: + self.solver.addConstraint(c | 'required') + + def grid_constraints(self): + # constrain the ratio of the inner part of the grids + # to be the same (relative to width_ratios) + + # constrain widths: + w = (self.rights[0] - self.margins['right'][0] - + self.margins['rightcb'][0]) + w = (w - self.lefts[0] - self.margins['left'][0] - + self.margins['leftcb'][0]) + w0 = w / self.width_ratios[0] + # from left to right + for i in range(1, self.ncols): + w = (self.rights[i] - self.margins['right'][i] - + self.margins['rightcb'][i]) + w = (w - self.lefts[i] - self.margins['left'][i] - + self.margins['leftcb'][i]) + c = (w == w0 * self.width_ratios[i]) + self.solver.addConstraint(c | 'strong') + # constrain the grid cells to be directly next to each other. + c = (self.rights[i - 1] == self.lefts[i]) + self.solver.addConstraint(c | 'strong') + + # constrain heights: + h = self.tops[0] - self.margins['top'][0] - self.margins['topcb'][0] + h = (h - self.bottoms[0] - self.margins['bottom'][0] - + self.margins['bottomcb'][0]) + h0 = h / self.height_ratios[0] + # from top to bottom: + for i in range(1, self.nrows): + h = (self.tops[i] - self.margins['top'][i] - + self.margins['topcb'][i]) + h = (h - self.bottoms[i] - self.margins['bottom'][i] - + self.margins['bottomcb'][i]) + c = (h == h0 * self.height_ratios[i]) + self.solver.addConstraint(c | 'strong') + # constrain the grid cells to be directly above each other. 
+ c = (self.bottoms[i - 1] == self.tops[i]) + self.solver.addConstraint(c | 'strong') + + # Margin editing: The margins are variable and meant to + # contain things of a fixed size like axes labels, tick labels, titles + # etc + def edit_margin(self, todo, size, cell): + """ + Change the size of the margin for one cell. + + Parameters + ---------- + todo : string (one of 'left', 'right', 'bottom', 'top') + margin to alter. + + size : float + Size of the margin. If it is larger than the existing minimum it + updates the margin size. Fraction of figure size. + + cell : int + Cell column or row to edit. + """ + self.solver.suggestValue(self.margins[todo][cell], size) + self.margin_vals[todo][cell] = size + + def edit_margin_min(self, todo, size, cell=0): + """ + Change the minimum size of the margin for one cell. + + Parameters + ---------- + todo : string (one of 'left', 'right', 'bottom', 'top') + margin to alter. + + size : float + Minimum size of the margin . If it is larger than the + existing minimum it updates the margin size. Fraction of + figure size. + + cell : int + Cell column or row to edit. + """ + + if size > self.margin_vals[todo][cell]: + self.edit_margin(todo, size, cell) + + def edit_margins(self, todo, size): + """ + Change the size of all the margin of all the cells in the layout grid. + + Parameters + ---------- + todo : string (one of 'left', 'right', 'bottom', 'top') + margin to alter. + + size : float + Size to set the margins. Fraction of figure size. + """ + + for i in range(len(self.margin_vals[todo])): + self.edit_margin(todo, size, i) + + def edit_all_margins_min(self, todo, size): + """ + Change the minimum size of all the margin of all + the cells in the layout grid. + + Parameters + ---------- + todo : {'left', 'right', 'bottom', 'top'} + The margin to alter. + + size : float + Minimum size of the margin. If it is larger than the + existing minimum it updates the margin size. Fraction of + figure size. 
+ """ + + for i in range(len(self.margin_vals[todo])): + self.edit_margin_min(todo, size, i) + + def edit_outer_margin_mins(self, margin, ss): + """ + Edit all four margin minimums in one statement. + + Parameters + ---------- + margin : dict + size of margins in a dict with keys 'left', 'right', 'bottom', + 'top' + + ss : SubplotSpec + defines the subplotspec these margins should be applied to + """ + + self.edit_margin_min('left', margin['left'], ss.colspan.start) + self.edit_margin_min('leftcb', margin['leftcb'], ss.colspan.start) + self.edit_margin_min('right', margin['right'], ss.colspan.stop - 1) + self.edit_margin_min('rightcb', margin['rightcb'], ss.colspan.stop - 1) + # rows are from the top down: + self.edit_margin_min('top', margin['top'], ss.rowspan.start) + self.edit_margin_min('topcb', margin['topcb'], ss.rowspan.start) + self.edit_margin_min('bottom', margin['bottom'], ss.rowspan.stop - 1) + self.edit_margin_min('bottomcb', margin['bottomcb'], + ss.rowspan.stop - 1) + + def get_margins(self, todo, col): + """Return the margin at this position""" + return self.margin_vals[todo][col] + + def get_outer_bbox(self, rows=0, cols=0): + """ + Return the outer bounding box of the subplot specs + given by rows and cols. rows and cols can be spans. + """ + rows = np.atleast_1d(rows) + cols = np.atleast_1d(cols) + + bbox = Bbox.from_extents( + self.lefts[cols[0]].value(), + self.bottoms[rows[-1]].value(), + self.rights[cols[-1]].value(), + self.tops[rows[0]].value()) + return bbox + + def get_inner_bbox(self, rows=0, cols=0): + """ + Return the inner bounding box of the subplot specs + given by rows and cols. rows and cols can be spans. 
+ """ + rows = np.atleast_1d(rows) + cols = np.atleast_1d(cols) + + bbox = Bbox.from_extents( + (self.lefts[cols[0]].value() + + self.margins['left'][cols[0]].value() + + self.margins['leftcb'][cols[0]].value()), + (self.bottoms[rows[-1]].value() + + self.margins['bottom'][rows[-1]].value() + + self.margins['bottomcb'][rows[-1]].value()), + (self.rights[cols[-1]].value() - + self.margins['right'][cols[-1]].value() - + self.margins['rightcb'][cols[-1]].value()), + (self.tops[rows[0]].value() - + self.margins['top'][rows[0]].value() - + self.margins['topcb'][rows[0]].value()) + ) + return bbox + + def get_bbox_for_cb(self, rows=0, cols=0): + """ + Return the bounding box that includes the + decorations but, *not* the colorbar... + """ + rows = np.atleast_1d(rows) + cols = np.atleast_1d(cols) + + bbox = Bbox.from_extents( + (self.lefts[cols[0]].value() + + self.margins['leftcb'][cols[0]].value()), + (self.bottoms[rows[-1]].value() + + self.margins['bottomcb'][rows[-1]].value()), + (self.rights[cols[-1]].value() - + self.margins['rightcb'][cols[-1]].value()), + (self.tops[rows[0]].value() - + self.margins['topcb'][rows[0]].value()) + ) + return bbox + + def get_left_margin_bbox(self, rows=0, cols=0): + """ + Return the left margin bounding box of the subplot specs + given by rows and cols. rows and cols can be spans. + """ + rows = np.atleast_1d(rows) + cols = np.atleast_1d(cols) + + bbox = Bbox.from_extents( + (self.lefts[cols[0]].value() + + self.margins['leftcb'][cols[0]].value()), + (self.bottoms[rows[-1]].value()), + (self.lefts[cols[0]].value() + + self.margins['leftcb'][cols[0]].value() + + self.margins['left'][cols[0]].value()), + (self.tops[rows[0]].value())) + return bbox + + def get_bottom_margin_bbox(self, rows=0, cols=0): + """ + Return the left margin bounding box of the subplot specs + given by rows and cols. rows and cols can be spans. 
+ """ + rows = np.atleast_1d(rows) + cols = np.atleast_1d(cols) + + bbox = Bbox.from_extents( + (self.lefts[cols[0]].value()), + (self.bottoms[rows[-1]].value() + + self.margins['bottomcb'][rows[-1]].value()), + (self.rights[cols[-1]].value()), + (self.bottoms[rows[-1]].value() + + self.margins['bottom'][rows[-1]].value() + + self.margins['bottomcb'][rows[-1]].value() + )) + return bbox + + def get_right_margin_bbox(self, rows=0, cols=0): + """ + Return the left margin bounding box of the subplot specs + given by rows and cols. rows and cols can be spans. + """ + rows = np.atleast_1d(rows) + cols = np.atleast_1d(cols) + + bbox = Bbox.from_extents( + (self.rights[cols[-1]].value() - + self.margins['right'][cols[-1]].value() - + self.margins['rightcb'][cols[-1]].value()), + (self.bottoms[rows[-1]].value()), + (self.rights[cols[-1]].value() - + self.margins['rightcb'][cols[-1]].value()), + (self.tops[rows[0]].value())) + return bbox + + def get_top_margin_bbox(self, rows=0, cols=0): + """ + Return the left margin bounding box of the subplot specs + given by rows and cols. rows and cols can be spans. + """ + rows = np.atleast_1d(rows) + cols = np.atleast_1d(cols) + + bbox = Bbox.from_extents( + (self.lefts[cols[0]].value()), + (self.tops[rows[0]].value() - + self.margins['topcb'][rows[0]].value()), + (self.rights[cols[-1]].value()), + (self.tops[rows[0]].value() - + self.margins['topcb'][rows[0]].value() - + self.margins['top'][rows[0]].value())) + return bbox + + def update_variables(self): + """ + Update the variables for the solver attached to this layoutgrid. 
+ """ + self.solver.updateVariables() + +_layoutboxobjnum = itertools.count() + + +def seq_id(): + """Generate a short sequential id for layoutbox objects.""" + return '%06d' % next(_layoutboxobjnum) + + +def print_children(lb): + """Print the children of the layoutbox.""" + for child in lb.children: + print_children(child) + + +def plot_children(fig, lg=None, level=0, printit=False): + """Simple plotting to show where boxes are.""" + import matplotlib.pyplot as plt + import matplotlib.patches as mpatches + + if lg is None: + _layoutgrids = fig.execute_constrained_layout() + lg = _layoutgrids[fig] + colors = plt.rcParams["axes.prop_cycle"].by_key()["color"] + col = colors[level] + for i in range(lg.nrows): + for j in range(lg.ncols): + bb = lg.get_outer_bbox(rows=i, cols=j) + fig.add_artist( + mpatches.Rectangle(bb.p0, bb.width, bb.height, linewidth=1, + edgecolor='0.7', facecolor='0.7', + alpha=0.2, transform=fig.transFigure, + zorder=-3)) + bbi = lg.get_inner_bbox(rows=i, cols=j) + fig.add_artist( + mpatches.Rectangle(bbi.p0, bbi.width, bbi.height, linewidth=2, + edgecolor=col, facecolor='none', + transform=fig.transFigure, zorder=-2)) + + bbi = lg.get_left_margin_bbox(rows=i, cols=j) + fig.add_artist( + mpatches.Rectangle(bbi.p0, bbi.width, bbi.height, linewidth=0, + edgecolor='none', alpha=0.2, + facecolor=[0.5, 0.7, 0.5], + transform=fig.transFigure, zorder=-2)) + bbi = lg.get_right_margin_bbox(rows=i, cols=j) + fig.add_artist( + mpatches.Rectangle(bbi.p0, bbi.width, bbi.height, linewidth=0, + edgecolor='none', alpha=0.2, + facecolor=[0.7, 0.5, 0.5], + transform=fig.transFigure, zorder=-2)) + bbi = lg.get_bottom_margin_bbox(rows=i, cols=j) + fig.add_artist( + mpatches.Rectangle(bbi.p0, bbi.width, bbi.height, linewidth=0, + edgecolor='none', alpha=0.2, + facecolor=[0.5, 0.5, 0.7], + transform=fig.transFigure, zorder=-2)) + bbi = lg.get_top_margin_bbox(rows=i, cols=j) + fig.add_artist( + mpatches.Rectangle(bbi.p0, bbi.width, bbi.height, linewidth=0, + 
edgecolor='none', alpha=0.2, + facecolor=[0.7, 0.2, 0.7], + transform=fig.transFigure, zorder=-2)) + for ch in lg.children.flat: + if ch is not None: + plot_children(fig, ch, level=level+1) diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_mathtext.py b/.venv/lib/python3.9/site-packages/matplotlib/_mathtext.py new file mode 100644 index 00000000..ea652ba9 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/matplotlib/_mathtext.py @@ -0,0 +1,2942 @@ +""" +Implementation details for :mod:`.mathtext`. +""" + +from collections import namedtuple +import enum +import functools +from io import StringIO +import logging +import os +import types +import unicodedata + +import numpy as np +from pyparsing import ( + Combine, Empty, Forward, Group, Literal, oneOf, OneOrMore, + Optional, ParseBaseException, ParseFatalException, ParserElement, + ParseResults, QuotedString, Regex, StringEnd, Suppress, White, ZeroOrMore) + +import matplotlib as mpl +from . import _api, cbook +from ._mathtext_data import ( + latex_to_bakoma, latex_to_standard, stix_glyph_fixes, stix_virtual_fonts, + tex2uni) +from .afm import AFM +from .font_manager import FontProperties, findfont, get_font +from .ft2font import KERNING_DEFAULT + + +ParserElement.enablePackrat() +_log = logging.getLogger("matplotlib.mathtext") + + +############################################################################## +# FONTS + + +def get_unicode_index(symbol, math=True): + r""" + Return the integer index (from the Unicode table) of *symbol*. + + Parameters + ---------- + symbol : str + A single unicode character, a TeX command (e.g. r'\pi') or a Type1 + symbol name (e.g. 'phi'). + math : bool, default: True + If False, always treat as a single unicode character. 
+ """ + # for a non-math symbol, simply return its unicode index + if not math: + return ord(symbol) + # From UTF #25: U+2212 minus sign is the preferred + # representation of the unary and binary minus sign rather than + # the ASCII-derived U+002D hyphen-minus, because minus sign is + # unambiguous and because it is rendered with a more desirable + # length, usually longer than a hyphen. + if symbol == '-': + return 0x2212 + try: # This will succeed if symbol is a single unicode char + return ord(symbol) + except TypeError: + pass + try: # Is symbol a TeX symbol (i.e. \alpha) + return tex2uni[symbol.strip("\\")] + except KeyError as err: + raise ValueError( + "'{}' is not a valid Unicode character or TeX/Type1 symbol" + .format(symbol)) from err + + +class Fonts: + """ + An abstract base class for a system of fonts to use for mathtext. + + The class must be able to take symbol keys and font file names and + return the character metrics. It also delegates to a backend class + to do the actual drawing. + """ + + def __init__(self, default_font_prop, mathtext_backend): + """ + Parameters + ---------- + default_font_prop : `~.font_manager.FontProperties` + The default non-math font, or the base font for Unicode (generic) + font rendering. + mathtext_backend : `MathtextBackend` subclass + Backend to which rendering is actually delegated. + """ + self.default_font_prop = default_font_prop + self.mathtext_backend = mathtext_backend + self.used_characters = {} + + @_api.deprecated("3.4") + def destroy(self): + """ + Fix any cyclical references before the object is about + to be destroyed. + """ + self.used_characters = None + + def get_kern(self, font1, fontclass1, sym1, fontsize1, + font2, fontclass2, sym2, fontsize2, dpi): + """ + Get the kerning distance for font between *sym1* and *sym2*. + + See `~.Fonts.get_metrics` for a detailed description of the parameters. + """ + return 0. 
+ + def get_metrics(self, font, font_class, sym, fontsize, dpi, math=True): + r""" + Parameters + ---------- + font : str + One of the TeX font names: "tt", "it", "rm", "cal", "sf", "bf", + "default", "regular", "bb", "frak", "scr". "default" and "regular" + are synonyms and use the non-math font. + font_class : str + One of the TeX font names (as for *font*), but **not** "bb", + "frak", or "scr". This is used to combine two font classes. The + only supported combination currently is ``get_metrics("frak", "bf", + ...)``. + sym : str + A symbol in raw TeX form, e.g., "1", "x", or "\sigma". + fontsize : float + Font size in points. + dpi : float + Rendering dots-per-inch. + math : bool + Whether we are currently in math mode or not. + + Returns + ------- + object + + The returned object has the following attributes (all floats, + except *slanted*): + + - *advance*: The advance distance (in points) of the glyph. + - *height*: The height of the glyph in points. + - *width*: The width of the glyph in points. + - *xmin*, *xmax*, *ymin*, *ymax*: The ink rectangle of the glyph + - *iceberg*: The distance from the baseline to the top of the + glyph. (This corresponds to TeX's definition of "height".) + - *slanted*: Whether the glyph should be considered as "slanted" + (currently used for kerning sub/superscripts). + """ + info = self._get_info(font, font_class, sym, fontsize, dpi, math) + return info.metrics + + def set_canvas_size(self, w, h, d): + """ + Set the size of the buffer used to render the math expression. + Only really necessary for the bitmap backends. + """ + self.width, self.height, self.depth = np.ceil([w, h, d]) + self.mathtext_backend.set_canvas_size( + self.width, self.height, self.depth) + + @_api.rename_parameter("3.4", "facename", "font") + def render_glyph(self, ox, oy, font, font_class, sym, fontsize, dpi): + """ + At position (*ox*, *oy*), draw the glyph specified by the remaining + parameters (see `get_metrics` for their detailed description). 
+ """ + info = self._get_info(font, font_class, sym, fontsize, dpi) + self.used_characters.setdefault(info.font.fname, set()).add(info.num) + self.mathtext_backend.render_glyph(ox, oy, info) + + def render_rect_filled(self, x1, y1, x2, y2): + """ + Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*). + """ + self.mathtext_backend.render_rect_filled(x1, y1, x2, y2) + + def get_xheight(self, font, fontsize, dpi): + """ + Get the xheight for the given *font* and *fontsize*. + """ + raise NotImplementedError() + + def get_underline_thickness(self, font, fontsize, dpi): + """ + Get the line thickness that matches the given font. Used as a + base unit for drawing lines such as in a fraction or radical. + """ + raise NotImplementedError() + + def get_used_characters(self): + """ + Get the set of characters that were used in the math + expression. Used by backends that need to subset fonts so + they know which glyphs to include. + """ + return self.used_characters + + def get_results(self, box): + """ + Get the data needed by the backend to render the math + expression. The return value is backend-specific. + """ + result = self.mathtext_backend.get_results( + box, self.get_used_characters()) + if self.destroy != TruetypeFonts.destroy.__get__(self): + destroy = _api.deprecate_method_override( + __class__.destroy, self, since="3.4") + if destroy: + destroy() + return result + + def get_sized_alternatives_for_symbol(self, fontname, sym): + """ + Override if your font provides multiple sizes of the same + symbol. Should return a list of symbols matching *sym* in + various sizes. The expression renderer will select the most + appropriate size for a given situation from this list. + """ + return [(fontname, sym)] + + +class TruetypeFonts(Fonts): + """ + A generic base class for all font setups that use Truetype fonts + (through FT2Font). 
+ """ + def __init__(self, default_font_prop, mathtext_backend): + super().__init__(default_font_prop, mathtext_backend) + self.glyphd = {} + self._fonts = {} + + filename = findfont(default_font_prop) + default_font = get_font(filename) + self._fonts['default'] = default_font + self._fonts['regular'] = default_font + + @_api.deprecated("3.4") + def destroy(self): + self.glyphd = None + super().destroy() + + def _get_font(self, font): + if font in self.fontmap: + basename = self.fontmap[font] + else: + basename = font + cached_font = self._fonts.get(basename) + if cached_font is None and os.path.exists(basename): + cached_font = get_font(basename) + self._fonts[basename] = cached_font + self._fonts[cached_font.postscript_name] = cached_font + self._fonts[cached_font.postscript_name.lower()] = cached_font + return cached_font + + def _get_offset(self, font, glyph, fontsize, dpi): + if font.postscript_name == 'Cmex10': + return (glyph.height / 64 / 2) + (fontsize/3 * dpi/72) + return 0. + + def _get_info(self, fontname, font_class, sym, fontsize, dpi, math=True): + key = fontname, font_class, sym, fontsize, dpi + bunch = self.glyphd.get(key) + if bunch is not None: + return bunch + + font, num, symbol_name, fontsize, slanted = \ + self._get_glyph(fontname, font_class, sym, fontsize, math) + + font.set_size(fontsize, dpi) + glyph = font.load_char( + num, + flags=self.mathtext_backend.get_hinting_type()) + + xmin, ymin, xmax, ymax = [val/64.0 for val in glyph.bbox] + offset = self._get_offset(font, glyph, fontsize, dpi) + metrics = types.SimpleNamespace( + advance = glyph.linearHoriAdvance/65536.0, + height = glyph.height/64.0, + width = glyph.width/64.0, + xmin = xmin, + xmax = xmax, + ymin = ymin+offset, + ymax = ymax+offset, + # iceberg is the equivalent of TeX's "height" + iceberg = glyph.horiBearingY/64.0 + offset, + slanted = slanted + ) + + result = self.glyphd[key] = types.SimpleNamespace( + font = font, + fontsize = fontsize, + postscript_name = 
font.postscript_name, + metrics = metrics, + symbol_name = symbol_name, + num = num, + glyph = glyph, + offset = offset + ) + return result + + def get_xheight(self, fontname, fontsize, dpi): + font = self._get_font(fontname) + font.set_size(fontsize, dpi) + pclt = font.get_sfnt_table('pclt') + if pclt is None: + # Some fonts don't store the xHeight, so we do a poor man's xHeight + metrics = self.get_metrics( + fontname, mpl.rcParams['mathtext.default'], 'x', fontsize, dpi) + return metrics.iceberg + xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0) + return xHeight + + def get_underline_thickness(self, font, fontsize, dpi): + # This function used to grab underline thickness from the font + # metrics, but that information is just too un-reliable, so it + # is now hardcoded. + return ((0.75 / 12.0) * fontsize * dpi) / 72.0 + + def get_kern(self, font1, fontclass1, sym1, fontsize1, + font2, fontclass2, sym2, fontsize2, dpi): + if font1 == font2 and fontsize1 == fontsize2: + info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi) + info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi) + font = info1.font + return font.get_kerning(info1.num, info2.num, KERNING_DEFAULT) / 64 + return super().get_kern(font1, fontclass1, sym1, fontsize1, + font2, fontclass2, sym2, fontsize2, dpi) + + +class BakomaFonts(TruetypeFonts): + """ + Use the Bakoma TrueType fonts for rendering. + + Symbols are strewn about a number of font files, each of which has + its own proprietary 8-bit encoding. 
+ """ + _fontmap = { + 'cal': 'cmsy10', + 'rm': 'cmr10', + 'tt': 'cmtt10', + 'it': 'cmmi10', + 'bf': 'cmb10', + 'sf': 'cmss10', + 'ex': 'cmex10', + } + + def __init__(self, *args, **kwargs): + self._stix_fallback = StixFonts(*args, **kwargs) + + super().__init__(*args, **kwargs) + self.fontmap = {} + for key, val in self._fontmap.items(): + fullpath = findfont(val) + self.fontmap[key] = fullpath + self.fontmap[val] = fullpath + + _slanted_symbols = set(r"\int \oint".split()) + + def _get_glyph(self, fontname, font_class, sym, fontsize, math=True): + symbol_name = None + font = None + if fontname in self.fontmap and sym in latex_to_bakoma: + basename, num = latex_to_bakoma[sym] + slanted = (basename == "cmmi10") or sym in self._slanted_symbols + font = self._get_font(basename) + elif len(sym) == 1: + slanted = (fontname == "it") + font = self._get_font(fontname) + if font is not None: + num = ord(sym) + + if font is not None: + gid = font.get_char_index(num) + if gid != 0: + symbol_name = font.get_glyph_name(gid) + + if symbol_name is None: + return self._stix_fallback._get_glyph( + fontname, font_class, sym, fontsize, math) + + return font, num, symbol_name, fontsize, slanted + + # The Bakoma fonts contain many pre-sized alternatives for the + # delimiters. The AutoSizedChar class will use these alternatives + # and select the best (closest sized) glyph. 
+ _size_alternatives = { + '(': [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'), + ('ex', '\xb5'), ('ex', '\xc3')], + ')': [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'), + ('ex', '\xb6'), ('ex', '\x21')], + '{': [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'), + ('ex', '\xbd'), ('ex', '\x28')], + '}': [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'), + ('ex', '\xbe'), ('ex', '\x29')], + # The fourth size of '[' is mysteriously missing from the BaKoMa + # font, so I've omitted it for both '[' and ']' + '[': [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'), + ('ex', '\x22')], + ']': [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'), + ('ex', '\x23')], + r'\lfloor': [('ex', '\xa5'), ('ex', '\x6a'), + ('ex', '\xb9'), ('ex', '\x24')], + r'\rfloor': [('ex', '\xa6'), ('ex', '\x6b'), + ('ex', '\xba'), ('ex', '\x25')], + r'\lceil': [('ex', '\xa7'), ('ex', '\x6c'), + ('ex', '\xbb'), ('ex', '\x26')], + r'\rceil': [('ex', '\xa8'), ('ex', '\x6d'), + ('ex', '\xbc'), ('ex', '\x27')], + r'\langle': [('ex', '\xad'), ('ex', '\x44'), + ('ex', '\xbf'), ('ex', '\x2a')], + r'\rangle': [('ex', '\xae'), ('ex', '\x45'), + ('ex', '\xc0'), ('ex', '\x2b')], + r'\__sqrt__': [('ex', '\x70'), ('ex', '\x71'), + ('ex', '\x72'), ('ex', '\x73')], + r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'), + ('ex', '\xc2'), ('ex', '\x2d')], + r'/': [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'), + ('ex', '\xcb'), ('ex', '\x2c')], + r'\widehat': [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'), + ('ex', '\x64')], + r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'), + ('ex', '\x67')], + r'<': [('cal', 'h'), ('ex', 'D')], + r'>': [('cal', 'i'), ('ex', 'E')] + } + + for alias, target in [(r'\leftparen', '('), + (r'\rightparent', ')'), + (r'\leftbrace', '{'), + (r'\rightbrace', '}'), + (r'\leftbracket', '['), + (r'\rightbracket', ']'), + (r'\{', '{'), + (r'\}', '}'), + (r'\[', '['), + (r'\]', ']')]: + _size_alternatives[alias] = _size_alternatives[target] + + def get_sized_alternatives_for_symbol(self, fontname, sym): + 
return self._size_alternatives.get(sym, [(fontname, sym)]) + + +class UnicodeFonts(TruetypeFonts): + """ + An abstract base class for handling Unicode fonts. + + While some reasonably complete Unicode fonts (such as DejaVu) may + work in some situations, the only Unicode font I'm aware of with a + complete set of math symbols is STIX. + + This class will "fallback" on the Bakoma fonts when a required + symbol can not be found in the font. + """ + use_cmex = True # Unused; delete once mathtext becomes private. + + def __init__(self, *args, **kwargs): + # This must come first so the backend's owner is set correctly + fallback_rc = mpl.rcParams['mathtext.fallback'] + font_cls = {'stix': StixFonts, + 'stixsans': StixSansFonts, + 'cm': BakomaFonts + }.get(fallback_rc) + self.cm_fallback = font_cls(*args, **kwargs) if font_cls else None + + super().__init__(*args, **kwargs) + self.fontmap = {} + for texfont in "cal rm tt it bf sf".split(): + prop = mpl.rcParams['mathtext.' + texfont] + font = findfont(prop) + self.fontmap[texfont] = font + prop = FontProperties('cmex10') + font = findfont(prop) + self.fontmap['ex'] = font + + # include STIX sized alternatives for glyphs if fallback is STIX + if isinstance(self.cm_fallback, StixFonts): + stixsizedaltfonts = { + 0: 'STIXGeneral', + 1: 'STIXSizeOneSym', + 2: 'STIXSizeTwoSym', + 3: 'STIXSizeThreeSym', + 4: 'STIXSizeFourSym', + 5: 'STIXSizeFiveSym'} + + for size, name in stixsizedaltfonts.items(): + fullpath = findfont(name) + self.fontmap[size] = fullpath + self.fontmap[name] = fullpath + + _slanted_symbols = set(r"\int \oint".split()) + + def _map_virtual_font(self, fontname, font_class, uniindex): + return fontname, uniindex + + def _get_glyph(self, fontname, font_class, sym, fontsize, math=True): + try: + uniindex = get_unicode_index(sym, math) + found_symbol = True + except ValueError: + uniindex = ord('?') + found_symbol = False + _log.warning("No TeX to unicode mapping for {!a}.".format(sym)) + + fontname, uniindex = 
self._map_virtual_font( + fontname, font_class, uniindex) + + new_fontname = fontname + + # Only characters in the "Letter" class should be italicized in 'it' + # mode. Greek capital letters should be Roman. + if found_symbol: + if fontname == 'it' and uniindex < 0x10000: + char = chr(uniindex) + if (unicodedata.category(char)[0] != "L" + or unicodedata.name(char).startswith("GREEK CAPITAL")): + new_fontname = 'rm' + + slanted = (new_fontname == 'it') or sym in self._slanted_symbols + found_symbol = False + font = self._get_font(new_fontname) + if font is not None: + if font.family_name == "cmr10" and uniindex == 0x2212: + # minus sign exists in cmsy10 (not cmr10) + font = get_font( + cbook._get_data_path("fonts/ttf/cmsy10.ttf")) + uniindex = 0xa1 + glyphindex = font.get_char_index(uniindex) + if glyphindex != 0: + found_symbol = True + + if not found_symbol: + if self.cm_fallback: + if (fontname in ('it', 'regular') + and isinstance(self.cm_fallback, StixFonts)): + fontname = 'rm' + + g = self.cm_fallback._get_glyph(fontname, font_class, + sym, fontsize) + fname = g[0].family_name + if fname in list(BakomaFonts._fontmap.values()): + fname = "Computer Modern" + _log.info("Substituting symbol %s from %s", sym, fname) + return g + + else: + if (fontname in ('it', 'regular') + and isinstance(self, StixFonts)): + return self._get_glyph('rm', font_class, sym, fontsize) + _log.warning("Font {!r} does not have a glyph for {!a} " + "[U+{:x}], substituting with a dummy " + "symbol.".format(new_fontname, sym, uniindex)) + fontname = 'rm' + font = self._get_font(fontname) + uniindex = 0xA4 # currency char, for lack of anything better + glyphindex = font.get_char_index(uniindex) + slanted = False + + symbol_name = font.get_glyph_name(glyphindex) + return font, uniindex, symbol_name, fontsize, slanted + + def get_sized_alternatives_for_symbol(self, fontname, sym): + if self.cm_fallback: + return self.cm_fallback.get_sized_alternatives_for_symbol( + fontname, sym) + return 
[(fontname, sym)] + + +class DejaVuFonts(UnicodeFonts): + use_cmex = False # Unused; delete once mathtext becomes private. + + def __init__(self, *args, **kwargs): + # This must come first so the backend's owner is set correctly + if isinstance(self, DejaVuSerifFonts): + self.cm_fallback = StixFonts(*args, **kwargs) + else: + self.cm_fallback = StixSansFonts(*args, **kwargs) + self.bakoma = BakomaFonts(*args, **kwargs) + TruetypeFonts.__init__(self, *args, **kwargs) + self.fontmap = {} + # Include Stix sized alternatives for glyphs + self._fontmap.update({ + 1: 'STIXSizeOneSym', + 2: 'STIXSizeTwoSym', + 3: 'STIXSizeThreeSym', + 4: 'STIXSizeFourSym', + 5: 'STIXSizeFiveSym', + }) + for key, name in self._fontmap.items(): + fullpath = findfont(name) + self.fontmap[key] = fullpath + self.fontmap[name] = fullpath + + def _get_glyph(self, fontname, font_class, sym, fontsize, math=True): + # Override prime symbol to use Bakoma. + if sym == r'\prime': + return self.bakoma._get_glyph( + fontname, font_class, sym, fontsize, math) + else: + # check whether the glyph is available in the display font + uniindex = get_unicode_index(sym) + font = self._get_font('ex') + if font is not None: + glyphindex = font.get_char_index(uniindex) + if glyphindex != 0: + return super()._get_glyph( + 'ex', font_class, sym, fontsize, math) + # otherwise return regular glyph + return super()._get_glyph( + fontname, font_class, sym, fontsize, math) + + +class DejaVuSerifFonts(DejaVuFonts): + """ + A font handling class for the DejaVu Serif fonts + + If a glyph is not found it will fallback to Stix Serif + """ + _fontmap = { + 'rm': 'DejaVu Serif', + 'it': 'DejaVu Serif:italic', + 'bf': 'DejaVu Serif:weight=bold', + 'sf': 'DejaVu Sans', + 'tt': 'DejaVu Sans Mono', + 'ex': 'DejaVu Serif Display', + 0: 'DejaVu Serif', + } + + +class DejaVuSansFonts(DejaVuFonts): + """ + A font handling class for the DejaVu Sans fonts + + If a glyph is not found it will fallback to Stix Sans + """ + _fontmap = { + 
'rm': 'DejaVu Sans', + 'it': 'DejaVu Sans:italic', + 'bf': 'DejaVu Sans:weight=bold', + 'sf': 'DejaVu Sans', + 'tt': 'DejaVu Sans Mono', + 'ex': 'DejaVu Sans Display', + 0: 'DejaVu Sans', + } + + +class StixFonts(UnicodeFonts): + """ + A font handling class for the STIX fonts. + + In addition to what UnicodeFonts provides, this class: + + - supports "virtual fonts" which are complete alpha numeric + character sets with different font styles at special Unicode + code points, such as "Blackboard". + + - handles sized alternative characters for the STIXSizeX fonts. + """ + _fontmap = { + 'rm': 'STIXGeneral', + 'it': 'STIXGeneral:italic', + 'bf': 'STIXGeneral:weight=bold', + 'nonunirm': 'STIXNonUnicode', + 'nonuniit': 'STIXNonUnicode:italic', + 'nonunibf': 'STIXNonUnicode:weight=bold', + 0: 'STIXGeneral', + 1: 'STIXSizeOneSym', + 2: 'STIXSizeTwoSym', + 3: 'STIXSizeThreeSym', + 4: 'STIXSizeFourSym', + 5: 'STIXSizeFiveSym', + } + use_cmex = False # Unused; delete once mathtext becomes private. + cm_fallback = False + _sans = False + + def __init__(self, *args, **kwargs): + TruetypeFonts.__init__(self, *args, **kwargs) + self.fontmap = {} + for key, name in self._fontmap.items(): + fullpath = findfont(name) + self.fontmap[key] = fullpath + self.fontmap[name] = fullpath + + def _map_virtual_font(self, fontname, font_class, uniindex): + # Handle these "fonts" that are actually embedded in + # other fonts. 
+ mapping = stix_virtual_fonts.get(fontname) + if (self._sans and mapping is None + and fontname not in ('regular', 'default')): + mapping = stix_virtual_fonts['sf'] + doing_sans_conversion = True + else: + doing_sans_conversion = False + + if mapping is not None: + if isinstance(mapping, dict): + try: + mapping = mapping[font_class] + except KeyError: + mapping = mapping['rm'] + + # Binary search for the source glyph + lo = 0 + hi = len(mapping) + while lo < hi: + mid = (lo+hi)//2 + range = mapping[mid] + if uniindex < range[0]: + hi = mid + elif uniindex <= range[1]: + break + else: + lo = mid + 1 + + if range[0] <= uniindex <= range[1]: + uniindex = uniindex - range[0] + range[3] + fontname = range[2] + elif not doing_sans_conversion: + # This will generate a dummy character + uniindex = 0x1 + fontname = mpl.rcParams['mathtext.default'] + + # Fix some incorrect glyphs. + if fontname in ('rm', 'it'): + uniindex = stix_glyph_fixes.get(uniindex, uniindex) + + # Handle private use area glyphs + if fontname in ('it', 'rm', 'bf') and 0xe000 <= uniindex <= 0xf8ff: + fontname = 'nonuni' + fontname + + return fontname, uniindex + + @functools.lru_cache() + def get_sized_alternatives_for_symbol(self, fontname, sym): + fixes = { + '\\{': '{', '\\}': '}', '\\[': '[', '\\]': ']', + '<': '\N{MATHEMATICAL LEFT ANGLE BRACKET}', + '>': '\N{MATHEMATICAL RIGHT ANGLE BRACKET}', + } + sym = fixes.get(sym, sym) + try: + uniindex = get_unicode_index(sym) + except ValueError: + return [(fontname, sym)] + alternatives = [(i, chr(uniindex)) for i in range(6) + if self._get_font(i).get_char_index(uniindex) != 0] + # The largest size of the radical symbol in STIX has incorrect + # metrics that cause it to be disconnected from the stem. + if sym == r'\__sqrt__': + alternatives = alternatives[:-1] + return alternatives + + +class StixSansFonts(StixFonts): + """ + A font handling class for the STIX fonts (that uses sans-serif + characters by default). 
+ """ + _sans = True + + +class StandardPsFonts(Fonts): + """ + Use the standard postscript fonts for rendering to backend_ps + + Unlike the other font classes, BakomaFont and UnicodeFont, this + one requires the Ps backend. + """ + basepath = str(cbook._get_data_path('fonts/afm')) + + fontmap = { + 'cal': 'pzcmi8a', # Zapf Chancery + 'rm': 'pncr8a', # New Century Schoolbook + 'tt': 'pcrr8a', # Courier + 'it': 'pncri8a', # New Century Schoolbook Italic + 'sf': 'phvr8a', # Helvetica + 'bf': 'pncb8a', # New Century Schoolbook Bold + None: 'psyr', # Symbol + } + + def __init__(self, default_font_prop, mathtext_backend=None): + if mathtext_backend is None: + # Circular import, can be dropped after public access to + # StandardPsFonts is removed and mathtext_backend made a required + # parameter. + from . import mathtext + mathtext_backend = mathtext.MathtextBackendPath() + super().__init__(default_font_prop, mathtext_backend) + self.glyphd = {} + self.fonts = {} + + filename = findfont(default_font_prop, fontext='afm', + directory=self.basepath) + if filename is None: + filename = findfont('Helvetica', fontext='afm', + directory=self.basepath) + with open(filename, 'rb') as fd: + default_font = AFM(fd) + default_font.fname = filename + + self.fonts['default'] = default_font + self.fonts['regular'] = default_font + + pswriter = _api.deprecated("3.4")(property(lambda self: StringIO())) + + def _get_font(self, font): + if font in self.fontmap: + basename = self.fontmap[font] + else: + basename = font + + cached_font = self.fonts.get(basename) + if cached_font is None: + fname = os.path.join(self.basepath, basename + ".afm") + with open(fname, 'rb') as fd: + cached_font = AFM(fd) + cached_font.fname = fname + self.fonts[basename] = cached_font + self.fonts[cached_font.get_fontname()] = cached_font + return cached_font + + def _get_info(self, fontname, font_class, sym, fontsize, dpi, math=True): + """Load the cmfont, metrics and glyph with caching.""" + key = fontname, sym, 
fontsize, dpi + tup = self.glyphd.get(key) + + if tup is not None: + return tup + + # Only characters in the "Letter" class should really be italicized. + # This class includes greek letters, so we're ok + if (fontname == 'it' and + (len(sym) > 1 + or not unicodedata.category(sym).startswith("L"))): + fontname = 'rm' + + found_symbol = False + + if sym in latex_to_standard: + fontname, num = latex_to_standard[sym] + glyph = chr(num) + found_symbol = True + elif len(sym) == 1: + glyph = sym + num = ord(glyph) + found_symbol = True + else: + _log.warning( + "No TeX to built-in Postscript mapping for {!r}".format(sym)) + + slanted = (fontname == 'it') + font = self._get_font(fontname) + + if found_symbol: + try: + symbol_name = font.get_name_char(glyph) + except KeyError: + _log.warning( + "No glyph in standard Postscript font {!r} for {!r}" + .format(font.get_fontname(), sym)) + found_symbol = False + + if not found_symbol: + glyph = '?' + num = ord(glyph) + symbol_name = font.get_name_char(glyph) + + offset = 0 + + scale = 0.001 * fontsize + + xmin, ymin, xmax, ymax = [val * scale + for val in font.get_bbox_char(glyph)] + metrics = types.SimpleNamespace( + advance = font.get_width_char(glyph) * scale, + width = font.get_width_char(glyph) * scale, + height = font.get_height_char(glyph) * scale, + xmin = xmin, + xmax = xmax, + ymin = ymin+offset, + ymax = ymax+offset, + # iceberg is the equivalent of TeX's "height" + iceberg = ymax + offset, + slanted = slanted + ) + + self.glyphd[key] = types.SimpleNamespace( + font = font, + fontsize = fontsize, + postscript_name = font.get_fontname(), + metrics = metrics, + symbol_name = symbol_name, + num = num, + glyph = glyph, + offset = offset + ) + + return self.glyphd[key] + + def get_kern(self, font1, fontclass1, sym1, fontsize1, + font2, fontclass2, sym2, fontsize2, dpi): + if font1 == font2 and fontsize1 == fontsize2: + info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi) + info2 = self._get_info(font2, 
fontclass2, sym2, fontsize2, dpi) + font = info1.font + return (font.get_kern_dist(info1.glyph, info2.glyph) + * 0.001 * fontsize1) + return super().get_kern(font1, fontclass1, sym1, fontsize1, + font2, fontclass2, sym2, fontsize2, dpi) + + def get_xheight(self, font, fontsize, dpi): + font = self._get_font(font) + return font.get_xheight() * 0.001 * fontsize + + def get_underline_thickness(self, font, fontsize, dpi): + font = self._get_font(font) + return font.get_underline_thickness() * 0.001 * fontsize + + +############################################################################## +# TeX-LIKE BOX MODEL + +# The following is based directly on the document 'woven' from the +# TeX82 source code. This information is also available in printed +# form: +# +# Knuth, Donald E.. 1986. Computers and Typesetting, Volume B: +# TeX: The Program. Addison-Wesley Professional. +# +# The most relevant "chapters" are: +# Data structures for boxes and their friends +# Shipping pages out (Ship class) +# Packaging (hpack and vpack) +# Data structures for math mode +# Subroutines for math mode +# Typesetting math formulas +# +# Many of the docstrings below refer to a numbered "node" in that +# book, e.g., node123 +# +# Note that (as TeX) y increases downward, unlike many other parts of +# matplotlib. + +# How much text shrinks when going to the next-smallest level. GROW_FACTOR +# must be the inverse of SHRINK_FACTOR. +SHRINK_FACTOR = 0.7 +GROW_FACTOR = 1 / SHRINK_FACTOR +# The number of different sizes of chars to use, beyond which they will not +# get any smaller +NUM_SIZE_LEVELS = 6 + + +class FontConstantsBase: + """ + A set of constants that controls how certain things, such as sub- + and superscripts are laid out. These are all metrics that can't + be reliably retrieved from the font metrics in the font itself. + """ + # Percentage of x-height of additional horiz. 
space after sub/superscripts + script_space = 0.05 + + # Percentage of x-height that sub/superscripts drop below the baseline + subdrop = 0.4 + + # Percentage of x-height that superscripts are raised from the baseline + sup1 = 0.7 + + # Percentage of x-height that subscripts drop below the baseline + sub1 = 0.3 + + # Percentage of x-height that subscripts drop below the baseline when a + # superscript is present + sub2 = 0.5 + + # Percentage of x-height that sub/supercripts are offset relative to the + # nucleus edge for non-slanted nuclei + delta = 0.025 + + # Additional percentage of last character height above 2/3 of the + # x-height that supercripts are offset relative to the subscript + # for slanted nuclei + delta_slanted = 0.2 + + # Percentage of x-height that supercripts and subscripts are offset for + # integrals + delta_integral = 0.1 + + +class ComputerModernFontConstants(FontConstantsBase): + script_space = 0.075 + subdrop = 0.2 + sup1 = 0.45 + sub1 = 0.2 + sub2 = 0.3 + delta = 0.075 + delta_slanted = 0.3 + delta_integral = 0.3 + + +class STIXFontConstants(FontConstantsBase): + script_space = 0.1 + sup1 = 0.8 + sub2 = 0.6 + delta = 0.05 + delta_slanted = 0.3 + delta_integral = 0.3 + + +class STIXSansFontConstants(FontConstantsBase): + script_space = 0.05 + sup1 = 0.8 + delta_slanted = 0.6 + delta_integral = 0.3 + + +class DejaVuSerifFontConstants(FontConstantsBase): + pass + + +class DejaVuSansFontConstants(FontConstantsBase): + pass + + +# Maps font family names to the FontConstantBase subclass to use +_font_constant_mapping = { + 'DejaVu Sans': DejaVuSansFontConstants, + 'DejaVu Sans Mono': DejaVuSansFontConstants, + 'DejaVu Serif': DejaVuSerifFontConstants, + 'cmb10': ComputerModernFontConstants, + 'cmex10': ComputerModernFontConstants, + 'cmmi10': ComputerModernFontConstants, + 'cmr10': ComputerModernFontConstants, + 'cmss10': ComputerModernFontConstants, + 'cmsy10': ComputerModernFontConstants, + 'cmtt10': ComputerModernFontConstants, + 
'STIXGeneral': STIXFontConstants, + 'STIXNonUnicode': STIXFontConstants, + 'STIXSizeFiveSym': STIXFontConstants, + 'STIXSizeFourSym': STIXFontConstants, + 'STIXSizeThreeSym': STIXFontConstants, + 'STIXSizeTwoSym': STIXFontConstants, + 'STIXSizeOneSym': STIXFontConstants, + # Map the fonts we used to ship, just for good measure + 'Bitstream Vera Sans': DejaVuSansFontConstants, + 'Bitstream Vera': DejaVuSansFontConstants, + } + + +def _get_font_constant_set(state): + constants = _font_constant_mapping.get( + state.font_output._get_font(state.font).family_name, + FontConstantsBase) + # STIX sans isn't really its own fonts, just different code points + # in the STIX fonts, so we have to detect this one separately. + if (constants is STIXFontConstants and + isinstance(state.font_output, StixSansFonts)): + return STIXSansFontConstants + return constants + + +class Node: + """A node in the TeX box model.""" + + def __init__(self): + self.size = 0 + + def __repr__(self): + return self.__class__.__name__ + + def get_kerning(self, next): + return 0.0 + + def shrink(self): + """ + Shrinks one level smaller. There are only three levels of + sizes, after which things will no longer get smaller. + """ + self.size += 1 + + def grow(self): + """ + Grows one level larger. There is no limit to how big + something can get. 
+ """ + self.size -= 1 + + def render(self, x, y): + pass + + +class Box(Node): + """A node with a physical location.""" + + def __init__(self, width, height, depth): + super().__init__() + self.width = width + self.height = height + self.depth = depth + + def shrink(self): + super().shrink() + if self.size < NUM_SIZE_LEVELS: + self.width *= SHRINK_FACTOR + self.height *= SHRINK_FACTOR + self.depth *= SHRINK_FACTOR + + def grow(self): + super().grow() + self.width *= GROW_FACTOR + self.height *= GROW_FACTOR + self.depth *= GROW_FACTOR + + def render(self, x1, y1, x2, y2): + pass + + +class Vbox(Box): + """A box with only height (zero width).""" + + def __init__(self, height, depth): + super().__init__(0., height, depth) + + +class Hbox(Box): + """A box with only width (zero height and depth).""" + + def __init__(self, width): + super().__init__(width, 0., 0.) + + +class Char(Node): + """ + A single character. + + Unlike TeX, the font information and metrics are stored with each `Char` + to make it easier to lookup the font metrics when needed. Note that TeX + boxes have a width, height, and depth, unlike Type1 and TrueType which use + a full bounding box and an advance in the x-direction. The metrics must + be converted to the TeX model, and the advance (if different from width) + must be converted into a `Kern` node when the `Char` is added to its parent + `Hlist`. 
+ """ + + def __init__(self, c, state, math=True): + super().__init__() + self.c = c + self.font_output = state.font_output + self.font = state.font + self.font_class = state.font_class + self.fontsize = state.fontsize + self.dpi = state.dpi + self.math = math + # The real width, height and depth will be set during the + # pack phase, after we know the real fontsize + self._update_metrics() + + def __repr__(self): + return '`%s`' % self.c + + def _update_metrics(self): + metrics = self._metrics = self.font_output.get_metrics( + self.font, self.font_class, self.c, self.fontsize, self.dpi, + self.math) + if self.c == ' ': + self.width = metrics.advance + else: + self.width = metrics.width + self.height = metrics.iceberg + self.depth = -(metrics.iceberg - metrics.height) + + def is_slanted(self): + return self._metrics.slanted + + def get_kerning(self, next): + """ + Return the amount of kerning between this and the given character. + + This method is called when characters are strung together into `Hlist` + to create `Kern` nodes. + """ + advance = self._metrics.advance - self.width + kern = 0. 
+ if isinstance(next, Char): + kern = self.font_output.get_kern( + self.font, self.font_class, self.c, self.fontsize, + next.font, next.font_class, next.c, next.fontsize, + self.dpi) + return advance + kern + + def render(self, x, y): + """ + Render the character to the canvas + """ + self.font_output.render_glyph( + x, y, + self.font, self.font_class, self.c, self.fontsize, self.dpi) + + def shrink(self): + super().shrink() + if self.size < NUM_SIZE_LEVELS: + self.fontsize *= SHRINK_FACTOR + self.width *= SHRINK_FACTOR + self.height *= SHRINK_FACTOR + self.depth *= SHRINK_FACTOR + + def grow(self): + super().grow() + self.fontsize *= GROW_FACTOR + self.width *= GROW_FACTOR + self.height *= GROW_FACTOR + self.depth *= GROW_FACTOR + + +class Accent(Char): + """ + The font metrics need to be dealt with differently for accents, + since they are already offset correctly from the baseline in + TrueType fonts. + """ + def _update_metrics(self): + metrics = self._metrics = self.font_output.get_metrics( + self.font, self.font_class, self.c, self.fontsize, self.dpi) + self.width = metrics.xmax - metrics.xmin + self.height = metrics.ymax - metrics.ymin + self.depth = 0 + + def shrink(self): + super().shrink() + self._update_metrics() + + def grow(self): + super().grow() + self._update_metrics() + + def render(self, x, y): + """ + Render the character to the canvas. + """ + self.font_output.render_glyph( + x - self._metrics.xmin, y + self._metrics.ymin, + self.font, self.font_class, self.c, self.fontsize, self.dpi) + + +class List(Box): + """A list of nodes (either horizontal or vertical).""" + + def __init__(self, elements): + super().__init__(0., 0., 0.) + self.shift_amount = 0. # An arbitrary offset + self.children = elements # The child nodes of this list + # The following parameters are set in the vpack and hpack functions + self.glue_set = 0. 
# The glue setting of this list + self.glue_sign = 0 # 0: normal, -1: shrinking, 1: stretching + self.glue_order = 0 # The order of infinity (0 - 3) for the glue + + def __repr__(self): + return '[%s <%.02f %.02f %.02f %.02f> %s]' % ( + super().__repr__(), + self.width, self.height, + self.depth, self.shift_amount, + ' '.join([repr(x) for x in self.children])) + + @staticmethod + def _determine_order(totals): + """ + Determine the highest order of glue used by the members of this list. + + Helper function used by vpack and hpack. + """ + for i in range(len(totals))[::-1]: + if totals[i] != 0: + return i + return 0 + + def _set_glue(self, x, sign, totals, error_type): + o = self._determine_order(totals) + self.glue_order = o + self.glue_sign = sign + if totals[o] != 0.: + self.glue_set = x / totals[o] + else: + self.glue_sign = 0 + self.glue_ratio = 0. + if o == 0: + if len(self.children): + _log.warning("%s %s: %r", + error_type, self.__class__.__name__, self) + + def shrink(self): + for child in self.children: + child.shrink() + super().shrink() + if self.size < NUM_SIZE_LEVELS: + self.shift_amount *= SHRINK_FACTOR + self.glue_set *= SHRINK_FACTOR + + def grow(self): + for child in self.children: + child.grow() + super().grow() + self.shift_amount *= GROW_FACTOR + self.glue_set *= GROW_FACTOR + + +class Hlist(List): + """A horizontal list of boxes.""" + + def __init__(self, elements, w=0., m='additional', do_kern=True): + super().__init__(elements) + if do_kern: + self.kern() + self.hpack() + + def kern(self): + """ + Insert `Kern` nodes between `Char` nodes to set kerning. + + The `Char` nodes themselves determine the amount of kerning they need + (in `~Char.get_kerning`), and this function just creates the correct + linked list. 
+ """ + new_children = [] + num_children = len(self.children) + if num_children: + for i in range(num_children): + elem = self.children[i] + if i < num_children - 1: + next = self.children[i + 1] + else: + next = None + + new_children.append(elem) + kerning_distance = elem.get_kerning(next) + if kerning_distance != 0.: + kern = Kern(kerning_distance) + new_children.append(kern) + self.children = new_children + + # This is a failed experiment to fake cross-font kerning. +# def get_kerning(self, next): +# if len(self.children) >= 2 and isinstance(self.children[-2], Char): +# if isinstance(next, Char): +# print "CASE A" +# return self.children[-2].get_kerning(next) +# elif (isinstance(next, Hlist) and len(next.children) +# and isinstance(next.children[0], Char)): +# print "CASE B" +# result = self.children[-2].get_kerning(next.children[0]) +# print result +# return result +# return 0.0 + + def hpack(self, w=0., m='additional'): + r""" + Compute the dimensions of the resulting boxes, and adjust the glue if + one of those dimensions is pre-specified. The computed sizes normally + enclose all of the material inside the new box; but some items may + stick out if negative glue is used, if the box is overfull, or if a + ``\vbox`` includes other boxes that have been shifted left. + + Parameters + ---------- + w : float, default: 0 + A width. + m : {'exactly', 'additional'}, default: 'additional' + Whether to produce a box whose width is 'exactly' *w*; or a box + with the natural width of the contents, plus *w* ('additional'). + + Notes + ----- + The defaults produce a box with the natural width of the contents. + """ + # I don't know why these get reset in TeX. Shift_amount is pretty + # much useless if we do. + # self.shift_amount = 0. + h = 0. + d = 0. + x = 0. + total_stretch = [0.] * 4 + total_shrink = [0.] 
* 4 + for p in self.children: + if isinstance(p, Char): + x += p.width + h = max(h, p.height) + d = max(d, p.depth) + elif isinstance(p, Box): + x += p.width + if not np.isinf(p.height) and not np.isinf(p.depth): + s = getattr(p, 'shift_amount', 0.) + h = max(h, p.height - s) + d = max(d, p.depth + s) + elif isinstance(p, Glue): + glue_spec = p.glue_spec + x += glue_spec.width + total_stretch[glue_spec.stretch_order] += glue_spec.stretch + total_shrink[glue_spec.shrink_order] += glue_spec.shrink + elif isinstance(p, Kern): + x += p.width + self.height = h + self.depth = d + + if m == 'additional': + w += x + self.width = w + x = w - x + + if x == 0.: + self.glue_sign = 0 + self.glue_order = 0 + self.glue_ratio = 0. + return + if x > 0.: + self._set_glue(x, 1, total_stretch, "Overfull") + else: + self._set_glue(x, -1, total_shrink, "Underfull") + + +class Vlist(List): + """A vertical list of boxes.""" + + def __init__(self, elements, h=0., m='additional'): + super().__init__(elements) + self.vpack() + + def vpack(self, h=0., m='additional', l=np.inf): + """ + Compute the dimensions of the resulting boxes, and to adjust the glue + if one of those dimensions is pre-specified. + + Parameters + ---------- + h : float, default: 0 + A height. + m : {'exactly', 'additional'}, default: 'additional' + Whether to produce a box whose height is 'exactly' *w*; or a box + with the natural height of the contents, plus *w* ('additional'). + l : float, default: np.inf + The maximum height. + + Notes + ----- + The defaults produce a box with the natural height of the contents. + """ + # I don't know why these get reset in TeX. Shift_amount is pretty + # much useless if we do. + # self.shift_amount = 0. + w = 0. + d = 0. + x = 0. + total_stretch = [0.] * 4 + total_shrink = [0.] * 4 + for p in self.children: + if isinstance(p, Box): + x += d + p.height + d = p.depth + if not np.isinf(p.width): + s = getattr(p, 'shift_amount', 0.) 
+ w = max(w, p.width + s) + elif isinstance(p, Glue): + x += d + d = 0. + glue_spec = p.glue_spec + x += glue_spec.width + total_stretch[glue_spec.stretch_order] += glue_spec.stretch + total_shrink[glue_spec.shrink_order] += glue_spec.shrink + elif isinstance(p, Kern): + x += d + p.width + d = 0. + elif isinstance(p, Char): + raise RuntimeError( + "Internal mathtext error: Char node found in Vlist") + + self.width = w + if d > l: + x += d - l + self.depth = l + else: + self.depth = d + + if m == 'additional': + h += x + self.height = h + x = h - x + + if x == 0: + self.glue_sign = 0 + self.glue_order = 0 + self.glue_ratio = 0. + return + + if x > 0.: + self._set_glue(x, 1, total_stretch, "Overfull") + else: + self._set_glue(x, -1, total_shrink, "Underfull") + + +class Rule(Box): + """ + A solid black rectangle. + + It has *width*, *depth*, and *height* fields just as in an `Hlist`. + However, if any of these dimensions is inf, the actual value will be + determined by running the rule up to the boundary of the innermost + enclosing box. This is called a "running dimension". The width is never + running in an `Hlist`; the height and depth are never running in a `Vlist`. 
+ """ + + def __init__(self, width, height, depth, state): + super().__init__(width, height, depth) + self.font_output = state.font_output + + def render(self, x, y, w, h): + self.font_output.render_rect_filled(x, y, x + w, y + h) + + +class Hrule(Rule): + """Convenience class to create a horizontal rule.""" + + def __init__(self, state, thickness=None): + if thickness is None: + thickness = state.font_output.get_underline_thickness( + state.font, state.fontsize, state.dpi) + height = depth = thickness * 0.5 + super().__init__(np.inf, height, depth, state) + + +class Vrule(Rule): + """Convenience class to create a vertical rule.""" + + def __init__(self, state): + thickness = state.font_output.get_underline_thickness( + state.font, state.fontsize, state.dpi) + super().__init__(thickness, np.inf, np.inf, state) + + +_GlueSpec = namedtuple( + "_GlueSpec", "width stretch stretch_order shrink shrink_order") +_GlueSpec._named = { + 'fil': _GlueSpec(0., 1., 1, 0., 0), + 'fill': _GlueSpec(0., 1., 2, 0., 0), + 'filll': _GlueSpec(0., 1., 3, 0., 0), + 'neg_fil': _GlueSpec(0., 0., 0, 1., 1), + 'neg_fill': _GlueSpec(0., 0., 0, 1., 2), + 'neg_filll': _GlueSpec(0., 0., 0, 1., 3), + 'empty': _GlueSpec(0., 0., 0, 0., 0), + 'ss': _GlueSpec(0., 1., 1, -1., 1), +} + + +class Glue(Node): + """ + Most of the information in this object is stored in the underlying + ``_GlueSpec`` class, which is shared between multiple glue objects. + (This is a memory optimization which probably doesn't matter anymore, but + it's easier to stick to what TeX does.) 
+ """ + + def __init__(self, glue_type): + super().__init__() + if isinstance(glue_type, str): + glue_spec = _GlueSpec._named[glue_type] + elif isinstance(glue_type, _GlueSpec): + glue_spec = glue_type + else: + raise ValueError("glue_type must be a glue spec name or instance") + self.glue_spec = glue_spec + + def shrink(self): + super().shrink() + if self.size < NUM_SIZE_LEVELS: + g = self.glue_spec + self.glue_spec = g._replace(width=g.width * SHRINK_FACTOR) + + def grow(self): + super().grow() + g = self.glue_spec + self.glue_spec = g._replace(width=g.width * GROW_FACTOR) + + +class HCentered(Hlist): + """ + A convenience class to create an `Hlist` whose contents are + centered within its enclosing box. + """ + + def __init__(self, elements): + super().__init__([Glue('ss'), *elements, Glue('ss')], do_kern=False) + + +class VCentered(Vlist): + """ + A convenience class to create a `Vlist` whose contents are + centered within its enclosing box. + """ + + def __init__(self, elements): + super().__init__([Glue('ss'), *elements, Glue('ss')]) + + +class Kern(Node): + """ + A `Kern` node has a width field to specify a (normally + negative) amount of spacing. This spacing correction appears in + horizontal lists between letters like A and V when the font + designer said that it looks better to move them closer together or + further apart. A kern node can also appear in a vertical list, + when its *width* denotes additional spacing in the vertical + direction. + """ + + height = 0 + depth = 0 + + def __init__(self, width): + super().__init__() + self.width = width + + def __repr__(self): + return "k%.02f" % self.width + + def shrink(self): + super().shrink() + if self.size < NUM_SIZE_LEVELS: + self.width *= SHRINK_FACTOR + + def grow(self): + super().grow() + self.width *= GROW_FACTOR + + +class SubSuperCluster(Hlist): + """ + A hack to get around that fact that this code does a two-pass parse like + TeX. 
This lets us store enough information in the hlist itself, namely the + nucleus, sub- and super-script, such that if another script follows that + needs to be attached, it can be reconfigured on the fly. + """ + + def __init__(self): + self.nucleus = None + self.sub = None + self.super = None + super().__init__([]) + + +class AutoHeightChar(Hlist): + """ + A character as close to the given height and depth as possible. + + When using a font with multiple height versions of some characters (such as + the BaKoMa fonts), the correct glyph will be selected, otherwise this will + always just return a scaled version of the glyph. + """ + + def __init__(self, c, height, depth, state, always=False, factor=None): + alternatives = state.font_output.get_sized_alternatives_for_symbol( + state.font, c) + + xHeight = state.font_output.get_xheight( + state.font, state.fontsize, state.dpi) + + state = state.copy() + target_total = height + depth + for fontname, sym in alternatives: + state.font = fontname + char = Char(sym, state) + # Ensure that size 0 is chosen when the text is regular sized but + # with descender glyphs by subtracting 0.2 * xHeight + if char.height + char.depth >= target_total - 0.2 * xHeight: + break + + shift = 0 + if state.font != 0: + if factor is None: + factor = target_total / (char.height + char.depth) + state.fontsize *= factor + char = Char(sym, state) + + shift = (depth - char.depth) + + super().__init__([char]) + self.shift_amount = shift + + +class AutoWidthChar(Hlist): + """ + A character as close to the given width as possible. + + When using a font with multiple width versions of some characters (such as + the BaKoMa fonts), the correct glyph will be selected, otherwise this will + always just return a scaled version of the glyph. 
+ """ + + def __init__(self, c, width, state, always=False, char_class=Char): + alternatives = state.font_output.get_sized_alternatives_for_symbol( + state.font, c) + + state = state.copy() + for fontname, sym in alternatives: + state.font = fontname + char = char_class(sym, state) + if char.width >= width: + break + + factor = width / char.width + state.fontsize *= factor + char = char_class(sym, state) + + super().__init__([char]) + self.width = char.width + + +class Ship: + """ + Ship boxes to output once they have been set up, this sends them to output. + + Since boxes can be inside of boxes inside of boxes, the main work of `Ship` + is done by two mutually recursive routines, `hlist_out` and `vlist_out`, + which traverse the `Hlist` nodes and `Vlist` nodes inside of horizontal + and vertical boxes. The global variables used in TeX to store state as it + processes have become member variables here. + """ + + def __call__(self, ox, oy, box): + self.max_push = 0 # Deepest nesting of push commands so far + self.cur_s = 0 + self.cur_v = 0. + self.cur_h = 0. + self.off_h = ox + self.off_v = oy + box.height + self.hlist_out(box) + + @staticmethod + def clamp(value): + if value < -1000000000.: + return -1000000000. + if value > 1000000000.: + return 1000000000. + return value + + def hlist_out(self, box): + cur_g = 0 + cur_glue = 0. 
+ glue_order = box.glue_order + glue_sign = box.glue_sign + base_line = self.cur_v + left_edge = self.cur_h + self.cur_s += 1 + self.max_push = max(self.cur_s, self.max_push) + clamp = self.clamp + + for p in box.children: + if isinstance(p, Char): + p.render(self.cur_h + self.off_h, self.cur_v + self.off_v) + self.cur_h += p.width + elif isinstance(p, Kern): + self.cur_h += p.width + elif isinstance(p, List): + # node623 + if len(p.children) == 0: + self.cur_h += p.width + else: + edge = self.cur_h + self.cur_v = base_line + p.shift_amount + if isinstance(p, Hlist): + self.hlist_out(p) + else: + # p.vpack(box.height + box.depth, 'exactly') + self.vlist_out(p) + self.cur_h = edge + p.width + self.cur_v = base_line + elif isinstance(p, Box): + # node624 + rule_height = p.height + rule_depth = p.depth + rule_width = p.width + if np.isinf(rule_height): + rule_height = box.height + if np.isinf(rule_depth): + rule_depth = box.depth + if rule_height > 0 and rule_width > 0: + self.cur_v = base_line + rule_depth + p.render(self.cur_h + self.off_h, + self.cur_v + self.off_v, + rule_width, rule_height) + self.cur_v = base_line + self.cur_h += rule_width + elif isinstance(p, Glue): + # node625 + glue_spec = p.glue_spec + rule_width = glue_spec.width - cur_g + if glue_sign != 0: # normal + if glue_sign == 1: # stretching + if glue_spec.stretch_order == glue_order: + cur_glue += glue_spec.stretch + cur_g = round(clamp(box.glue_set * cur_glue)) + elif glue_spec.shrink_order == glue_order: + cur_glue += glue_spec.shrink + cur_g = round(clamp(box.glue_set * cur_glue)) + rule_width += cur_g + self.cur_h += rule_width + self.cur_s -= 1 + + def vlist_out(self, box): + cur_g = 0 + cur_glue = 0. 
+ glue_order = box.glue_order + glue_sign = box.glue_sign + self.cur_s += 1 + self.max_push = max(self.max_push, self.cur_s) + left_edge = self.cur_h + self.cur_v -= box.height + top_edge = self.cur_v + clamp = self.clamp + + for p in box.children: + if isinstance(p, Kern): + self.cur_v += p.width + elif isinstance(p, List): + if len(p.children) == 0: + self.cur_v += p.height + p.depth + else: + self.cur_v += p.height + self.cur_h = left_edge + p.shift_amount + save_v = self.cur_v + p.width = box.width + if isinstance(p, Hlist): + self.hlist_out(p) + else: + self.vlist_out(p) + self.cur_v = save_v + p.depth + self.cur_h = left_edge + elif isinstance(p, Box): + rule_height = p.height + rule_depth = p.depth + rule_width = p.width + if np.isinf(rule_width): + rule_width = box.width + rule_height += rule_depth + if rule_height > 0 and rule_depth > 0: + self.cur_v += rule_height + p.render(self.cur_h + self.off_h, + self.cur_v + self.off_v, + rule_width, rule_height) + elif isinstance(p, Glue): + glue_spec = p.glue_spec + rule_height = glue_spec.width - cur_g + if glue_sign != 0: # normal + if glue_sign == 1: # stretching + if glue_spec.stretch_order == glue_order: + cur_glue += glue_spec.stretch + cur_g = round(clamp(box.glue_set * cur_glue)) + elif glue_spec.shrink_order == glue_order: # shrinking + cur_glue += glue_spec.shrink + cur_g = round(clamp(box.glue_set * cur_glue)) + rule_height += cur_g + self.cur_v += rule_height + elif isinstance(p, Char): + raise RuntimeError( + "Internal mathtext error: Char node found in vlist") + self.cur_s -= 1 + + +ship = Ship() + + +############################################################################## +# PARSER + + +def Error(msg): + """Helper class to raise parser errors.""" + def raise_error(s, loc, toks): + raise ParseFatalException(s, loc, msg) + + empty = Empty() + empty.setParseAction(raise_error) + return empty + + +class Parser: + """ + A pyparsing-based parser for strings containing math expressions. 
+ + Raw text may also appear outside of pairs of ``$``. + + The grammar is based directly on that in TeX, though it cuts a few corners. + """ + + class _MathStyle(enum.Enum): + DISPLAYSTYLE = enum.auto() + TEXTSTYLE = enum.auto() + SCRIPTSTYLE = enum.auto() + SCRIPTSCRIPTSTYLE = enum.auto() + + _binary_operators = set(''' + + * - + \\pm \\sqcap \\rhd + \\mp \\sqcup \\unlhd + \\times \\vee \\unrhd + \\div \\wedge \\oplus + \\ast \\setminus \\ominus + \\star \\wr \\otimes + \\circ \\diamond \\oslash + \\bullet \\bigtriangleup \\odot + \\cdot \\bigtriangledown \\bigcirc + \\cap \\triangleleft \\dagger + \\cup \\triangleright \\ddagger + \\uplus \\lhd \\amalg'''.split()) + + _relation_symbols = set(''' + = < > : + \\leq \\geq \\equiv \\models + \\prec \\succ \\sim \\perp + \\preceq \\succeq \\simeq \\mid + \\ll \\gg \\asymp \\parallel + \\subset \\supset \\approx \\bowtie + \\subseteq \\supseteq \\cong \\Join + \\sqsubset \\sqsupset \\neq \\smile + \\sqsubseteq \\sqsupseteq \\doteq \\frown + \\in \\ni \\propto \\vdash + \\dashv \\dots \\dotplus \\doteqdot'''.split()) + + _arrow_symbols = set(''' + \\leftarrow \\longleftarrow \\uparrow + \\Leftarrow \\Longleftarrow \\Uparrow + \\rightarrow \\longrightarrow \\downarrow + \\Rightarrow \\Longrightarrow \\Downarrow + \\leftrightarrow \\longleftrightarrow \\updownarrow + \\Leftrightarrow \\Longleftrightarrow \\Updownarrow + \\mapsto \\longmapsto \\nearrow + \\hookleftarrow \\hookrightarrow \\searrow + \\leftharpoonup \\rightharpoonup \\swarrow + \\leftharpoondown \\rightharpoondown \\nwarrow + \\rightleftharpoons \\leadsto'''.split()) + + _spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols + + _punctuation_symbols = set(r', ; . ! 
\ldotp \cdotp'.split()) + + _overunder_symbols = set(r''' + \sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee + \bigwedge \bigodot \bigotimes \bigoplus \biguplus + '''.split()) + + _overunder_functions = set( + "lim liminf limsup sup max min".split()) + + _dropsub_symbols = set(r'''\int \oint'''.split()) + + _fontnames = set("rm cal it tt sf bf default bb frak scr regular".split()) + + _function_names = set(""" + arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim + liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan + coth inf max tanh""".split()) + + _ambi_delim = set(""" + | \\| / \\backslash \\uparrow \\downarrow \\updownarrow \\Uparrow + \\Downarrow \\Updownarrow . \\vert \\Vert \\\\|""".split()) + + _left_delim = set(r"( [ \{ < \lfloor \langle \lceil".split()) + + _right_delim = set(r") ] \} > \rfloor \rangle \rceil".split()) + + def __init__(self): + p = types.SimpleNamespace() + # All forward declarations are here + p.accent = Forward() + p.ambi_delim = Forward() + p.apostrophe = Forward() + p.auto_delim = Forward() + p.binom = Forward() + p.bslash = Forward() + p.c_over_c = Forward() + p.customspace = Forward() + p.end_group = Forward() + p.float_literal = Forward() + p.font = Forward() + p.frac = Forward() + p.dfrac = Forward() + p.function = Forward() + p.genfrac = Forward() + p.group = Forward() + p.int_literal = Forward() + p.latexfont = Forward() + p.lbracket = Forward() + p.left_delim = Forward() + p.lbrace = Forward() + p.main = Forward() + p.math = Forward() + p.math_string = Forward() + p.non_math = Forward() + p.operatorname = Forward() + p.overline = Forward() + p.overset = Forward() + p.placeable = Forward() + p.rbrace = Forward() + p.rbracket = Forward() + p.required_group = Forward() + p.right_delim = Forward() + p.right_delim_safe = Forward() + p.simple = Forward() + p.simple_group = Forward() + p.single_symbol = Forward() + p.accentprefixed = Forward() + p.space = Forward() + p.sqrt = Forward() + p.start_group = 
Forward() + p.subsuper = Forward() + p.subsuperop = Forward() + p.symbol = Forward() + p.symbol_name = Forward() + p.token = Forward() + p.underset = Forward() + p.unknown_symbol = Forward() + + # Set names on everything -- very useful for debugging + for key, val in vars(p).items(): + if not key.startswith('_'): + val.setName(key) + + p.float_literal <<= Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)") + p.int_literal <<= Regex("[-+]?[0-9]+") + + p.lbrace <<= Literal('{').suppress() + p.rbrace <<= Literal('}').suppress() + p.lbracket <<= Literal('[').suppress() + p.rbracket <<= Literal(']').suppress() + p.bslash <<= Literal('\\') + + p.space <<= oneOf(list(self._space_widths)) + p.customspace <<= ( + Suppress(Literal(r'\hspace')) + - ((p.lbrace + p.float_literal + p.rbrace) + | Error(r"Expected \hspace{n}")) + ) + + unicode_range = "\U00000080-\U0001ffff" + p.single_symbol <<= Regex( + r"([a-zA-Z0-9 +\-*/<>=:,.;!\?&'@()\[\]|%s])|(\\[%%${}\[\]_|])" % + unicode_range) + p.accentprefixed <<= Suppress(p.bslash) + oneOf(self._accentprefixed) + p.symbol_name <<= ( + Combine(p.bslash + oneOf(list(tex2uni))) + + Suppress(Regex("(?=[^A-Za-z]|$)").leaveWhitespace()) + ) + p.symbol <<= (p.single_symbol | p.symbol_name).leaveWhitespace() + + p.apostrophe <<= Regex("'+") + + p.c_over_c <<= ( + Suppress(p.bslash) + + oneOf(list(self._char_over_chars)) + ) + + p.accent <<= Group( + Suppress(p.bslash) + + oneOf([*self._accent_map, *self._wide_accents]) + + Suppress(Optional(White())) + - p.placeable + ) + + p.function <<= ( + Suppress(p.bslash) + + oneOf(list(self._function_names)) + ) + + p.start_group <<= Optional(p.latexfont) + p.lbrace + p.end_group <<= p.rbrace.copy() + p.simple_group <<= Group(p.lbrace + ZeroOrMore(p.token) + p.rbrace) + p.required_group <<= Group(p.lbrace + OneOrMore(p.token) + p.rbrace) + p.group <<= Group( + p.start_group + ZeroOrMore(p.token) + p.end_group + ) + + p.font <<= Suppress(p.bslash) + oneOf(list(self._fontnames)) + p.latexfont <<= ( + 
Suppress(p.bslash) + + oneOf(['math' + x for x in self._fontnames]) + ) + + p.frac <<= Group( + Suppress(Literal(r"\frac")) + - ((p.required_group + p.required_group) + | Error(r"Expected \frac{num}{den}")) + ) + + p.dfrac <<= Group( + Suppress(Literal(r"\dfrac")) + - ((p.required_group + p.required_group) + | Error(r"Expected \dfrac{num}{den}")) + ) + + p.binom <<= Group( + Suppress(Literal(r"\binom")) + - ((p.required_group + p.required_group) + | Error(r"Expected \binom{num}{den}")) + ) + + p.ambi_delim <<= oneOf(list(self._ambi_delim)) + p.left_delim <<= oneOf(list(self._left_delim)) + p.right_delim <<= oneOf(list(self._right_delim)) + p.right_delim_safe <<= oneOf([*(self._right_delim - {'}'}), r'\}']) + + p.genfrac <<= Group( + Suppress(Literal(r"\genfrac")) + - (((p.lbrace + + Optional(p.ambi_delim | p.left_delim, default='') + + p.rbrace) + + (p.lbrace + + Optional(p.ambi_delim | p.right_delim_safe, default='') + + p.rbrace) + + (p.lbrace + p.float_literal + p.rbrace) + + p.simple_group + p.required_group + p.required_group) + | Error("Expected " + r"\genfrac{ldelim}{rdelim}{rulesize}{style}{num}{den}")) + ) + + p.sqrt <<= Group( + Suppress(Literal(r"\sqrt")) + - ((Group(Optional( + p.lbracket + OneOrMore(~p.rbracket + p.token) + p.rbracket)) + + p.required_group) + | Error("Expected \\sqrt{value}")) + ) + + p.overline <<= Group( + Suppress(Literal(r"\overline")) + - (p.required_group | Error("Expected \\overline{value}")) + ) + + p.overset <<= Group( + Suppress(Literal(r"\overset")) + - ((p.simple_group + p.simple_group) + | Error("Expected \\overset{body}{annotation}")) + ) + + p.underset <<= Group( + Suppress(Literal(r"\underset")) + - ((p.simple_group + p.simple_group) + | Error("Expected \\underset{body}{annotation}")) + ) + + p.unknown_symbol <<= Combine(p.bslash + Regex("[A-Za-z]*")) + + p.operatorname <<= Group( + Suppress(Literal(r"\operatorname")) + - ((p.lbrace + ZeroOrMore(p.simple | p.unknown_symbol) + p.rbrace) + | Error("Expected 
\\operatorname{value}")) + ) + + p.placeable <<= ( + p.accentprefixed # Must be before accent so named symbols that are + # prefixed with an accent name work + | p.accent # Must be before symbol as all accents are symbols + | p.symbol # Must be third to catch all named symbols and single + # chars not in a group + | p.c_over_c + | p.function + | p.group + | p.frac + | p.dfrac + | p.binom + | p.genfrac + | p.overset + | p.underset + | p.sqrt + | p.overline + | p.operatorname + ) + + p.simple <<= ( + p.space + | p.customspace + | p.font + | p.subsuper + ) + + p.subsuperop <<= oneOf(["_", "^"]) + + p.subsuper <<= Group( + (Optional(p.placeable) + + OneOrMore(p.subsuperop - p.placeable) + + Optional(p.apostrophe)) + | (p.placeable + Optional(p.apostrophe)) + | p.apostrophe + ) + + p.token <<= ( + p.simple + | p.auto_delim + | p.unknown_symbol # Must be last + ) + + p.auto_delim <<= ( + Suppress(Literal(r"\left")) + - ((p.left_delim | p.ambi_delim) + | Error("Expected a delimiter")) + + Group(ZeroOrMore(p.simple | p.auto_delim)) + + Suppress(Literal(r"\right")) + - ((p.right_delim | p.ambi_delim) + | Error("Expected a delimiter")) + ) + + p.math <<= OneOrMore(p.token) + + p.math_string <<= QuotedString('$', '\\', unquoteResults=False) + + p.non_math <<= Regex(r"(?:(?:\\[$])|[^$])*").leaveWhitespace() + + p.main <<= ( + p.non_math + ZeroOrMore(p.math_string + p.non_math) + StringEnd() + ) + + # Set actions + for key, val in vars(p).items(): + if not key.startswith('_'): + if hasattr(self, key): + val.setParseAction(getattr(self, key)) + + self._expression = p.main + self._math_expression = p.math + + def parse(self, s, fonts_object, fontsize, dpi): + """ + Parse expression *s* using the given *fonts_object* for + output, at the given *fontsize* and *dpi*. + + Returns the parse tree of `Node` instances. 
+ """ + self._state_stack = [ + self.State(fonts_object, 'default', 'rm', fontsize, dpi)] + self._em_width_cache = {} + try: + result = self._expression.parseString(s) + except ParseBaseException as err: + raise ValueError("\n".join(["", + err.line, + " " * (err.column - 1) + "^", + str(err)])) from err + self._state_stack = None + self._em_width_cache = {} + self._expression.resetCache() + return result[0] + + # The state of the parser is maintained in a stack. Upon + # entering and leaving a group { } or math/non-math, the stack + # is pushed and popped accordingly. The current state always + # exists in the top element of the stack. + class State: + """ + Stores the state of the parser. + + States are pushed and popped from a stack as necessary, and + the "current" state is always at the top of the stack. + """ + def __init__(self, font_output, font, font_class, fontsize, dpi): + self.font_output = font_output + self._font = font + self.font_class = font_class + self.fontsize = fontsize + self.dpi = dpi + + def copy(self): + return Parser.State( + self.font_output, + self.font, + self.font_class, + self.fontsize, + self.dpi) + + @property + def font(self): + return self._font + + @font.setter + def font(self, name): + if name in ('rm', 'it', 'bf'): + self.font_class = name + self._font = name + + def get_state(self): + """Get the current `State` of the parser.""" + return self._state_stack[-1] + + def pop_state(self): + """Pop a `State` off of the stack.""" + self._state_stack.pop() + + def push_state(self): + """Push a new `State` onto the stack, copying the current state.""" + self._state_stack.append(self.get_state().copy()) + + def main(self, s, loc, toks): + return [Hlist(toks)] + + def math_string(self, s, loc, toks): + return self._math_expression.parseString(toks[0][1:-1]) + + def math(self, s, loc, toks): + hlist = Hlist(toks) + self.pop_state() + return [hlist] + + def non_math(self, s, loc, toks): + s = toks[0].replace(r'\$', '$') + symbols = [Char(c, 
self.get_state(), math=False) for c in s] + hlist = Hlist(symbols) + # We're going into math now, so set font to 'it' + self.push_state() + self.get_state().font = mpl.rcParams['mathtext.default'] + return [hlist] + + def _make_space(self, percentage): + # All spaces are relative to em width + state = self.get_state() + key = (state.font, state.fontsize, state.dpi) + width = self._em_width_cache.get(key) + if width is None: + metrics = state.font_output.get_metrics( + state.font, mpl.rcParams['mathtext.default'], 'm', + state.fontsize, state.dpi) + width = metrics.advance + self._em_width_cache[key] = width + return Kern(width * percentage) + + _space_widths = { + r'\,': 0.16667, # 3/18 em = 3 mu + r'\thinspace': 0.16667, # 3/18 em = 3 mu + r'\/': 0.16667, # 3/18 em = 3 mu + r'\>': 0.22222, # 4/18 em = 4 mu + r'\:': 0.22222, # 4/18 em = 4 mu + r'\;': 0.27778, # 5/18 em = 5 mu + r'\ ': 0.33333, # 6/18 em = 6 mu + r'~': 0.33333, # 6/18 em = 6 mu, nonbreakable + r'\enspace': 0.5, # 9/18 em = 9 mu + r'\quad': 1, # 1 em = 18 mu + r'\qquad': 2, # 2 em = 36 mu + r'\!': -0.16667, # -3/18 em = -3 mu + } + + def space(self, s, loc, toks): + tok, = toks + num = self._space_widths[tok] + box = self._make_space(num) + return [box] + + def customspace(self, s, loc, toks): + return [self._make_space(float(toks[0]))] + + def symbol(self, s, loc, toks): + c, = toks + try: + char = Char(c, self.get_state()) + except ValueError as err: + raise ParseFatalException(s, loc, + "Unknown symbol: %s" % c) from err + + if c in self._spaced_symbols: + # iterate until we find previous character, needed for cases + # such as ${ -2}$, $ -2$, or $ -2$. 
+ prev_char = next((c for c in s[:loc][::-1] if c != ' '), '') + # Binary operators at start of string should not be spaced + if (c in self._binary_operators and + (len(s[:loc].split()) == 0 or prev_char == '{' or + prev_char in self._left_delim)): + return [char] + else: + return [Hlist([self._make_space(0.2), + char, + self._make_space(0.2)], + do_kern=True)] + elif c in self._punctuation_symbols: + + # Do not space commas between brackets + if c == ',': + prev_char = next((c for c in s[:loc][::-1] if c != ' '), '') + next_char = next((c for c in s[loc + 1:] if c != ' '), '') + if prev_char == '{' and next_char == '}': + return [char] + + # Do not space dots as decimal separators + if c == '.' and s[loc - 1].isdigit() and s[loc + 1].isdigit(): + return [char] + else: + return [Hlist([char, self._make_space(0.2)], do_kern=True)] + return [char] + + accentprefixed = symbol + + def unknown_symbol(self, s, loc, toks): + c, = toks + raise ParseFatalException(s, loc, "Unknown symbol: %s" % c) + + _char_over_chars = { + # The first 2 entries in the tuple are (font, char, sizescale) for + # the two symbols under and over. 
The third element is the space + # (in multiples of underline height) + r'AA': (('it', 'A', 1.0), (None, '\\circ', 0.5), 0.0), + } + + def c_over_c(self, s, loc, toks): + sym, = toks + state = self.get_state() + thickness = state.font_output.get_underline_thickness( + state.font, state.fontsize, state.dpi) + + under_desc, over_desc, space = \ + self._char_over_chars.get(sym, (None, None, 0.0)) + if under_desc is None: + raise ParseFatalException("Error parsing symbol") + + over_state = state.copy() + if over_desc[0] is not None: + over_state.font = over_desc[0] + over_state.fontsize *= over_desc[2] + over = Accent(over_desc[1], over_state) + + under_state = state.copy() + if under_desc[0] is not None: + under_state.font = under_desc[0] + under_state.fontsize *= under_desc[2] + under = Char(under_desc[1], under_state) + + width = max(over.width, under.width) + + over_centered = HCentered([over]) + over_centered.hpack(width, 'exactly') + + under_centered = HCentered([under]) + under_centered.hpack(width, 'exactly') + + return Vlist([ + over_centered, + Vbox(0., thickness * space), + under_centered + ]) + + _accent_map = { + r'hat': r'\circumflexaccent', + r'breve': r'\combiningbreve', + r'bar': r'\combiningoverline', + r'grave': r'\combininggraveaccent', + r'acute': r'\combiningacuteaccent', + r'tilde': r'\combiningtilde', + r'dot': r'\combiningdotabove', + r'ddot': r'\combiningdiaeresis', + r'dddot': r'\combiningthreedotsabove', + r'ddddot': r'\combiningfourdotsabove', + r'vec': r'\combiningrightarrowabove', + r'"': r'\combiningdiaeresis', + r"`": r'\combininggraveaccent', + r"'": r'\combiningacuteaccent', + r'~': r'\combiningtilde', + r'.': r'\combiningdotabove', + r'^': r'\circumflexaccent', + r'overrightarrow': r'\rightarrow', + r'overleftarrow': r'\leftarrow', + r'mathring': r'\circ', + } + + _wide_accents = set(r"widehat widetilde widebar".split()) + + # make a lambda and call it to get the namespace right + _accentprefixed = (lambda am: [ + p for p in tex2uni 
+ if any(p.startswith(a) and a != p for a in am) + ])(set(_accent_map)) + + def accent(self, s, loc, toks): + state = self.get_state() + thickness = state.font_output.get_underline_thickness( + state.font, state.fontsize, state.dpi) + (accent, sym), = toks + if accent in self._wide_accents: + accent_box = AutoWidthChar( + '\\' + accent, sym.width, state, char_class=Accent) + else: + accent_box = Accent(self._accent_map[accent], state) + if accent == 'mathring': + accent_box.shrink() + accent_box.shrink() + centered = HCentered([Hbox(sym.width / 4.0), accent_box]) + centered.hpack(sym.width, 'exactly') + return Vlist([ + centered, + Vbox(0., thickness * 2.0), + Hlist([sym]) + ]) + + def function(self, s, loc, toks): + hlist = self.operatorname(s, loc, toks) + hlist.function_name, = toks + return hlist + + def operatorname(self, s, loc, toks): + self.push_state() + state = self.get_state() + state.font = 'rm' + hlist_list = [] + # Change the font of Chars, but leave Kerns alone + for c in toks[0]: + if isinstance(c, Char): + c.font = 'rm' + c._update_metrics() + hlist_list.append(c) + elif isinstance(c, str): + hlist_list.append(Char(c, state)) + else: + hlist_list.append(c) + next_char_loc = loc + len(toks[0]) + 1 + if isinstance(toks[0], ParseResults): + next_char_loc += len('operatorname{}') + next_char = next((c for c in s[next_char_loc:] if c != ' '), '') + delimiters = self._left_delim | self._ambi_delim | self._right_delim + delimiters |= {'^', '_'} + if (next_char not in delimiters and + toks[0] not in self._overunder_functions): + # Add thin space except when followed by parenthesis, bracket, etc. 
+ hlist_list += [self._make_space(self._space_widths[r'\,'])] + self.pop_state() + return Hlist(hlist_list) + + def start_group(self, s, loc, toks): + self.push_state() + # Deal with LaTeX-style font tokens + if len(toks): + self.get_state().font = toks[0][4:] + return [] + + def group(self, s, loc, toks): + grp = Hlist(toks[0]) + return [grp] + required_group = simple_group = group + + def end_group(self, s, loc, toks): + self.pop_state() + return [] + + def font(self, s, loc, toks): + name, = toks + self.get_state().font = name + return [] + + def is_overunder(self, nucleus): + if isinstance(nucleus, Char): + return nucleus.c in self._overunder_symbols + elif isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'): + return nucleus.function_name in self._overunder_functions + return False + + def is_dropsub(self, nucleus): + if isinstance(nucleus, Char): + return nucleus.c in self._dropsub_symbols + return False + + def is_slanted(self, nucleus): + if isinstance(nucleus, Char): + return nucleus.is_slanted() + return False + + def is_between_brackets(self, s, loc): + return False + + def subsuper(self, s, loc, toks): + assert len(toks) == 1 + + nucleus = None + sub = None + super = None + + # Pick all of the apostrophes out, including first apostrophes that + # have been parsed as characters + napostrophes = 0 + new_toks = [] + for tok in toks[0]: + if isinstance(tok, str) and tok not in ('^', '_'): + napostrophes += len(tok) + elif isinstance(tok, Char) and tok.c == "'": + napostrophes += 1 + else: + new_toks.append(tok) + toks = new_toks + + if len(toks) == 0: + assert napostrophes + nucleus = Hbox(0.0) + elif len(toks) == 1: + if not napostrophes: + return toks[0] # .asList() + else: + nucleus = toks[0] + elif len(toks) in (2, 3): + # single subscript or superscript + nucleus = toks[0] if len(toks) == 3 else Hbox(0.0) + op, next = toks[-2:] + if op == '_': + sub = next + else: + super = next + elif len(toks) in (4, 5): + # subscript and superscript + 
nucleus = toks[0] if len(toks) == 5 else Hbox(0.0) + op1, next1, op2, next2 = toks[-4:] + if op1 == op2: + if op1 == '_': + raise ParseFatalException("Double subscript") + else: + raise ParseFatalException("Double superscript") + if op1 == '_': + sub = next1 + super = next2 + else: + super = next1 + sub = next2 + else: + raise ParseFatalException( + "Subscript/superscript sequence is too long. " + "Use braces { } to remove ambiguity.") + + state = self.get_state() + rule_thickness = state.font_output.get_underline_thickness( + state.font, state.fontsize, state.dpi) + xHeight = state.font_output.get_xheight( + state.font, state.fontsize, state.dpi) + + if napostrophes: + if super is None: + super = Hlist([]) + for i in range(napostrophes): + super.children.extend(self.symbol(s, loc, ['\\prime'])) + # kern() and hpack() needed to get the metrics right after + # extending + super.kern() + super.hpack() + + # Handle over/under symbols, such as sum or prod + if self.is_overunder(nucleus): + vlist = [] + shift = 0. 
+ width = nucleus.width + if super is not None: + super.shrink() + width = max(width, super.width) + if sub is not None: + sub.shrink() + width = max(width, sub.width) + + vgap = rule_thickness * 3.0 + if super is not None: + hlist = HCentered([super]) + hlist.hpack(width, 'exactly') + vlist.extend([hlist, Vbox(0, vgap)]) + hlist = HCentered([nucleus]) + hlist.hpack(width, 'exactly') + vlist.append(hlist) + if sub is not None: + hlist = HCentered([sub]) + hlist.hpack(width, 'exactly') + vlist.extend([Vbox(0, vgap), hlist]) + shift = hlist.height + vgap + vlist = Vlist(vlist) + vlist.shift_amount = shift + nucleus.depth + result = Hlist([vlist]) + return [result] + + # We remove kerning on the last character for consistency (otherwise + # it will compute kerning based on non-shrunk characters and may put + # them too close together when superscripted) + # We change the width of the last character to match the advance to + # consider some fonts with weird metrics: e.g. stix's f has a width of + # 7.75 and a kerning of -4.0 for an advance of 3.72, and we want to put + # the superscript at the advance + last_char = nucleus + if isinstance(nucleus, Hlist): + new_children = nucleus.children + if len(new_children): + # remove last kern + if (isinstance(new_children[-1], Kern) and + hasattr(new_children[-2], '_metrics')): + new_children = new_children[:-1] + last_char = new_children[-1] + if hasattr(last_char, '_metrics'): + last_char.width = last_char._metrics.advance + # create new Hlist without kerning + nucleus = Hlist(new_children, do_kern=False) + else: + if isinstance(nucleus, Char): + last_char.width = last_char._metrics.advance + nucleus = Hlist([nucleus]) + + # Handle regular sub/superscripts + constants = _get_font_constant_set(state) + lc_height = last_char.height + lc_baseline = 0 + if self.is_dropsub(last_char): + lc_baseline = last_char.depth + + # Compute kerning for sub and super + superkern = constants.delta * xHeight + subkern = constants.delta * xHeight 
+ if self.is_slanted(last_char): + superkern += constants.delta * xHeight + superkern += (constants.delta_slanted * + (lc_height - xHeight * 2. / 3.)) + if self.is_dropsub(last_char): + subkern = (3 * constants.delta - + constants.delta_integral) * lc_height + superkern = (3 * constants.delta + + constants.delta_integral) * lc_height + else: + subkern = 0 + + if super is None: + # node757 + x = Hlist([Kern(subkern), sub]) + x.shrink() + if self.is_dropsub(last_char): + shift_down = lc_baseline + constants.subdrop * xHeight + else: + shift_down = constants.sub1 * xHeight + x.shift_amount = shift_down + else: + x = Hlist([Kern(superkern), super]) + x.shrink() + if self.is_dropsub(last_char): + shift_up = lc_height - constants.subdrop * xHeight + else: + shift_up = constants.sup1 * xHeight + if sub is None: + x.shift_amount = -shift_up + else: # Both sub and superscript + y = Hlist([Kern(subkern), sub]) + y.shrink() + if self.is_dropsub(last_char): + shift_down = lc_baseline + constants.subdrop * xHeight + else: + shift_down = constants.sub2 * xHeight + # If sub and superscript collide, move super up + clr = (2.0 * rule_thickness - + ((shift_up - x.depth) - (y.height - shift_down))) + if clr > 0.: + shift_up += clr + x = Vlist([ + x, + Kern((shift_up - x.depth) - (y.height - shift_down)), + y]) + x.shift_amount = shift_down + + if not self.is_dropsub(last_char): + x.width += constants.script_space * xHeight + result = Hlist([nucleus, x]) + + return [result] + + def _genfrac(self, ldelim, rdelim, rule, style, num, den): + state = self.get_state() + thickness = state.font_output.get_underline_thickness( + state.font, state.fontsize, state.dpi) + + rule = float(rule) + + if style is not self._MathStyle.DISPLAYSTYLE: + num.shrink() + den.shrink() + cnum = HCentered([num]) + cden = HCentered([den]) + width = max(num.width, den.width) + cnum.hpack(width, 'exactly') + cden.hpack(width, 'exactly') + vlist = Vlist([cnum, # numerator + Vbox(0, thickness * 2.0), # space + 
Hrule(state, rule), # rule + Vbox(0, thickness * 2.0), # space + cden # denominator + ]) + + # Shift so the fraction line sits in the middle of the + # equals sign + metrics = state.font_output.get_metrics( + state.font, mpl.rcParams['mathtext.default'], + '=', state.fontsize, state.dpi) + shift = (cden.height - + ((metrics.ymax + metrics.ymin) / 2 - + thickness * 3.0)) + vlist.shift_amount = shift + + result = [Hlist([vlist, Hbox(thickness * 2.)])] + if ldelim or rdelim: + if ldelim == '': + ldelim = '.' + if rdelim == '': + rdelim = '.' + return self._auto_sized_delimiter(ldelim, result, rdelim) + return result + + def genfrac(self, s, loc, toks): + args, = toks + return self._genfrac(*args) + + def frac(self, s, loc, toks): + state = self.get_state() + thickness = state.font_output.get_underline_thickness( + state.font, state.fontsize, state.dpi) + (num, den), = toks + return self._genfrac('', '', thickness, self._MathStyle.TEXTSTYLE, + num, den) + + def dfrac(self, s, loc, toks): + state = self.get_state() + thickness = state.font_output.get_underline_thickness( + state.font, state.fontsize, state.dpi) + (num, den), = toks + return self._genfrac('', '', thickness, self._MathStyle.DISPLAYSTYLE, + num, den) + + def binom(self, s, loc, toks): + (num, den), = toks + return self._genfrac('(', ')', 0.0, self._MathStyle.TEXTSTYLE, + num, den) + + def _genset(self, s, loc, toks): + (annotation, body), = toks + state = self.get_state() + thickness = state.font_output.get_underline_thickness( + state.font, state.fontsize, state.dpi) + + annotation.shrink() + cannotation = HCentered([annotation]) + cbody = HCentered([body]) + width = max(cannotation.width, cbody.width) + cannotation.hpack(width, 'exactly') + cbody.hpack(width, 'exactly') + + vgap = thickness * 3 + if s[loc + 1] == "u": # \underset + vlist = Vlist([cbody, # body + Vbox(0, vgap), # space + cannotation # annotation + ]) + # Shift so the body sits in the same vertical position + vlist.shift_amount = 
cbody.depth + cannotation.height + vgap + else: # \overset + vlist = Vlist([cannotation, # annotation + Vbox(0, vgap), # space + cbody # body + ]) + + # To add horizontal gap between symbols: wrap the Vlist into + # an Hlist and extend it with an Hbox(0, horizontal_gap) + return vlist + + overset = underset = _genset + + def sqrt(self, s, loc, toks): + (root, body), = toks + state = self.get_state() + thickness = state.font_output.get_underline_thickness( + state.font, state.fontsize, state.dpi) + + # Determine the height of the body, and add a little extra to + # the height so it doesn't seem cramped + height = body.height - body.shift_amount + thickness * 5.0 + depth = body.depth + body.shift_amount + check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True) + height = check.height - check.shift_amount + depth = check.depth + check.shift_amount + + # Put a little extra space to the left and right of the body + padded_body = Hlist([Hbox(2 * thickness), body, Hbox(2 * thickness)]) + rightside = Vlist([Hrule(state), Glue('fill'), padded_body]) + # Stretch the glue between the hrule and the body + rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0), + 'exactly', depth) + + # Add the root and shift it upward so it is above the tick. + # The value of 0.6 is a hard-coded hack ;) + if not root: + root = Box(check.width * 0.5, 0., 0.) 
+ else: + root = Hlist(root) + root.shrink() + root.shrink() + + root_vlist = Vlist([Hlist([root])]) + root_vlist.shift_amount = -height * 0.6 + + hlist = Hlist([root_vlist, # Root + # Negative kerning to put root over tick + Kern(-check.width * 0.5), + check, # Check + rightside]) # Body + return [hlist] + + def overline(self, s, loc, toks): + (body,), = toks + + state = self.get_state() + thickness = state.font_output.get_underline_thickness( + state.font, state.fontsize, state.dpi) + + height = body.height - body.shift_amount + thickness * 3.0 + depth = body.depth + body.shift_amount + + # Place overline above body + rightside = Vlist([Hrule(state), Glue('fill'), Hlist([body])]) + + # Stretch the glue between the hrule and the body + rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0), + 'exactly', depth) + + hlist = Hlist([rightside]) + return [hlist] + + def _auto_sized_delimiter(self, front, middle, back): + state = self.get_state() + if len(middle): + height = max(x.height for x in middle) + depth = max(x.depth for x in middle) + factor = None + else: + height = 0 + depth = 0 + factor = 1.0 + parts = [] + # \left. and \right. 
aren't supposed to produce any symbols + if front != '.': + parts.append( + AutoHeightChar(front, height, depth, state, factor=factor)) + parts.extend(middle) + if back != '.': + parts.append( + AutoHeightChar(back, height, depth, state, factor=factor)) + hlist = Hlist(parts) + return hlist + + def auto_delim(self, s, loc, toks): + front, middle, back = toks + + return self._auto_sized_delimiter(front, middle.asList(), back) diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_mathtext_data.py b/.venv/lib/python3.9/site-packages/matplotlib/_mathtext_data.py new file mode 100644 index 00000000..1536bdc5 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/matplotlib/_mathtext_data.py @@ -0,0 +1,1394 @@ +""" +font data tables for truetype and afm computer modern fonts +""" + +latex_to_bakoma = { + '\\__sqrt__' : ('cmex10', 0x70), + '\\bigcap' : ('cmex10', 0x5c), + '\\bigcup' : ('cmex10', 0x5b), + '\\bigodot' : ('cmex10', 0x4b), + '\\bigoplus' : ('cmex10', 0x4d), + '\\bigotimes' : ('cmex10', 0x4f), + '\\biguplus' : ('cmex10', 0x5d), + '\\bigvee' : ('cmex10', 0x5f), + '\\bigwedge' : ('cmex10', 0x5e), + '\\coprod' : ('cmex10', 0x61), + '\\int' : ('cmex10', 0x5a), + '\\langle' : ('cmex10', 0xad), + '\\leftangle' : ('cmex10', 0xad), + '\\leftbrace' : ('cmex10', 0xa9), + '\\oint' : ('cmex10', 0x49), + '\\prod' : ('cmex10', 0x59), + '\\rangle' : ('cmex10', 0xae), + '\\rightangle' : ('cmex10', 0xae), + '\\rightbrace' : ('cmex10', 0xaa), + '\\sum' : ('cmex10', 0x58), + '\\widehat' : ('cmex10', 0x62), + '\\widetilde' : ('cmex10', 0x65), + '\\{' : ('cmex10', 0xa9), + '\\}' : ('cmex10', 0xaa), + '{' : ('cmex10', 0xa9), + '}' : ('cmex10', 0xaa), + + ',' : ('cmmi10', 0x3b), + '.' 
: ('cmmi10', 0x3a), + '/' : ('cmmi10', 0x3d), + '<' : ('cmmi10', 0x3c), + '>' : ('cmmi10', 0x3e), + '\\alpha' : ('cmmi10', 0xae), + '\\beta' : ('cmmi10', 0xaf), + '\\chi' : ('cmmi10', 0xc2), + '\\combiningrightarrowabove' : ('cmmi10', 0x7e), + '\\delta' : ('cmmi10', 0xb1), + '\\ell' : ('cmmi10', 0x60), + '\\epsilon' : ('cmmi10', 0xb2), + '\\eta' : ('cmmi10', 0xb4), + '\\flat' : ('cmmi10', 0x5b), + '\\frown' : ('cmmi10', 0x5f), + '\\gamma' : ('cmmi10', 0xb0), + '\\imath' : ('cmmi10', 0x7b), + '\\iota' : ('cmmi10', 0xb6), + '\\jmath' : ('cmmi10', 0x7c), + '\\kappa' : ('cmmi10', 0x2219), + '\\lambda' : ('cmmi10', 0xb8), + '\\leftharpoondown' : ('cmmi10', 0x29), + '\\leftharpoonup' : ('cmmi10', 0x28), + '\\mu' : ('cmmi10', 0xb9), + '\\natural' : ('cmmi10', 0x5c), + '\\nu' : ('cmmi10', 0xba), + '\\omega' : ('cmmi10', 0x21), + '\\phi' : ('cmmi10', 0xc1), + '\\pi' : ('cmmi10', 0xbc), + '\\psi' : ('cmmi10', 0xc3), + '\\rho' : ('cmmi10', 0xbd), + '\\rightharpoondown' : ('cmmi10', 0x2b), + '\\rightharpoonup' : ('cmmi10', 0x2a), + '\\sharp' : ('cmmi10', 0x5d), + '\\sigma' : ('cmmi10', 0xbe), + '\\smile' : ('cmmi10', 0x5e), + '\\tau' : ('cmmi10', 0xbf), + '\\theta' : ('cmmi10', 0xb5), + '\\triangleleft' : ('cmmi10', 0x2f), + '\\triangleright' : ('cmmi10', 0x2e), + '\\upsilon' : ('cmmi10', 0xc0), + '\\varepsilon' : ('cmmi10', 0x22), + '\\varphi' : ('cmmi10', 0x27), + '\\varrho' : ('cmmi10', 0x25), + '\\varsigma' : ('cmmi10', 0x26), + '\\vartheta' : ('cmmi10', 0x23), + '\\wp' : ('cmmi10', 0x7d), + '\\xi' : ('cmmi10', 0xbb), + '\\zeta' : ('cmmi10', 0xb3), + + '!' 
: ('cmr10', 0x21), + '%' : ('cmr10', 0x25), + '&' : ('cmr10', 0x26), + '(' : ('cmr10', 0x28), + ')' : ('cmr10', 0x29), + '+' : ('cmr10', 0x2b), + '0' : ('cmr10', 0x30), + '1' : ('cmr10', 0x31), + '2' : ('cmr10', 0x32), + '3' : ('cmr10', 0x33), + '4' : ('cmr10', 0x34), + '5' : ('cmr10', 0x35), + '6' : ('cmr10', 0x36), + '7' : ('cmr10', 0x37), + '8' : ('cmr10', 0x38), + '9' : ('cmr10', 0x39), + ':' : ('cmr10', 0x3a), + ';' : ('cmr10', 0x3b), + '=' : ('cmr10', 0x3d), + '?' : ('cmr10', 0x3f), + '@' : ('cmr10', 0x40), + '[' : ('cmr10', 0x5b), + '\\#' : ('cmr10', 0x23), + '\\$' : ('cmr10', 0x24), + '\\%' : ('cmr10', 0x25), + '\\Delta' : ('cmr10', 0xa2), + '\\Gamma' : ('cmr10', 0xa1), + '\\Lambda' : ('cmr10', 0xa4), + '\\Omega' : ('cmr10', 0xad), + '\\Phi' : ('cmr10', 0xa9), + '\\Pi' : ('cmr10', 0xa6), + '\\Psi' : ('cmr10', 0xaa), + '\\Sigma' : ('cmr10', 0xa7), + '\\Theta' : ('cmr10', 0xa3), + '\\Upsilon' : ('cmr10', 0xa8), + '\\Xi' : ('cmr10', 0xa5), + '\\circumflexaccent' : ('cmr10', 0x5e), + '\\combiningacuteaccent' : ('cmr10', 0xb6), + '\\combiningbreve' : ('cmr10', 0xb8), + '\\combiningdiaeresis' : ('cmr10', 0xc4), + '\\combiningdotabove' : ('cmr10', 0x5f), + '\\combininggraveaccent' : ('cmr10', 0xb5), + '\\combiningoverline' : ('cmr10', 0xb9), + '\\combiningtilde' : ('cmr10', 0x7e), + '\\leftbracket' : ('cmr10', 0x5b), + '\\leftparen' : ('cmr10', 0x28), + '\\rightbracket' : ('cmr10', 0x5d), + '\\rightparen' : ('cmr10', 0x29), + '\\widebar' : ('cmr10', 0xb9), + ']' : ('cmr10', 0x5d), + + '*' : ('cmsy10', 0xa4), + '-' : ('cmsy10', 0xa1), + '\\Downarrow' : ('cmsy10', 0x2b), + '\\Im' : ('cmsy10', 0x3d), + '\\Leftarrow' : ('cmsy10', 0x28), + '\\Leftrightarrow' : ('cmsy10', 0x2c), + '\\P' : ('cmsy10', 0x7b), + '\\Re' : ('cmsy10', 0x3c), + '\\Rightarrow' : ('cmsy10', 0x29), + '\\S' : ('cmsy10', 0x78), + '\\Uparrow' : ('cmsy10', 0x2a), + '\\Updownarrow' : ('cmsy10', 0x6d), + '\\Vert' : ('cmsy10', 0x6b), + '\\aleph' : ('cmsy10', 0x40), + '\\approx' : ('cmsy10', 0xbc), + 
'\\ast' : ('cmsy10', 0xa4), + '\\asymp' : ('cmsy10', 0xb3), + '\\backslash' : ('cmsy10', 0x6e), + '\\bigcirc' : ('cmsy10', 0xb0), + '\\bigtriangledown' : ('cmsy10', 0x35), + '\\bigtriangleup' : ('cmsy10', 0x34), + '\\bot' : ('cmsy10', 0x3f), + '\\bullet' : ('cmsy10', 0xb2), + '\\cap' : ('cmsy10', 0x5c), + '\\cdot' : ('cmsy10', 0xa2), + '\\circ' : ('cmsy10', 0xb1), + '\\clubsuit' : ('cmsy10', 0x7c), + '\\cup' : ('cmsy10', 0x5b), + '\\dag' : ('cmsy10', 0x79), + '\\dashv' : ('cmsy10', 0x61), + '\\ddag' : ('cmsy10', 0x7a), + '\\diamond' : ('cmsy10', 0xa6), + '\\diamondsuit' : ('cmsy10', 0x7d), + '\\div' : ('cmsy10', 0xa5), + '\\downarrow' : ('cmsy10', 0x23), + '\\emptyset' : ('cmsy10', 0x3b), + '\\equiv' : ('cmsy10', 0xb4), + '\\exists' : ('cmsy10', 0x39), + '\\forall' : ('cmsy10', 0x38), + '\\geq' : ('cmsy10', 0xb8), + '\\gg' : ('cmsy10', 0xc0), + '\\heartsuit' : ('cmsy10', 0x7e), + '\\in' : ('cmsy10', 0x32), + '\\infty' : ('cmsy10', 0x31), + '\\lbrace' : ('cmsy10', 0x66), + '\\lceil' : ('cmsy10', 0x64), + '\\leftarrow' : ('cmsy10', 0xc3), + '\\leftrightarrow' : ('cmsy10', 0x24), + '\\leq' : ('cmsy10', 0x2219), + '\\lfloor' : ('cmsy10', 0x62), + '\\ll' : ('cmsy10', 0xbf), + '\\mid' : ('cmsy10', 0x6a), + '\\mp' : ('cmsy10', 0xa8), + '\\nabla' : ('cmsy10', 0x72), + '\\nearrow' : ('cmsy10', 0x25), + '\\neg' : ('cmsy10', 0x3a), + '\\ni' : ('cmsy10', 0x33), + '\\nwarrow' : ('cmsy10', 0x2d), + '\\odot' : ('cmsy10', 0xaf), + '\\ominus' : ('cmsy10', 0xaa), + '\\oplus' : ('cmsy10', 0xa9), + '\\oslash' : ('cmsy10', 0xae), + '\\otimes' : ('cmsy10', 0xad), + '\\pm' : ('cmsy10', 0xa7), + '\\prec' : ('cmsy10', 0xc1), + '\\preceq' : ('cmsy10', 0xb9), + '\\prime' : ('cmsy10', 0x30), + '\\propto' : ('cmsy10', 0x2f), + '\\rbrace' : ('cmsy10', 0x67), + '\\rceil' : ('cmsy10', 0x65), + '\\rfloor' : ('cmsy10', 0x63), + '\\rightarrow' : ('cmsy10', 0x21), + '\\searrow' : ('cmsy10', 0x26), + '\\sim' : ('cmsy10', 0xbb), + '\\simeq' : ('cmsy10', 0x27), + '\\slash' : ('cmsy10', 0x36), + 
'\\spadesuit' : ('cmsy10', 0xc4), + '\\sqcap' : ('cmsy10', 0x75), + '\\sqcup' : ('cmsy10', 0x74), + '\\sqsubseteq' : ('cmsy10', 0x76), + '\\sqsupseteq' : ('cmsy10', 0x77), + '\\subset' : ('cmsy10', 0xbd), + '\\subseteq' : ('cmsy10', 0xb5), + '\\succ' : ('cmsy10', 0xc2), + '\\succeq' : ('cmsy10', 0xba), + '\\supset' : ('cmsy10', 0xbe), + '\\supseteq' : ('cmsy10', 0xb6), + '\\swarrow' : ('cmsy10', 0x2e), + '\\times' : ('cmsy10', 0xa3), + '\\to' : ('cmsy10', 0x21), + '\\top' : ('cmsy10', 0x3e), + '\\uparrow' : ('cmsy10', 0x22), + '\\updownarrow' : ('cmsy10', 0x6c), + '\\uplus' : ('cmsy10', 0x5d), + '\\vdash' : ('cmsy10', 0x60), + '\\vee' : ('cmsy10', 0x5f), + '\\vert' : ('cmsy10', 0x6a), + '\\wedge' : ('cmsy10', 0x5e), + '\\wr' : ('cmsy10', 0x6f), + '\\|' : ('cmsy10', 0x6b), + '|' : ('cmsy10', 0x6a), + + '\\_' : ('cmtt10', 0x5f) +} + +latex_to_cmex = { # Unused; delete once mathtext becomes private. + r'\__sqrt__' : 112, + r'\bigcap' : 92, + r'\bigcup' : 91, + r'\bigodot' : 75, + r'\bigoplus' : 77, + r'\bigotimes' : 79, + r'\biguplus' : 93, + r'\bigvee' : 95, + r'\bigwedge' : 94, + r'\coprod' : 97, + r'\int' : 90, + r'\leftangle' : 173, + r'\leftbrace' : 169, + r'\oint' : 73, + r'\prod' : 89, + r'\rightangle' : 174, + r'\rightbrace' : 170, + r'\sum' : 88, + r'\widehat' : 98, + r'\widetilde' : 101, +} + +latex_to_standard = { + r'\cong' : ('psyr', 64), + r'\Delta' : ('psyr', 68), + r'\Phi' : ('psyr', 70), + r'\Gamma' : ('psyr', 89), + r'\alpha' : ('psyr', 97), + r'\beta' : ('psyr', 98), + r'\chi' : ('psyr', 99), + r'\delta' : ('psyr', 100), + r'\varepsilon' : ('psyr', 101), + r'\phi' : ('psyr', 102), + r'\gamma' : ('psyr', 103), + r'\eta' : ('psyr', 104), + r'\iota' : ('psyr', 105), + r'\varphi' : ('psyr', 106), + r'\kappa' : ('psyr', 108), + r'\nu' : ('psyr', 110), + r'\pi' : ('psyr', 112), + r'\theta' : ('psyr', 113), + r'\rho' : ('psyr', 114), + r'\sigma' : ('psyr', 115), + r'\tau' : ('psyr', 116), + r'\upsilon' : ('psyr', 117), + r'\varpi' : ('psyr', 118), + 
r'\omega' : ('psyr', 119), + r'\xi' : ('psyr', 120), + r'\psi' : ('psyr', 121), + r'\zeta' : ('psyr', 122), + r'\sim' : ('psyr', 126), + r'\leq' : ('psyr', 163), + r'\infty' : ('psyr', 165), + r'\clubsuit' : ('psyr', 167), + r'\diamondsuit' : ('psyr', 168), + r'\heartsuit' : ('psyr', 169), + r'\spadesuit' : ('psyr', 170), + r'\leftrightarrow' : ('psyr', 171), + r'\leftarrow' : ('psyr', 172), + r'\uparrow' : ('psyr', 173), + r'\rightarrow' : ('psyr', 174), + r'\downarrow' : ('psyr', 175), + r'\pm' : ('psyr', 176), + r'\geq' : ('psyr', 179), + r'\times' : ('psyr', 180), + r'\propto' : ('psyr', 181), + r'\partial' : ('psyr', 182), + r'\bullet' : ('psyr', 183), + r'\div' : ('psyr', 184), + r'\neq' : ('psyr', 185), + r'\equiv' : ('psyr', 186), + r'\approx' : ('psyr', 187), + r'\ldots' : ('psyr', 188), + r'\aleph' : ('psyr', 192), + r'\Im' : ('psyr', 193), + r'\Re' : ('psyr', 194), + r'\wp' : ('psyr', 195), + r'\otimes' : ('psyr', 196), + r'\oplus' : ('psyr', 197), + r'\oslash' : ('psyr', 198), + r'\cap' : ('psyr', 199), + r'\cup' : ('psyr', 200), + r'\supset' : ('psyr', 201), + r'\supseteq' : ('psyr', 202), + r'\subset' : ('psyr', 204), + r'\subseteq' : ('psyr', 205), + r'\in' : ('psyr', 206), + r'\notin' : ('psyr', 207), + r'\angle' : ('psyr', 208), + r'\nabla' : ('psyr', 209), + r'\textregistered' : ('psyr', 210), + r'\copyright' : ('psyr', 211), + r'\texttrademark' : ('psyr', 212), + r'\Pi' : ('psyr', 213), + r'\prod' : ('psyr', 213), + r'\surd' : ('psyr', 214), + r'\__sqrt__' : ('psyr', 214), + r'\cdot' : ('psyr', 215), + r'\urcorner' : ('psyr', 216), + r'\vee' : ('psyr', 217), + r'\wedge' : ('psyr', 218), + r'\Leftrightarrow' : ('psyr', 219), + r'\Leftarrow' : ('psyr', 220), + r'\Uparrow' : ('psyr', 221), + r'\Rightarrow' : ('psyr', 222), + r'\Downarrow' : ('psyr', 223), + r'\Diamond' : ('psyr', 224), + r'\Sigma' : ('psyr', 229), + r'\sum' : ('psyr', 229), + r'\forall' : ('psyr', 34), + r'\exists' : ('psyr', 36), + r'\lceil' : ('psyr', 233), + r'\lbrace' : ('psyr', 
123), + r'\Psi' : ('psyr', 89), + r'\bot' : ('psyr', 0o136), + r'\Omega' : ('psyr', 0o127), + r'\leftbracket' : ('psyr', 0o133), + r'\rightbracket' : ('psyr', 0o135), + r'\leftbrace' : ('psyr', 123), + r'\leftparen' : ('psyr', 0o50), + r'\prime' : ('psyr', 0o242), + r'\sharp' : ('psyr', 0o43), + r'\slash' : ('psyr', 0o57), + r'\Lambda' : ('psyr', 0o114), + r'\neg' : ('psyr', 0o330), + r'\Upsilon' : ('psyr', 0o241), + r'\rightbrace' : ('psyr', 0o175), + r'\rfloor' : ('psyr', 0o373), + r'\lambda' : ('psyr', 0o154), + r'\to' : ('psyr', 0o256), + r'\Xi' : ('psyr', 0o130), + r'\emptyset' : ('psyr', 0o306), + r'\lfloor' : ('psyr', 0o353), + r'\rightparen' : ('psyr', 0o51), + r'\rceil' : ('psyr', 0o371), + r'\ni' : ('psyr', 0o47), + r'\epsilon' : ('psyr', 0o145), + r'\Theta' : ('psyr', 0o121), + r'\langle' : ('psyr', 0o341), + r'\leftangle' : ('psyr', 0o341), + r'\rangle' : ('psyr', 0o361), + r'\rightangle' : ('psyr', 0o361), + r'\rbrace' : ('psyr', 0o175), + r'\circ' : ('psyr', 0o260), + r'\diamond' : ('psyr', 0o340), + r'\mu' : ('psyr', 0o155), + r'\mid' : ('psyr', 0o352), + r'\imath' : ('pncri8a', 105), + r'\%' : ('pncr8a', 37), + r'\$' : ('pncr8a', 36), + r'\{' : ('pncr8a', 123), + r'\}' : ('pncr8a', 125), + r'\backslash' : ('pncr8a', 92), + r'\ast' : ('pncr8a', 42), + r'\#' : ('pncr8a', 35), + + r'\circumflexaccent' : ('pncri8a', 124), # for \hat + r'\combiningbreve' : ('pncri8a', 81), # for \breve + r'\combininggraveaccent' : ('pncri8a', 114), # for \grave + r'\combiningacuteaccent' : ('pncri8a', 63), # for \accute + r'\combiningdiaeresis' : ('pncri8a', 91), # for \ddot + r'\combiningtilde' : ('pncri8a', 75), # for \tilde + r'\combiningrightarrowabove' : ('pncri8a', 110), # for \vec + r'\combiningdotabove' : ('pncri8a', 26), # for \dot +} + +# Automatically generated. 
+ +type12uni = { + 'aring' : 229, + 'quotedblright' : 8221, + 'V' : 86, + 'dollar' : 36, + 'four' : 52, + 'Yacute' : 221, + 'P' : 80, + 'underscore' : 95, + 'p' : 112, + 'Otilde' : 213, + 'perthousand' : 8240, + 'zero' : 48, + 'dotlessi' : 305, + 'Scaron' : 352, + 'zcaron' : 382, + 'egrave' : 232, + 'section' : 167, + 'Icircumflex' : 206, + 'ntilde' : 241, + 'ampersand' : 38, + 'dotaccent' : 729, + 'degree' : 176, + 'K' : 75, + 'acircumflex' : 226, + 'Aring' : 197, + 'k' : 107, + 'smalltilde' : 732, + 'Agrave' : 192, + 'divide' : 247, + 'ocircumflex' : 244, + 'asciitilde' : 126, + 'two' : 50, + 'E' : 69, + 'scaron' : 353, + 'F' : 70, + 'bracketleft' : 91, + 'asciicircum' : 94, + 'f' : 102, + 'ordmasculine' : 186, + 'mu' : 181, + 'paragraph' : 182, + 'nine' : 57, + 'v' : 118, + 'guilsinglleft' : 8249, + 'backslash' : 92, + 'six' : 54, + 'A' : 65, + 'icircumflex' : 238, + 'a' : 97, + 'ogonek' : 731, + 'q' : 113, + 'oacute' : 243, + 'ograve' : 242, + 'edieresis' : 235, + 'comma' : 44, + 'otilde' : 245, + 'guillemotright' : 187, + 'ecircumflex' : 234, + 'greater' : 62, + 'uacute' : 250, + 'L' : 76, + 'bullet' : 8226, + 'cedilla' : 184, + 'ydieresis' : 255, + 'l' : 108, + 'logicalnot' : 172, + 'exclamdown' : 161, + 'endash' : 8211, + 'agrave' : 224, + 'Adieresis' : 196, + 'germandbls' : 223, + 'Odieresis' : 214, + 'space' : 32, + 'quoteright' : 8217, + 'ucircumflex' : 251, + 'G' : 71, + 'quoteleft' : 8216, + 'W' : 87, + 'Q' : 81, + 'g' : 103, + 'w' : 119, + 'question' : 63, + 'one' : 49, + 'ring' : 730, + 'figuredash' : 8210, + 'B' : 66, + 'iacute' : 237, + 'Ydieresis' : 376, + 'R' : 82, + 'b' : 98, + 'r' : 114, + 'Ccedilla' : 199, + 'minus' : 8722, + 'Lslash' : 321, + 'Uacute' : 218, + 'yacute' : 253, + 'Ucircumflex' : 219, + 'quotedbl' : 34, + 'onehalf' : 189, + 'Thorn' : 222, + 'M' : 77, + 'eight' : 56, + 'multiply' : 215, + 'grave' : 96, + 'Ocircumflex' : 212, + 'm' : 109, + 'Ugrave' : 217, + 'guilsinglright' : 8250, + 'Ntilde' : 209, + 'questiondown' : 191, + 
'Atilde' : 195, + 'ccedilla' : 231, + 'Z' : 90, + 'copyright' : 169, + 'yen' : 165, + 'Eacute' : 201, + 'H' : 72, + 'X' : 88, + 'Idieresis' : 207, + 'bar' : 124, + 'h' : 104, + 'x' : 120, + 'udieresis' : 252, + 'ordfeminine' : 170, + 'braceleft' : 123, + 'macron' : 175, + 'atilde' : 227, + 'Acircumflex' : 194, + 'Oslash' : 216, + 'C' : 67, + 'quotedblleft' : 8220, + 'S' : 83, + 'exclam' : 33, + 'Zcaron' : 381, + 'equal' : 61, + 's' : 115, + 'eth' : 240, + 'Egrave' : 200, + 'hyphen' : 45, + 'period' : 46, + 'igrave' : 236, + 'colon' : 58, + 'Ecircumflex' : 202, + 'trademark' : 8482, + 'Aacute' : 193, + 'cent' : 162, + 'lslash' : 322, + 'c' : 99, + 'N' : 78, + 'breve' : 728, + 'Oacute' : 211, + 'guillemotleft' : 171, + 'n' : 110, + 'idieresis' : 239, + 'braceright' : 125, + 'seven' : 55, + 'brokenbar' : 166, + 'ugrave' : 249, + 'periodcentered' : 183, + 'sterling' : 163, + 'I' : 73, + 'Y' : 89, + 'Eth' : 208, + 'emdash' : 8212, + 'i' : 105, + 'daggerdbl' : 8225, + 'y' : 121, + 'plusminus' : 177, + 'less' : 60, + 'Udieresis' : 220, + 'D' : 68, + 'five' : 53, + 'T' : 84, + 'oslash' : 248, + 'acute' : 180, + 'd' : 100, + 'OE' : 338, + 'Igrave' : 204, + 't' : 116, + 'parenright' : 41, + 'adieresis' : 228, + 'quotesingle' : 39, + 'twodotenleader' : 8229, + 'slash' : 47, + 'ellipsis' : 8230, + 'numbersign' : 35, + 'odieresis' : 246, + 'O' : 79, + 'oe' : 339, + 'o' : 111, + 'Edieresis' : 203, + 'plus' : 43, + 'dagger' : 8224, + 'three' : 51, + 'hungarumlaut' : 733, + 'parenleft' : 40, + 'fraction' : 8260, + 'registered' : 174, + 'J' : 74, + 'dieresis' : 168, + 'Ograve' : 210, + 'j' : 106, + 'z' : 122, + 'ae' : 230, + 'semicolon' : 59, + 'at' : 64, + 'Iacute' : 205, + 'percent' : 37, + 'bracketright' : 93, + 'AE' : 198, + 'asterisk' : 42, + 'aacute' : 225, + 'U' : 85, + 'eacute' : 233, + 'e' : 101, + 'thorn' : 254, + 'u' : 117, +} + +uni2type1 = {v: k for k, v in type12uni.items()} + +tex2uni = { + 'widehat' : 0x0302, + 'widetilde' : 0x0303, + 'widebar' : 0x0305, + 'langle' 
: 0x27e8, + 'rangle' : 0x27e9, + 'perp' : 0x27c2, + 'neq' : 0x2260, + 'Join' : 0x2a1d, + 'leqslant' : 0x2a7d, + 'geqslant' : 0x2a7e, + 'lessapprox' : 0x2a85, + 'gtrapprox' : 0x2a86, + 'lesseqqgtr' : 0x2a8b, + 'gtreqqless' : 0x2a8c, + 'triangleeq' : 0x225c, + 'eqslantless' : 0x2a95, + 'eqslantgtr' : 0x2a96, + 'backepsilon' : 0x03f6, + 'precapprox' : 0x2ab7, + 'succapprox' : 0x2ab8, + 'fallingdotseq' : 0x2252, + 'subseteqq' : 0x2ac5, + 'supseteqq' : 0x2ac6, + 'varpropto' : 0x221d, + 'precnapprox' : 0x2ab9, + 'succnapprox' : 0x2aba, + 'subsetneqq' : 0x2acb, + 'supsetneqq' : 0x2acc, + 'lnapprox' : 0x2ab9, + 'gnapprox' : 0x2aba, + 'longleftarrow' : 0x27f5, + 'longrightarrow' : 0x27f6, + 'longleftrightarrow' : 0x27f7, + 'Longleftarrow' : 0x27f8, + 'Longrightarrow' : 0x27f9, + 'Longleftrightarrow' : 0x27fa, + 'longmapsto' : 0x27fc, + 'leadsto' : 0x21dd, + 'dashleftarrow' : 0x290e, + 'dashrightarrow' : 0x290f, + 'circlearrowleft' : 0x21ba, + 'circlearrowright' : 0x21bb, + 'leftrightsquigarrow' : 0x21ad, + 'leftsquigarrow' : 0x219c, + 'rightsquigarrow' : 0x219d, + 'Game' : 0x2141, + 'hbar' : 0x0127, + 'hslash' : 0x210f, + 'ldots' : 0x2026, + 'vdots' : 0x22ee, + 'doteqdot' : 0x2251, + 'doteq' : 8784, + 'partial' : 8706, + 'gg' : 8811, + 'asymp' : 8781, + 'blacktriangledown' : 9662, + 'otimes' : 8855, + 'nearrow' : 8599, + 'varpi' : 982, + 'vee' : 8744, + 'vec' : 8407, + 'smile' : 8995, + 'succnsim' : 8937, + 'gimel' : 8503, + 'vert' : 124, + '|' : 124, + 'varrho' : 1009, + 'P' : 182, + 'approxident' : 8779, + 'Swarrow' : 8665, + 'textasciicircum' : 94, + 'imageof' : 8887, + 'ntriangleleft' : 8938, + 'nleq' : 8816, + 'div' : 247, + 'nparallel' : 8742, + 'Leftarrow' : 8656, + 'lll' : 8920, + 'oiint' : 8751, + 'ngeq' : 8817, + 'Theta' : 920, + 'origof' : 8886, + 'blacksquare' : 9632, + 'solbar' : 9023, + 'neg' : 172, + 'sum' : 8721, + 'Vdash' : 8873, + 'coloneq' : 8788, + 'degree' : 176, + 'bowtie' : 8904, + 'blacktriangleright' : 9654, + 'varsigma' : 962, + 'leq' : 8804, + 
'ggg' : 8921, + 'lneqq' : 8808, + 'scurel' : 8881, + 'stareq' : 8795, + 'BbbN' : 8469, + 'nLeftarrow' : 8653, + 'nLeftrightarrow' : 8654, + 'k' : 808, + 'bot' : 8869, + 'BbbC' : 8450, + 'Lsh' : 8624, + 'leftleftarrows' : 8647, + 'BbbZ' : 8484, + 'digamma' : 989, + 'BbbR' : 8477, + 'BbbP' : 8473, + 'BbbQ' : 8474, + 'vartriangleright' : 8883, + 'succsim' : 8831, + 'wedge' : 8743, + 'lessgtr' : 8822, + 'veebar' : 8891, + 'mapsdown' : 8615, + 'Rsh' : 8625, + 'chi' : 967, + 'prec' : 8826, + 'nsubseteq' : 8840, + 'therefore' : 8756, + 'eqcirc' : 8790, + 'textexclamdown' : 161, + 'nRightarrow' : 8655, + 'flat' : 9837, + 'notin' : 8713, + 'llcorner' : 8990, + 'varepsilon' : 949, + 'bigtriangleup' : 9651, + 'aleph' : 8501, + 'dotminus' : 8760, + 'upsilon' : 965, + 'Lambda' : 923, + 'cap' : 8745, + 'barleftarrow' : 8676, + 'mu' : 956, + 'boxplus' : 8862, + 'mp' : 8723, + 'circledast' : 8859, + 'tau' : 964, + 'in' : 8712, + 'backslash' : 92, + 'varnothing' : 8709, + 'sharp' : 9839, + 'eqsim' : 8770, + 'gnsim' : 8935, + 'Searrow' : 8664, + 'updownarrows' : 8645, + 'heartsuit' : 9825, + 'trianglelefteq' : 8884, + 'ddag' : 8225, + 'sqsubseteq' : 8849, + 'mapsfrom' : 8612, + 'boxbar' : 9707, + 'sim' : 8764, + 'Nwarrow' : 8662, + 'nequiv' : 8802, + 'succ' : 8827, + 'vdash' : 8866, + 'Leftrightarrow' : 8660, + 'parallel' : 8741, + 'invnot' : 8976, + 'natural' : 9838, + 'ss' : 223, + 'uparrow' : 8593, + 'nsim' : 8769, + 'hookrightarrow' : 8618, + 'Equiv' : 8803, + 'approx' : 8776, + 'Vvdash' : 8874, + 'nsucc' : 8833, + 'leftrightharpoons' : 8651, + 'Re' : 8476, + 'boxminus' : 8863, + 'equiv' : 8801, + 'Lleftarrow' : 8666, + 'll' : 8810, + 'Cup' : 8915, + 'measeq' : 8798, + 'upharpoonleft' : 8639, + 'lq' : 8216, + 'Upsilon' : 933, + 'subsetneq' : 8842, + 'greater' : 62, + 'supsetneq' : 8843, + 'Cap' : 8914, + 'L' : 321, + 'spadesuit' : 9824, + 'lrcorner' : 8991, + 'not' : 824, + 'bar' : 772, + 'rightharpoonaccent' : 8401, + 'boxdot' : 8865, + 'l' : 322, + 'leftharpoondown' : 8637, + 
'bigcup' : 8899, + 'iint' : 8748, + 'bigwedge' : 8896, + 'downharpoonleft' : 8643, + 'textasciitilde' : 126, + 'subset' : 8834, + 'leqq' : 8806, + 'mapsup' : 8613, + 'nvDash' : 8877, + 'looparrowleft' : 8619, + 'nless' : 8814, + 'rightarrowbar' : 8677, + 'Vert' : 8214, + 'downdownarrows' : 8650, + 'uplus' : 8846, + 'simeq' : 8771, + 'napprox' : 8777, + 'ast' : 8727, + 'twoheaduparrow' : 8607, + 'doublebarwedge' : 8966, + 'Sigma' : 931, + 'leftharpoonaccent' : 8400, + 'ntrianglelefteq' : 8940, + 'nexists' : 8708, + 'times' : 215, + 'measuredangle' : 8737, + 'bumpeq' : 8783, + 'carriagereturn' : 8629, + 'adots' : 8944, + 'checkmark' : 10003, + 'lambda' : 955, + 'xi' : 958, + 'rbrace' : 125, + 'rbrack' : 93, + 'Nearrow' : 8663, + 'maltese' : 10016, + 'clubsuit' : 9827, + 'top' : 8868, + 'overarc' : 785, + 'varphi' : 966, + 'Delta' : 916, + 'iota' : 953, + 'nleftarrow' : 8602, + 'candra' : 784, + 'supset' : 8835, + 'triangleleft' : 9665, + 'gtreqless' : 8923, + 'ntrianglerighteq' : 8941, + 'quad' : 8195, + 'Xi' : 926, + 'gtrdot' : 8919, + 'leftthreetimes' : 8907, + 'minus' : 8722, + 'preccurlyeq' : 8828, + 'nleftrightarrow' : 8622, + 'lambdabar' : 411, + 'blacktriangle' : 9652, + 'kernelcontraction' : 8763, + 'Phi' : 934, + 'angle' : 8736, + 'spadesuitopen' : 9828, + 'eqless' : 8924, + 'mid' : 8739, + 'varkappa' : 1008, + 'Ldsh' : 8626, + 'updownarrow' : 8597, + 'beta' : 946, + 'textquotedblleft' : 8220, + 'rho' : 961, + 'alpha' : 945, + 'intercal' : 8890, + 'beth' : 8502, + 'grave' : 768, + 'acwopencirclearrow' : 8634, + 'nmid' : 8740, + 'nsupset' : 8837, + 'sigma' : 963, + 'dot' : 775, + 'Rightarrow' : 8658, + 'turnednot' : 8985, + 'backsimeq' : 8909, + 'leftarrowtail' : 8610, + 'approxeq' : 8778, + 'curlyeqsucc' : 8927, + 'rightarrowtail' : 8611, + 'Psi' : 936, + 'copyright' : 169, + 'yen' : 165, + 'vartriangleleft' : 8882, + 'rasp' : 700, + 'triangleright' : 9655, + 'precsim' : 8830, + 'infty' : 8734, + 'geq' : 8805, + 'updownarrowbar' : 8616, + 'precnsim' : 8936, 
+ 'H' : 779, + 'ulcorner' : 8988, + 'looparrowright' : 8620, + 'ncong' : 8775, + 'downarrow' : 8595, + 'circeq' : 8791, + 'subseteq' : 8838, + 'bigstar' : 9733, + 'prime' : 8242, + 'lceil' : 8968, + 'Rrightarrow' : 8667, + 'oiiint' : 8752, + 'curlywedge' : 8911, + 'vDash' : 8872, + 'lfloor' : 8970, + 'ddots' : 8945, + 'exists' : 8707, + 'underbar' : 817, + 'Pi' : 928, + 'leftrightarrows' : 8646, + 'sphericalangle' : 8738, + 'coprod' : 8720, + 'circledcirc' : 8858, + 'gtrsim' : 8819, + 'gneqq' : 8809, + 'between' : 8812, + 'theta' : 952, + 'complement' : 8705, + 'arceq' : 8792, + 'nVdash' : 8878, + 'S' : 167, + 'wr' : 8768, + 'wp' : 8472, + 'backcong' : 8780, + 'lasp' : 701, + 'c' : 807, + 'nabla' : 8711, + 'dotplus' : 8724, + 'eta' : 951, + 'forall' : 8704, + 'eth' : 240, + 'colon' : 58, + 'sqcup' : 8852, + 'rightrightarrows' : 8649, + 'sqsupset' : 8848, + 'mapsto' : 8614, + 'bigtriangledown' : 9661, + 'sqsupseteq' : 8850, + 'propto' : 8733, + 'pi' : 960, + 'pm' : 177, + 'dots' : 0x2026, + 'nrightarrow' : 8603, + 'textasciiacute' : 180, + 'Doteq' : 8785, + 'breve' : 774, + 'sqcap' : 8851, + 'twoheadrightarrow' : 8608, + 'kappa' : 954, + 'vartriangle' : 9653, + 'diamondsuit' : 9826, + 'pitchfork' : 8916, + 'blacktriangleleft' : 9664, + 'nprec' : 8832, + 'curvearrowright' : 8631, + 'barwedge' : 8892, + 'multimap' : 8888, + 'textquestiondown' : 191, + 'cong' : 8773, + 'rtimes' : 8906, + 'rightzigzagarrow' : 8669, + 'rightarrow' : 8594, + 'leftarrow' : 8592, + '__sqrt__' : 8730, + 'twoheaddownarrow' : 8609, + 'oint' : 8750, + 'bigvee' : 8897, + 'eqdef' : 8797, + 'sterling' : 163, + 'phi' : 981, + 'Updownarrow' : 8661, + 'backprime' : 8245, + 'emdash' : 8212, + 'Gamma' : 915, + 'i' : 305, + 'rceil' : 8969, + 'leftharpoonup' : 8636, + 'Im' : 8465, + 'curvearrowleft' : 8630, + 'wedgeq' : 8793, + 'curlyeqprec' : 8926, + 'questeq' : 8799, + 'less' : 60, + 'upuparrows' : 8648, + 'tilde' : 771, + 'textasciigrave' : 96, + 'smallsetminus' : 8726, + 'ell' : 8467, + 'cup' : 8746, 
+ 'danger' : 9761, + 'nVDash' : 8879, + 'cdotp' : 183, + 'cdots' : 8943, + 'hat' : 770, + 'eqgtr' : 8925, + 'psi' : 968, + 'frown' : 8994, + 'acute' : 769, + 'downzigzagarrow' : 8623, + 'ntriangleright' : 8939, + 'cupdot' : 8845, + 'circleddash' : 8861, + 'oslash' : 8856, + 'mho' : 8487, + 'd' : 803, + 'sqsubset' : 8847, + 'cdot' : 8901, + 'Omega' : 937, + 'OE' : 338, + 'veeeq' : 8794, + 'Finv' : 8498, + 't' : 865, + 'leftrightarrow' : 8596, + 'swarrow' : 8601, + 'rightthreetimes' : 8908, + 'rightleftharpoons' : 8652, + 'lesssim' : 8818, + 'searrow' : 8600, + 'because' : 8757, + 'gtrless' : 8823, + 'star' : 8902, + 'nsubset' : 8836, + 'zeta' : 950, + 'dddot' : 8411, + 'bigcirc' : 9675, + 'Supset' : 8913, + 'circ' : 8728, + 'slash' : 8725, + 'ocirc' : 778, + 'prod' : 8719, + 'twoheadleftarrow' : 8606, + 'daleth' : 8504, + 'upharpoonright' : 8638, + 'odot' : 8857, + 'Uparrow' : 8657, + 'O' : 216, + 'hookleftarrow' : 8617, + 'trianglerighteq' : 8885, + 'nsime' : 8772, + 'oe' : 339, + 'nwarrow' : 8598, + 'o' : 248, + 'ddddot' : 8412, + 'downharpoonright' : 8642, + 'succcurlyeq' : 8829, + 'gamma' : 947, + 'scrR' : 8475, + 'dag' : 8224, + 'thickspace' : 8197, + 'frakZ' : 8488, + 'lessdot' : 8918, + 'triangledown' : 9663, + 'ltimes' : 8905, + 'scrB' : 8492, + 'endash' : 8211, + 'scrE' : 8496, + 'scrF' : 8497, + 'scrH' : 8459, + 'scrI' : 8464, + 'rightharpoondown' : 8641, + 'scrL' : 8466, + 'scrM' : 8499, + 'frakC' : 8493, + 'nsupseteq' : 8841, + 'circledR' : 174, + 'circledS' : 9416, + 'ngtr' : 8815, + 'bigcap' : 8898, + 'scre' : 8495, + 'Downarrow' : 8659, + 'scrg' : 8458, + 'overleftrightarrow' : 8417, + 'scro' : 8500, + 'lnsim' : 8934, + 'eqcolon' : 8789, + 'curlyvee' : 8910, + 'urcorner' : 8989, + 'lbrace' : 123, + 'Bumpeq' : 8782, + 'delta' : 948, + 'boxtimes' : 8864, + 'overleftarrow' : 8406, + 'prurel' : 8880, + 'clubsuitopen' : 9831, + 'cwopencirclearrow' : 8635, + 'geqq' : 8807, + 'rightleftarrows' : 8644, + 'ac' : 8766, + 'ae' : 230, + 'int' : 8747, + 'rfloor' : 
8971, + 'risingdotseq' : 8787, + 'nvdash' : 8876, + 'diamond' : 8900, + 'ddot' : 776, + 'backsim' : 8765, + 'oplus' : 8853, + 'triangleq' : 8796, + 'check' : 780, + 'ni' : 8715, + 'iiint' : 8749, + 'ne' : 8800, + 'lesseqgtr' : 8922, + 'obar' : 9021, + 'supseteq' : 8839, + 'nu' : 957, + 'AA' : 197, + 'AE' : 198, + 'models' : 8871, + 'ominus' : 8854, + 'dashv' : 8867, + 'omega' : 969, + 'rq' : 8217, + 'Subset' : 8912, + 'rightharpoonup' : 8640, + 'Rdsh' : 8627, + 'bullet' : 8729, + 'divideontimes' : 8903, + 'lbrack' : 91, + 'textquotedblright' : 8221, + 'Colon' : 8759, + '%' : 37, + '$' : 36, + '{' : 123, + '}' : 125, + '_' : 95, + '#' : 35, + 'imath' : 0x131, + 'circumflexaccent' : 770, + 'combiningbreve' : 774, + 'combiningoverline' : 772, + 'combininggraveaccent' : 768, + 'combiningacuteaccent' : 769, + 'combiningdiaeresis' : 776, + 'combiningtilde' : 771, + 'combiningrightarrowabove' : 8407, + 'combiningdotabove' : 775, + 'combiningthreedotsabove' : 8411, + 'combiningfourdotsabove' : 8412, + 'to' : 8594, + 'succeq' : 8829, + 'emptyset' : 8709, + 'leftparen' : 40, + 'rightparen' : 41, + 'bigoplus' : 10753, + 'leftangle' : 10216, + 'rightangle' : 10217, + 'leftbrace' : 124, + 'rightbrace' : 125, + 'jmath' : 567, + 'bigodot' : 10752, + 'preceq' : 8828, + 'biguplus' : 10756, + 'epsilon' : 949, + 'vartheta' : 977, + 'bigotimes' : 10754, + 'guillemotleft' : 171, + 'ring' : 730, + 'Thorn' : 222, + 'guilsinglright' : 8250, + 'perthousand' : 8240, + 'macron' : 175, + 'cent' : 162, + 'guillemotright' : 187, + 'equal' : 61, + 'asterisk' : 42, + 'guilsinglleft' : 8249, + 'plus' : 43, + 'thorn' : 254, + 'dagger' : 8224 +} + +# Each element is a 4-tuple of the form: +# src_start, src_end, dst_font, dst_start +# +stix_virtual_fonts = { + 'bb': + { + 'rm': + [ + (0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9 + (0x0041, 0x0042, 'rm', 0x1d538), # A-B + (0x0043, 0x0043, 'rm', 0x2102), # C + (0x0044, 0x0047, 'rm', 0x1d53b), # D-G + (0x0048, 0x0048, 'rm', 0x210d), # H + (0x0049, 0x004d, 
'rm', 0x1d540), # I-M + (0x004e, 0x004e, 'rm', 0x2115), # N + (0x004f, 0x004f, 'rm', 0x1d546), # O + (0x0050, 0x0051, 'rm', 0x2119), # P-Q + (0x0052, 0x0052, 'rm', 0x211d), # R + (0x0053, 0x0059, 'rm', 0x1d54a), # S-Y + (0x005a, 0x005a, 'rm', 0x2124), # Z + (0x0061, 0x007a, 'rm', 0x1d552), # a-z + (0x0393, 0x0393, 'rm', 0x213e), # \Gamma + (0x03a0, 0x03a0, 'rm', 0x213f), # \Pi + (0x03a3, 0x03a3, 'rm', 0x2140), # \Sigma + (0x03b3, 0x03b3, 'rm', 0x213d), # \gamma + (0x03c0, 0x03c0, 'rm', 0x213c), # \pi + ], + 'it': + [ + (0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9 + (0x0041, 0x0042, 'it', 0xe154), # A-B + (0x0043, 0x0043, 'it', 0x2102), # C + (0x0044, 0x0044, 'it', 0x2145), # D + (0x0045, 0x0047, 'it', 0xe156), # E-G + (0x0048, 0x0048, 'it', 0x210d), # H + (0x0049, 0x004d, 'it', 0xe159), # I-M + (0x004e, 0x004e, 'it', 0x2115), # N + (0x004f, 0x004f, 'it', 0xe15e), # O + (0x0050, 0x0051, 'it', 0x2119), # P-Q + (0x0052, 0x0052, 'it', 0x211d), # R + (0x0053, 0x0059, 'it', 0xe15f), # S-Y + (0x005a, 0x005a, 'it', 0x2124), # Z + (0x0061, 0x0063, 'it', 0xe166), # a-c + (0x0064, 0x0065, 'it', 0x2146), # d-e + (0x0066, 0x0068, 'it', 0xe169), # f-h + (0x0069, 0x006a, 'it', 0x2148), # i-j + (0x006b, 0x007a, 'it', 0xe16c), # k-z + (0x0393, 0x0393, 'it', 0x213e), # \Gamma (not in beta STIX fonts) + (0x03a0, 0x03a0, 'it', 0x213f), # \Pi + (0x03a3, 0x03a3, 'it', 0x2140), # \Sigma (not in beta STIX fonts) + (0x03b3, 0x03b3, 'it', 0x213d), # \gamma (not in beta STIX fonts) + (0x03c0, 0x03c0, 'it', 0x213c), # \pi + ], + 'bf': + [ + (0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9 + (0x0041, 0x0042, 'bf', 0xe38a), # A-B + (0x0043, 0x0043, 'bf', 0x2102), # C + (0x0044, 0x0044, 'bf', 0x2145), # D + (0x0045, 0x0047, 'bf', 0xe38d), # E-G + (0x0048, 0x0048, 'bf', 0x210d), # H + (0x0049, 0x004d, 'bf', 0xe390), # I-M + (0x004e, 0x004e, 'bf', 0x2115), # N + (0x004f, 0x004f, 'bf', 0xe395), # O + (0x0050, 0x0051, 'bf', 0x2119), # P-Q + (0x0052, 0x0052, 'bf', 0x211d), # R + (0x0053, 0x0059, 'bf', 0xe396), # S-Y 
+ (0x005a, 0x005a, 'bf', 0x2124), # Z + (0x0061, 0x0063, 'bf', 0xe39d), # a-c + (0x0064, 0x0065, 'bf', 0x2146), # d-e + (0x0066, 0x0068, 'bf', 0xe3a2), # f-h + (0x0069, 0x006a, 'bf', 0x2148), # i-j + (0x006b, 0x007a, 'bf', 0xe3a7), # k-z + (0x0393, 0x0393, 'bf', 0x213e), # \Gamma + (0x03a0, 0x03a0, 'bf', 0x213f), # \Pi + (0x03a3, 0x03a3, 'bf', 0x2140), # \Sigma + (0x03b3, 0x03b3, 'bf', 0x213d), # \gamma + (0x03c0, 0x03c0, 'bf', 0x213c), # \pi + ], + }, + 'cal': + [ + (0x0041, 0x005a, 'it', 0xe22d), # A-Z + ], + 'frak': + { + 'rm': + [ + (0x0041, 0x0042, 'rm', 0x1d504), # A-B + (0x0043, 0x0043, 'rm', 0x212d), # C + (0x0044, 0x0047, 'rm', 0x1d507), # D-G + (0x0048, 0x0048, 'rm', 0x210c), # H + (0x0049, 0x0049, 'rm', 0x2111), # I + (0x004a, 0x0051, 'rm', 0x1d50d), # J-Q + (0x0052, 0x0052, 'rm', 0x211c), # R + (0x0053, 0x0059, 'rm', 0x1d516), # S-Y + (0x005a, 0x005a, 'rm', 0x2128), # Z + (0x0061, 0x007a, 'rm', 0x1d51e), # a-z + ], + 'bf': + [ + (0x0041, 0x005a, 'bf', 0x1d56c), # A-Z + (0x0061, 0x007a, 'bf', 0x1d586), # a-z + ], + }, + 'scr': + [ + (0x0041, 0x0041, 'it', 0x1d49c), # A + (0x0042, 0x0042, 'it', 0x212c), # B + (0x0043, 0x0044, 'it', 0x1d49e), # C-D + (0x0045, 0x0046, 'it', 0x2130), # E-F + (0x0047, 0x0047, 'it', 0x1d4a2), # G + (0x0048, 0x0048, 'it', 0x210b), # H + (0x0049, 0x0049, 'it', 0x2110), # I + (0x004a, 0x004b, 'it', 0x1d4a5), # J-K + (0x004c, 0x004c, 'it', 0x2112), # L + (0x004d, 0x004d, 'it', 0x2133), # M + (0x004e, 0x0051, 'it', 0x1d4a9), # N-Q + (0x0052, 0x0052, 'it', 0x211b), # R + (0x0053, 0x005a, 'it', 0x1d4ae), # S-Z + (0x0061, 0x0064, 'it', 0x1d4b6), # a-d + (0x0065, 0x0065, 'it', 0x212f), # e + (0x0066, 0x0066, 'it', 0x1d4bb), # f + (0x0067, 0x0067, 'it', 0x210a), # g + (0x0068, 0x006e, 'it', 0x1d4bd), # h-n + (0x006f, 0x006f, 'it', 0x2134), # o + (0x0070, 0x007a, 'it', 0x1d4c5), # p-z + ], + 'sf': + { + 'rm': + [ + (0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9 + (0x0041, 0x005a, 'rm', 0x1d5a0), # A-Z + (0x0061, 0x007a, 'rm', 0x1d5ba), # a-z + 
(0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega + (0x03b1, 0x03c9, 'rm', 0xe196), # \alpha-\omega + (0x03d1, 0x03d1, 'rm', 0xe1b0), # theta variant + (0x03d5, 0x03d5, 'rm', 0xe1b1), # phi variant + (0x03d6, 0x03d6, 'rm', 0xe1b3), # pi variant + (0x03f1, 0x03f1, 'rm', 0xe1b2), # rho variant + (0x03f5, 0x03f5, 'rm', 0xe1af), # lunate epsilon + (0x2202, 0x2202, 'rm', 0xe17c), # partial differential + ], + 'it': + [ + # These numerals are actually upright. We don't actually + # want italic numerals ever. + (0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9 + (0x0041, 0x005a, 'it', 0x1d608), # A-Z + (0x0061, 0x007a, 'it', 0x1d622), # a-z + (0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega + (0x03b1, 0x03c9, 'it', 0xe1d8), # \alpha-\omega + (0x03d1, 0x03d1, 'it', 0xe1f2), # theta variant + (0x03d5, 0x03d5, 'it', 0xe1f3), # phi variant + (0x03d6, 0x03d6, 'it', 0xe1f5), # pi variant + (0x03f1, 0x03f1, 'it', 0xe1f4), # rho variant + (0x03f5, 0x03f5, 'it', 0xe1f1), # lunate epsilon + ], + 'bf': + [ + (0x0030, 0x0039, 'bf', 0x1d7ec), # 0-9 + (0x0041, 0x005a, 'bf', 0x1d5d4), # A-Z + (0x0061, 0x007a, 'bf', 0x1d5ee), # a-z + (0x0391, 0x03a9, 'bf', 0x1d756), # \Alpha-\Omega + (0x03b1, 0x03c9, 'bf', 0x1d770), # \alpha-\omega + (0x03d1, 0x03d1, 'bf', 0x1d78b), # theta variant + (0x03d5, 0x03d5, 'bf', 0x1d78d), # phi variant + (0x03d6, 0x03d6, 'bf', 0x1d78f), # pi variant + (0x03f0, 0x03f0, 'bf', 0x1d78c), # kappa variant + (0x03f1, 0x03f1, 'bf', 0x1d78e), # rho variant + (0x03f5, 0x03f5, 'bf', 0x1d78a), # lunate epsilon + (0x2202, 0x2202, 'bf', 0x1d789), # partial differential + (0x2207, 0x2207, 'bf', 0x1d76f), # \Nabla + ], + }, + 'tt': + [ + (0x0030, 0x0039, 'rm', 0x1d7f6), # 0-9 + (0x0041, 0x005a, 'rm', 0x1d670), # A-Z + (0x0061, 0x007a, 'rm', 0x1d68a) # a-z + ], + } + + +# Fix some incorrect glyphs. +stix_glyph_fixes = { + # Cap and Cup glyphs are swapped. 
+ 0x22d2: 0x22d3, + 0x22d3: 0x22d2, +} diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_path.cpython-39-x86_64-linux-gnu.so b/.venv/lib/python3.9/site-packages/matplotlib/_path.cpython-39-x86_64-linux-gnu.so new file mode 100644 index 00000000..5a477742 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/matplotlib/_path.cpython-39-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_pylab_helpers.py b/.venv/lib/python3.9/site-packages/matplotlib/_pylab_helpers.py new file mode 100644 index 00000000..27904dd8 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/matplotlib/_pylab_helpers.py @@ -0,0 +1,140 @@ +""" +Manage figures for the pyplot interface. +""" + +import atexit +from collections import OrderedDict +import gc + + +class Gcf: + """ + Singleton to maintain the relation between figures and their managers, and + keep track of and "active" figure and manager. + + The canvas of a figure created through pyplot is associated with a figure + manager, which handles the interaction between the figure and the backend. + pyplot keeps track of figure managers using an identifier, the "figure + number" or "manager number" (which can actually be any hashable value); + this number is available as the :attr:`number` attribute of the manager. + + This class is never instantiated; it consists of an `OrderedDict` mapping + figure/manager numbers to managers, and a set of class methods that + manipulate this `OrderedDict`. + + Attributes + ---------- + figs : OrderedDict + `OrderedDict` mapping numbers to managers; the active manager is at the + end. + """ + + figs = OrderedDict() + + @classmethod + def get_fig_manager(cls, num): + """ + If manager number *num* exists, make it the active one and return it; + otherwise return *None*. 
+ """ + manager = cls.figs.get(num, None) + if manager is not None: + cls.set_active(manager) + return manager + + @classmethod + def destroy(cls, num): + """ + Destroy manager *num* -- either a manager instance or a manager number. + + In the interactive backends, this is bound to the window "destroy" and + "delete" events. + + It is recommended to pass a manager instance, to avoid confusion when + two managers share the same number. + """ + if all(hasattr(num, attr) for attr in ["num", "destroy"]): + manager = num + if cls.figs.get(manager.num) is manager: + cls.figs.pop(manager.num) + else: + try: + manager = cls.figs.pop(num) + except KeyError: + return + if hasattr(manager, "_cidgcf"): + manager.canvas.mpl_disconnect(manager._cidgcf) + manager.destroy() + gc.collect(1) + + @classmethod + def destroy_fig(cls, fig): + """Destroy figure *fig*.""" + num = next((manager.num for manager in cls.figs.values() + if manager.canvas.figure == fig), None) + if num is not None: + cls.destroy(num) + + @classmethod + def destroy_all(cls): + """Destroy all figures.""" + # Reimport gc in case the module globals have already been removed + # during interpreter shutdown. 
+ import gc + for manager in list(cls.figs.values()): + manager.canvas.mpl_disconnect(manager._cidgcf) + manager.destroy() + cls.figs.clear() + gc.collect(1) + + @classmethod + def has_fignum(cls, num): + """Return whether figure number *num* exists.""" + return num in cls.figs + + @classmethod + def get_all_fig_managers(cls): + """Return a list of figure managers.""" + return list(cls.figs.values()) + + @classmethod + def get_num_fig_managers(cls): + """Return the number of figures being managed.""" + return len(cls.figs) + + @classmethod + def get_active(cls): + """Return the active manager, or *None* if there is no manager.""" + return next(reversed(cls.figs.values())) if cls.figs else None + + @classmethod + def _set_new_active_manager(cls, manager): + """Adopt *manager* into pyplot and make it the active manager.""" + if not hasattr(manager, "_cidgcf"): + manager._cidgcf = manager.canvas.mpl_connect( + "button_press_event", lambda event: cls.set_active(manager)) + fig = manager.canvas.figure + fig.number = manager.num + label = fig.get_label() + if label: + manager.set_window_title(label) + cls.set_active(manager) + + @classmethod + def set_active(cls, manager): + """Make *manager* the active manager.""" + cls.figs[manager.num] = manager + cls.figs.move_to_end(manager.num) + + @classmethod + def draw_all(cls, force=False): + """ + Redraw all stale managed figures, or, if *force* is True, all managed + figures. 
+ """ + for manager in cls.get_all_fig_managers(): + if force or manager.canvas.figure.stale: + manager.canvas.draw_idle() + + +atexit.register(Gcf.destroy_all) diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_qhull.cpython-39-x86_64-linux-gnu.so b/.venv/lib/python3.9/site-packages/matplotlib/_qhull.cpython-39-x86_64-linux-gnu.so new file mode 100644 index 00000000..d99afbcc Binary files /dev/null and b/.venv/lib/python3.9/site-packages/matplotlib/_qhull.cpython-39-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_text_helpers.py b/.venv/lib/python3.9/site-packages/matplotlib/_text_helpers.py new file mode 100644 index 00000000..75d84997 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/matplotlib/_text_helpers.py @@ -0,0 +1,68 @@ +""" +Low-level text helper utilities. +""" + +import dataclasses + +from . import _api +from .ft2font import KERNING_DEFAULT, LOAD_NO_HINTING + + +LayoutItem = dataclasses.make_dataclass( + "LayoutItem", ["char", "glyph_idx", "x", "prev_kern"]) + + +def warn_on_missing_glyph(codepoint): + _api.warn_external( + "Glyph {} ({}) missing from current font.".format( + codepoint, + chr(codepoint).encode("ascii", "namereplace").decode("ascii"))) + block = ("Hebrew" if 0x0590 <= codepoint <= 0x05ff else + "Arabic" if 0x0600 <= codepoint <= 0x06ff else + "Devanagari" if 0x0900 <= codepoint <= 0x097f else + "Bengali" if 0x0980 <= codepoint <= 0x09ff else + "Gurmukhi" if 0x0a00 <= codepoint <= 0x0a7f else + "Gujarati" if 0x0a80 <= codepoint <= 0x0aff else + "Oriya" if 0x0b00 <= codepoint <= 0x0b7f else + "Tamil" if 0x0b80 <= codepoint <= 0x0bff else + "Telugu" if 0x0c00 <= codepoint <= 0x0c7f else + "Kannada" if 0x0c80 <= codepoint <= 0x0cff else + "Malayalam" if 0x0d00 <= codepoint <= 0x0d7f else + "Sinhala" if 0x0d80 <= codepoint <= 0x0dff else + None) + if block: + _api.warn_external( + f"Matplotlib currently does not support {block} natively.") + + +def layout(string, font, *, 
kern_mode=KERNING_DEFAULT): + """ + Render *string* with *font*. For each character in *string*, yield a + (glyph-index, x-position) pair. When such a pair is yielded, the font's + glyph is set to the corresponding character. + + Parameters + ---------- + string : str + The string to be rendered. + font : FT2Font + The font. + kern_mode : int + A FreeType kerning mode. + + Yields + ------ + glyph_index : int + x_position : float + """ + x = 0 + prev_glyph_idx = None + for char in string: + glyph_idx = font.get_char_index(ord(char)) + kern = (font.get_kerning(prev_glyph_idx, glyph_idx, kern_mode) / 64 + if prev_glyph_idx is not None else 0.) + x += kern + glyph = font.load_glyph(glyph_idx, flags=LOAD_NO_HINTING) + yield LayoutItem(char, glyph_idx, x, kern) + x += glyph.linearHoriAdvance / 65536 + prev_glyph_idx = glyph_idx diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_tri.cpython-39-x86_64-linux-gnu.so b/.venv/lib/python3.9/site-packages/matplotlib/_tri.cpython-39-x86_64-linux-gnu.so new file mode 100644 index 00000000..7ef935c9 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/matplotlib/_tri.cpython-39-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_ttconv.cpython-39-x86_64-linux-gnu.so b/.venv/lib/python3.9/site-packages/matplotlib/_ttconv.cpython-39-x86_64-linux-gnu.so new file mode 100644 index 00000000..40b18ca5 Binary files /dev/null and b/.venv/lib/python3.9/site-packages/matplotlib/_ttconv.cpython-39-x86_64-linux-gnu.so differ diff --git a/.venv/lib/python3.9/site-packages/matplotlib/_version.py b/.venv/lib/python3.9/site-packages/matplotlib/_version.py new file mode 100644 index 00000000..a3c50b08 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/matplotlib/_version.py @@ -0,0 +1,5 @@ +# coding: utf-8 +# file generated by setuptools_scm +# don't change, don't track in version control +version = '3.5.0' +version_tuple = (3, 5, 0) diff --git 
a/.venv/lib/python3.9/site-packages/matplotlib/afm.py b/.venv/lib/python3.9/site-packages/matplotlib/afm.py new file mode 100644 index 00000000..3d02d7f9 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/matplotlib/afm.py @@ -0,0 +1,532 @@ +""" +A python interface to Adobe Font Metrics Files. + +Although a number of other Python implementations exist, and may be more +complete than this, it was decided not to go with them because they were +either: + +1) copyrighted or used a non-BSD compatible license +2) had too many dependencies and a free standing lib was needed +3) did more than needed and it was easier to write afresh rather than + figure out how to get just what was needed. + +It is pretty easy to use, and has no external dependencies: + +>>> import matplotlib as mpl +>>> from pathlib import Path +>>> afm_path = Path(mpl.get_data_path(), 'fonts', 'afm', 'ptmr8a.afm') +>>> +>>> from matplotlib.afm import AFM +>>> with afm_path.open('rb') as fh: +... afm = AFM(fh) +>>> afm.string_width_height('What the heck?') +(6220.0, 694) +>>> afm.get_fontname() +'Times-Roman' +>>> afm.get_kern_dist('A', 'f') +0 +>>> afm.get_kern_dist('A', 'y') +-92.0 +>>> afm.get_bbox_char('!') +[130, -9, 238, 676] + +As in the Adobe Font Metrics File Format Specification, all dimensions +are given in units of 1/1000 of the scale factor (point size) of the font +being used. +""" + +from collections import namedtuple +import logging +import re + +from ._mathtext_data import uni2type1 + + +_log = logging.getLogger(__name__) + + +def _to_int(x): + # Some AFM files have floats where we are expecting ints -- there is + # probably a better way to handle this (support floats, round rather than + # truncate). But I don't know what the best approach is now and this + # change to _to_int should at least prevent Matplotlib from crashing on + # these. JDH (2009-11-06) + return int(float(x)) + + +def _to_float(x): + # Some AFM files use "," instead of "." 
as decimal separator -- this + # shouldn't be ambiguous (unless someone is wicked enough to use "," as + # thousands separator...). + if isinstance(x, bytes): + # Encoding doesn't really matter -- if we have codepoints >127 the call + # to float() will error anyways. + x = x.decode('latin-1') + return float(x.replace(',', '.')) + + +def _to_str(x): + return x.decode('utf8') + + +def _to_list_of_ints(s): + s = s.replace(b',', b' ') + return [_to_int(val) for val in s.split()] + + +def _to_list_of_floats(s): + return [_to_float(val) for val in s.split()] + + +def _to_bool(s): + if s.lower().strip() in (b'false', b'0', b'no'): + return False + else: + return True + + +def _parse_header(fh): + """ + Read the font metrics header (up to the char metrics) and returns + a dictionary mapping *key* to *val*. *val* will be converted to the + appropriate python type as necessary; e.g.: + + * 'False'->False + * '0'->0 + * '-168 -218 1000 898'-> [-168, -218, 1000, 898] + + Dictionary keys are + + StartFontMetrics, FontName, FullName, FamilyName, Weight, + ItalicAngle, IsFixedPitch, FontBBox, UnderlinePosition, + UnderlineThickness, Version, Notice, EncodingScheme, CapHeight, + XHeight, Ascender, Descender, StartCharMetrics + """ + header_converters = { + b'StartFontMetrics': _to_float, + b'FontName': _to_str, + b'FullName': _to_str, + b'FamilyName': _to_str, + b'Weight': _to_str, + b'ItalicAngle': _to_float, + b'IsFixedPitch': _to_bool, + b'FontBBox': _to_list_of_ints, + b'UnderlinePosition': _to_float, + b'UnderlineThickness': _to_float, + b'Version': _to_str, + # Some AFM files have non-ASCII characters (which are not allowed by + # the spec). Given that there is actually no public API to even access + # this field, just return it as straight bytes. + b'Notice': lambda x: x, + b'EncodingScheme': _to_str, + b'CapHeight': _to_float, # Is the second version a mistake, or + b'Capheight': _to_float, # do some AFM files contain 'Capheight'? 
-JKS + b'XHeight': _to_float, + b'Ascender': _to_float, + b'Descender': _to_float, + b'StdHW': _to_float, + b'StdVW': _to_float, + b'StartCharMetrics': _to_int, + b'CharacterSet': _to_str, + b'Characters': _to_int, + } + d = {} + first_line = True + for line in fh: + line = line.rstrip() + if line.startswith(b'Comment'): + continue + lst = line.split(b' ', 1) + key = lst[0] + if first_line: + # AFM spec, Section 4: The StartFontMetrics keyword + # [followed by a version number] must be the first line in + # the file, and the EndFontMetrics keyword must be the + # last non-empty line in the file. We just check the + # first header entry. + if key != b'StartFontMetrics': + raise RuntimeError('Not an AFM file') + first_line = False + if len(lst) == 2: + val = lst[1] + else: + val = b'' + try: + converter = header_converters[key] + except KeyError: + _log.error('Found an unknown keyword in AFM header (was %r)' % key) + continue + try: + d[key] = converter(val) + except ValueError: + _log.error('Value error parsing header in AFM: %s, %s', key, val) + continue + if key == b'StartCharMetrics': + break + else: + raise RuntimeError('Bad parse') + return d + + +CharMetrics = namedtuple('CharMetrics', 'width, name, bbox') +CharMetrics.__doc__ = """ + Represents the character metrics of a single character. + + Notes + ----- + The fields do currently only describe a subset of character metrics + information defined in the AFM standard. + """ +CharMetrics.width.__doc__ = """The character width (WX).""" +CharMetrics.name.__doc__ = """The character name (N).""" +CharMetrics.bbox.__doc__ = """ + The bbox of the character (B) as a tuple (*llx*, *lly*, *urx*, *ury*).""" + + +def _parse_char_metrics(fh): + """ + Parse the given filehandle for character metrics information and return + the information as dicts. + + It is assumed that the file cursor is on the line behind + 'StartCharMetrics'. 
+ + Returns + ------- + ascii_d : dict + A mapping "ASCII num of the character" to `.CharMetrics`. + name_d : dict + A mapping "character name" to `.CharMetrics`. + + Notes + ----- + This function is incomplete per the standard, but thus far parses + all the sample afm files tried. + """ + required_keys = {'C', 'WX', 'N', 'B'} + + ascii_d = {} + name_d = {} + for line in fh: + # We are defensively letting values be utf8. The spec requires + # ascii, but there are non-compliant fonts in circulation + line = _to_str(line.rstrip()) # Convert from byte-literal + if line.startswith('EndCharMetrics'): + return ascii_d, name_d + # Split the metric line into a dictionary, keyed by metric identifiers + vals = dict(s.strip().split(' ', 1) for s in line.split(';') if s) + # There may be other metrics present, but only these are needed + if not required_keys.issubset(vals): + raise RuntimeError('Bad char metrics line: %s' % line) + num = _to_int(vals['C']) + wx = _to_float(vals['WX']) + name = vals['N'] + bbox = _to_list_of_floats(vals['B']) + bbox = list(map(int, bbox)) + metrics = CharMetrics(wx, name, bbox) + # Workaround: If the character name is 'Euro', give it the + # corresponding character code, according to WinAnsiEncoding (see PDF + # Reference). + if name == 'Euro': + num = 128 + elif name == 'minus': + num = ord("\N{MINUS SIGN}") # 0x2212 + if num != -1: + ascii_d[num] = metrics + name_d[name] = metrics + raise RuntimeError('Bad parse') + + +def _parse_kern_pairs(fh): + """ + Return a kern pairs dictionary; keys are (*char1*, *char2*) tuples and + values are the kern pair value. 
For example, a kern pairs line like + ``KPX A y -50`` + + will be represented as:: + + d[ ('A', 'y') ] = -50 + + """ + + line = next(fh) + if not line.startswith(b'StartKernPairs'): + raise RuntimeError('Bad start of kern pairs data: %s' % line) + + d = {} + for line in fh: + line = line.rstrip() + if not line: + continue + if line.startswith(b'EndKernPairs'): + next(fh) # EndKernData + return d + vals = line.split() + if len(vals) != 4 or vals[0] != b'KPX': + raise RuntimeError('Bad kern pairs line: %s' % line) + c1, c2, val = _to_str(vals[1]), _to_str(vals[2]), _to_float(vals[3]) + d[(c1, c2)] = val + raise RuntimeError('Bad kern pairs parse') + + +CompositePart = namedtuple('CompositePart', 'name, dx, dy') +CompositePart.__doc__ = """ + Represents the information on a composite element of a composite char.""" +CompositePart.name.__doc__ = """Name of the part, e.g. 'acute'.""" +CompositePart.dx.__doc__ = """x-displacement of the part from the origin.""" +CompositePart.dy.__doc__ = """y-displacement of the part from the origin.""" + + +def _parse_composites(fh): + """ + Parse the given filehandle for composites information return them as a + dict. + + It is assumed that the file cursor is on the line behind 'StartComposites'. + + Returns + ------- + dict + A dict mapping composite character names to a parts list. The parts + list is a list of `.CompositePart` entries describing the parts of + the composite. 
+ + Examples + -------- + A composite definition line:: + + CC Aacute 2 ; PCC A 0 0 ; PCC acute 160 170 ; + + will be represented as:: + + composites['Aacute'] = [CompositePart(name='A', dx=0, dy=0), + CompositePart(name='acute', dx=160, dy=170)] + + """ + composites = {} + for line in fh: + line = line.rstrip() + if not line: + continue + if line.startswith(b'EndComposites'): + return composites + vals = line.split(b';') + cc = vals[0].split() + name, _num_parts = cc[1], _to_int(cc[2]) + pccParts = [] + for s in vals[1:-1]: + pcc = s.split() + part = CompositePart(pcc[1], _to_float(pcc[2]), _to_float(pcc[3])) + pccParts.append(part) + composites[name] = pccParts + + raise RuntimeError('Bad composites parse') + + +def _parse_optional(fh): + """ + Parse the optional fields for kern pair data and composites. + + Returns + ------- + kern_data : dict + A dict containing kerning information. May be empty. + See `._parse_kern_pairs`. + composites : dict + A dict containing composite information. May be empty. + See `._parse_composites`. + """ + optional = { + b'StartKernData': _parse_kern_pairs, + b'StartComposites': _parse_composites, + } + + d = {b'StartKernData': {}, + b'StartComposites': {}} + for line in fh: + line = line.rstrip() + if not line: + continue + key = line.split()[0] + + if key in optional: + d[key] = optional[key](fh) + + return d[b'StartKernData'], d[b'StartComposites'] + + +class AFM: + + def __init__(self, fh): + """Parse the AFM file in file object *fh*.""" + self._header = _parse_header(fh) + self._metrics, self._metrics_by_name = _parse_char_metrics(fh) + self._kern, self._composite = _parse_optional(fh) + + def get_bbox_char(self, c, isord=False): + if not isord: + c = ord(c) + return self._metrics[c].bbox + + def string_width_height(self, s): + """ + Return the string width (including kerning) and string height + as a (*w*, *h*) tuple. 
+ """ + if not len(s): + return 0, 0 + total_width = 0 + namelast = None + miny = 1e9 + maxy = 0 + for c in s: + if c == '\n': + continue + wx, name, bbox = self._metrics[ord(c)] + + total_width += wx + self._kern.get((namelast, name), 0) + l, b, w, h = bbox + miny = min(miny, b) + maxy = max(maxy, b + h) + + namelast = name + + return total_width, maxy - miny + + def get_str_bbox_and_descent(self, s): + """Return the string bounding box and the maximal descent.""" + if not len(s): + return 0, 0, 0, 0, 0 + total_width = 0 + namelast = None + miny = 1e9 + maxy = 0 + left = 0 + if not isinstance(s, str): + s = _to_str(s) + for c in s: + if c == '\n': + continue + name = uni2type1.get(ord(c), f"uni{ord(c):04X}") + try: + wx, _, bbox = self._metrics_by_name[name] + except KeyError: + name = 'question' + wx, _, bbox = self._metrics_by_name[name] + total_width += wx + self._kern.get((namelast, name), 0) + l, b, w, h = bbox + left = min(left, l) + miny = min(miny, b) + maxy = max(maxy, b + h) + + namelast = name + + return left, miny, total_width, maxy - miny, -miny + + def get_str_bbox(self, s): + """Return the string bounding box.""" + return self.get_str_bbox_and_descent(s)[:4] + + def get_name_char(self, c, isord=False): + """Get the name of the character, i.e., ';' is 'semicolon'.""" + if not isord: + c = ord(c) + return self._metrics[c].name + + def get_width_char(self, c, isord=False): + """ + Get the width of the character from the character metric WX field. 
+ """ + if not isord: + c = ord(c) + return self._metrics[c].width + + def get_width_from_char_name(self, name): + """Get the width of the character from a type1 character name.""" + return self._metrics_by_name[name].width + + def get_height_char(self, c, isord=False): + """Get the bounding box (ink) height of character *c* (space is 0).""" + if not isord: + c = ord(c) + return self._metrics[c].bbox[-1] + + def get_kern_dist(self, c1, c2): + """ + Return the kerning pair distance (possibly 0) for chars *c1* and *c2*. + """ + name1, name2 = self.get_name_char(c1), self.get_name_char(c2) + return self.get_kern_dist_from_name(name1, name2) + + def get_kern_dist_from_name(self, name1, name2): + """ + Return the kerning pair distance (possibly 0) for chars + *name1* and *name2*. + """ + return self._kern.get((name1, name2), 0) + + def get_fontname(self): + """Return the font name, e.g., 'Times-Roman'.""" + return self._header[b'FontName'] + + @property + def postscript_name(self): # For consistency with FT2Font. 
+ return self.get_fontname() + + def get_fullname(self): + """Return the font full name, e.g., 'Times-Roman'.""" + name = self._header.get(b'FullName') + if name is None: # use FontName as a substitute + name = self._header[b'FontName'] + return name + + def get_familyname(self): + """Return the font family name, e.g., 'Times'.""" + name = self._header.get(b'FamilyName') + if name is not None: + return name + + # FamilyName not specified so we'll make a guess + name = self.get_fullname() + extras = (r'(?i)([ -](regular|plain|italic|oblique|bold|semibold|' + r'light|ultralight|extra|condensed))+$') + return re.sub(extras, '', name) + + @property + def family_name(self): + """The font family name, e.g., 'Times'.""" + return self.get_familyname() + + def get_weight(self): + """Return the font weight, e.g., 'Bold' or 'Roman'.""" + return self._header[b'Weight'] + + def get_angle(self): + """Return the fontangle as float.""" + return self._header[b'ItalicAngle'] + + def get_capheight(self): + """Return the cap height as float.""" + return self._header[b'CapHeight'] + + def get_xheight(self): + """Return the xheight as float.""" + return self._header[b'XHeight'] + + def get_underline_thickness(self): + """Return the underline thickness as float.""" + return self._header[b'UnderlineThickness'] + + def get_horizontal_stem_width(self): + """ + Return the standard horizontal stem width as float, or *None* if + not specified in AFM file. + """ + return self._header.get(b'StdHW', None) + + def get_vertical_stem_width(self): + """ + Return the standard vertical stem width as float, or *None* if + not specified in AFM file. 
+ """ + return self._header.get(b'StdVW', None) diff --git a/.venv/lib/python3.9/site-packages/matplotlib/animation.py b/.venv/lib/python3.9/site-packages/matplotlib/animation.py new file mode 100644 index 00000000..88e79bb4 --- /dev/null +++ b/.venv/lib/python3.9/site-packages/matplotlib/animation.py @@ -0,0 +1,1741 @@ +# TODO: +# * Documentation -- this will need a new section of the User's Guide. +# Both for Animations and just timers. +# - Also need to update +# https://scipy-cookbook.readthedocs.io/items/Matplotlib_Animations.html +# * Blit +# * Currently broken with Qt4 for widgets that don't start on screen +# * Still a few edge cases that aren't working correctly +# * Can this integrate better with existing matplotlib animation artist flag? +# - If animated removes from default draw(), perhaps we could use this to +# simplify initial draw. +# * Example +# * Frameless animation - pure procedural with no loop +# * Need example that uses something like inotify or subprocess +# * Complex syncing examples +# * Movies +# * Can blit be enabled for movies? +# * Need to consider event sources to allow clicking through multiple figures + +import abc +import base64 +import contextlib +from io import BytesIO, TextIOWrapper +import itertools +import logging +from pathlib import Path +import shutil +import subprocess +import sys +from tempfile import TemporaryDirectory +import uuid +import warnings + +import numpy as np +from PIL import Image + +import matplotlib as mpl +from matplotlib._animation_data import ( + DISPLAY_TEMPLATE, INCLUDED_FRAMES, JS_INCLUDE, STYLE_INCLUDE) +from matplotlib import _api, cbook + + +_log = logging.getLogger(__name__) + +# Process creation flag for subprocess to prevent it raising a terminal +# window. 
See for example: +# https://stackoverflow.com/q/24130623/ +if sys.platform == 'win32': + subprocess_creation_flags = CREATE_NO_WINDOW = 0x08000000 +else: + # Apparently None won't work here + subprocess_creation_flags = 0 + +# Other potential writing methods: +# * http://pymedia.org/ +# * libming (produces swf) python wrappers: https://github.com/libming/libming +# * Wrap x264 API: + +# (https://stackoverflow.com/q/2940671/) + + +def adjusted_figsize(w, h, dpi, n): + """ + Compute figure size so that pixels are a multiple of n. + + Parameters + ---------- + w, h : float + Size in inches. + + dpi : float + The dpi. + + n : int + The target multiple. + + Returns + ------- + wnew, hnew : float + The new figure size in inches. + """ + + # this maybe simplified if / when we adopt consistent rounding for + # pixel size across the whole library + def correct_roundoff(x, dpi, n): + if int(x*dpi) % n != 0: + if int(np.nextafter(x, np.inf)*dpi) % n == 0: + x = np.nextafter(x, np.inf) + elif int(np.nextafter(x, -np.inf)*dpi) % n == 0: + x = np.nextafter(x, -np.inf) + return x + + wnew = int(w * dpi / n) * n / dpi + hnew = int(h * dpi / n) * n / dpi + return correct_roundoff(wnew, dpi, n), correct_roundoff(hnew, dpi, n) + + +class MovieWriterRegistry: + """Registry of available writer classes by human readable name.""" + + def __init__(self): + self._registered = dict() + + def register(self, name): + """ + Decorator for registering a class under a name. + + Example use:: + + @registry.register(name) + class Foo: + pass + """ + def wrapper(writer_cls): + self._registered[name] = writer_cls + return writer_cls + return wrapper + + def is_available(self, name): + """ + Check if given writer is available by name. 
+ + Parameters + ---------- + name : str + + Returns + ------- + bool + """ + try: + cls = self._registered[name] + except KeyError: + return False + return cls.isAvailable() + + def __iter__(self): + """Iterate over names of available writer class.""" + for name in self._registered: + if self.is_available(name): + yield name + + def list(self): + """Get a list of available MovieWriters.""" + return [*self] + + def __getitem__(self, name): + """Get an available writer class from its name.""" + if self.is_available(name): + return self._registered[name] + raise RuntimeError(f"Requested MovieWriter ({name}) not available") + + +writers = MovieWriterRegistry() + + +class AbstractMovieWriter(abc.ABC): + """ + Abstract base class for writing movies. Fundamentally, what a MovieWriter + does is provide is a way to grab frames by calling grab_frame(). + + setup() is called to start the process and finish() is called afterwards. + + This class is set up to provide for writing movie frame data to a pipe. + saving() is provided as a context manager to facilitate this process as:: + + with moviewriter.saving(fig, outfile='myfile.mp4', dpi=100): + # Iterate over frames + moviewriter.grab_frame(**savefig_kwargs) + + The use of the context manager ensures that setup() and finish() are + performed as necessary. + + An instance of a concrete subclass of this class can be given as the + ``writer`` argument of `Animation.save()`. + """ + + def __init__(self, fps=5, metadata=None, codec=None, bitrate=None): + self.fps = fps + self.metadata = metadata if metadata is not None else {} + self.codec = ( + mpl.rcParams['animation.codec'] if codec is None else codec) + self.bitrate = ( + mpl.rcParams['animation.bitrate'] if bitrate is None else bitrate) + + @abc.abstractmethod + def setup(self, fig, outfile, dpi=None): + """ + Setup for writing the movie file. + + Parameters + ---------- + fig : `~matplotlib.figure.Figure` + The figure object that contains the information for frames. 
+ outfile : str + The filename of the resulting movie file. + dpi : float, default: ``fig.dpi`` + The DPI (or resolution) for the file. This controls the size + in pixels of the resulting movie file. + """ + self.outfile = outfile + self.fig = fig + if dpi is None: + dpi = self.fig.dpi + self.dpi = dpi + + @property + def frame_size(self): + """A tuple ``(width, height)`` in pixels of a movie frame.""" + w, h = self.fig.get_size_inches() + return int(w * self.dpi), int(h * self.dpi) + + @abc.abstractmethod + def grab_frame(self, **savefig_kwargs): + """ + Grab the image information from the figure and save as a movie frame. + + All keyword arguments in *savefig_kwargs* are passed on to the + `~.Figure.savefig` call that saves the figure. + """ + + @abc.abstractmethod + def finish(self): + """Finish any processing for writing the movie.""" + + @contextlib.contextmanager + def saving(self, fig, outfile, dpi, *args, **kwargs): + """ + Context manager to facilitate writing the movie file. + + ``*args, **kw`` are any parameters that should be passed to `setup`. + """ + # This particular sequence is what contextlib.contextmanager wants + self.setup(fig, outfile, dpi, *args, **kwargs) + try: + yield self + finally: + self.finish() + + +class MovieWriter(AbstractMovieWriter): + """ + Base class for writing movies. + + This is a base class for MovieWriter subclasses that write a movie frame + data to a pipe. You cannot instantiate this class directly. + See examples for how to use its subclasses. + + Attributes + ---------- + frame_format : str + The format used in writing frame data, defaults to 'rgba'. + fig : `~matplotlib.figure.Figure` + The figure to capture data from. + This must be provided by the sub-classes. + """ + + # Builtin writer subclasses additionally define the _exec_key and _args_key + # attributes, which indicate the rcParams entries where the path to the + # executable and additional command-line arguments to the executable are + # stored. 
Third-party writers cannot meaningfully set these as they cannot + # extend rcParams with new keys. + + # Pipe-based writers only support RGBA, but file-based ones support more + # formats. + supported_formats = ["rgba"] + + def __init__(self, fps=5, codec=None, bitrate=None, extra_args=None, + metadata=None): + """ + Parameters + ---------- + fps : int, default: 5 + Movie frame rate (per second). + codec : str or None, default: :rc:`animation.codec` + The codec to use. + bitrate : int, default: :rc:`animation.bitrate` + The bitrate of the movie, in kilobits per second. Higher values + means higher quality movies, but increase the file size. A value + of -1 lets the underlying movie encoder select the bitrate. + extra_args : list of str or None, optional + Extra command-line arguments passed to the underlying movie + encoder. The default, None, means to use + :rc:`animation.[name-of-encoder]_args` for the builtin writers. + metadata : dict[str, str], default: {} + A dictionary of keys and values for metadata to include in the + output file. Some keys that may be of use include: + title, artist, genre, subject, copyright, srcform, comment. + """ + if type(self) is MovieWriter: + # TODO MovieWriter is still an abstract class and needs to be + # extended with a mixin. This should be clearer in naming + # and description. For now, just give a reasonable error + # message to users. + raise TypeError( + 'MovieWriter cannot be instantiated directly. 
Please use one ' + 'of its subclasses.') + + super().__init__(fps=fps, metadata=metadata, codec=codec, + bitrate=bitrate) + self.frame_format = self.supported_formats[0] + self.extra_args = extra_args + + def _adjust_frame_size(self): + if self.codec == 'h264': + wo, ho = self.fig.get_size_inches() + w, h = adjusted_figsize(wo, ho, self.dpi, 2) + if (wo, ho) != (w, h): + self.fig.set_size_inches(w, h, forward=True) + _log.info('figure size in inches has been adjusted ' + 'from %s x %s to %s x %s', wo, ho, w, h) + else: + w, h = self.fig.get_size_inches() + _log.debug('frame size in pixels is %s x %s', *self.frame_size) + return w, h + + def setup(self, fig, outfile, dpi=None): + # docstring inherited + super().setup(fig, outfile, dpi=dpi) + self._w, self._h = self._adjust_frame_size() + # Run here so that grab_frame() can write the data to a pipe. This + # eliminates the need for temp files. + self._run() + + def _run(self): + # Uses subprocess to call the program for assembling frames into a + # movie file. *args* returns the sequence of command line arguments + # from a few configuration options. + command = self._args() + _log.info('MovieWriter._run: running command: %s', + cbook._pformat_subprocess(command)) + PIPE = subprocess.PIPE + self._proc = subprocess.Popen( + command, stdin=PIPE, stdout=PIPE, stderr=PIPE, + creationflags=subprocess_creation_flags) + + def finish(self): + """Finish any processing for writing the movie.""" + overridden_cleanup = _api.deprecate_method_override( + __class__.cleanup, self, since="3.4", alternative="finish()") + if overridden_cleanup is not None: + overridden_cleanup() + else: + self._cleanup() # Inline _cleanup() once cleanup() is removed. + + def grab_frame(self, **savefig_kwargs): + # docstring inherited + _log.debug('MovieWriter.grab_frame: Grabbing frame.') + # Readjust the figure size in case it has been changed by the user. + # All frames must have the same size to save the movie correctly. 
+ self.fig.set_size_inches(self._w, self._h) + # Save the figure data to the sink, using the frame format and dpi. + self.fig.savefig(self._proc.stdin, format=self.frame_format, + dpi=self.dpi, **savefig_kwargs) + + def _args(self): + """Assemble list of encoder-specific command-line arguments.""" + return NotImplementedError("args needs to be implemented by subclass.") + + def _cleanup(self): # Inline to finish() once cleanup() is removed. + """Clean-up and collect the process used to write the movie file.""" + out, err = self._proc.communicate() + # Use the encoding/errors that universal_newlines would use. + out = TextIOWrapper(BytesIO(out)).read() + err = TextIOWrapper(BytesIO(err)).read() + if out: + _log.log( + logging.WARNING if self._proc.returncode else logging.DEBUG, + "MovieWriter stdout:\n%s", out) + if err: + _log.log( + logging.WARNING if self._proc.returncode else logging.DEBUG, + "MovieWriter stderr:\n%s", err) + if self._proc.returncode: + raise subprocess.CalledProcessError( + self._proc.returncode, self._proc.args, out, err) + + @_api.deprecated("3.4") + def cleanup(self): + self._cleanup() + + @classmethod + def bin_path(cls): + """ + Return the binary path to the commandline tool used by a specific + subclass. This is a class method so that the tool can be looked for + before making a particular MovieWriter subclass available. + """ + return str(mpl.rcParams[cls._exec_key]) + + @classmethod + def isAvailable(cls): + """Return whether a MovieWriter subclass is actually available.""" + return shutil.which(cls.bin_path()) is not None + + +class FileMovieWriter(MovieWriter): + """ + `MovieWriter` for writing to individual files and stitching at the end. + + This must be sub-classed to be useful. + """ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.frame_format = mpl.rcParams['animation.frame_format'] + + def setup(self, fig, outfile, dpi=None, frame_prefix=None): + """ + Setup for writing the movie file. 
+ + Parameters + ---------- + fig : `~matplotlib.figure.Figure` + The figure to grab the rendered frames from. + outfile : str + The filename of the resulting movie file. + dpi : float, default: ``fig.dpi`` + The dpi of the output file. This, with the figure size, + controls the size in pixels of the resulting movie file. + frame_prefix : str, optional + The filename prefix to use for temporary files. If *None* (the + default), files are written to a temporary directory which is + deleted by `cleanup`; if not *None*, no temporary files are + deleted. + """ + self.fig = fig + self.outfile = outfile + if dpi is None: + dpi = self.fig.dpi + self.dpi = dpi + self._adjust_frame_size() + + if frame_prefix is None: + self._tmpdir = TemporaryDirectory() + self.temp_prefix = str(Path(self._tmpdir.name, 'tmp')) + else: + self._tmpdir = None + self.temp_prefix = frame_prefix + self._frame_counter = 0 # used for generating sequential file names + self._temp_paths = list() + self.fname_format_str = '%s%%07d.%s' + + def __del__(self): + if self._tmpdir: + self._tmpdir.cleanup() + + @property + def frame_format(self): + """ + Format (png, jpeg, etc.) to use for saving the frames, which can be + decided by the individual subclasses. + """ + return self._frame_format + + @frame_format.setter + def frame_format(self, frame_format): + if frame_format in self.supported_formats: + self._frame_format = frame_format + else: + _api.warn_external( + f"Ignoring file format {frame_format!r} which is not " + f"supported by {type(self).__name__}; using " + f"{self.supported_formats[0]} instead.") + self._frame_format = self.supported_formats[0] + + def _base_temp_name(self): + # Generates a template name (without number) given the frame format + # for extension and the prefix. + return self.fname_format_str % (self.temp_prefix, self.frame_format) + + def grab_frame(self, **savefig_kwargs): + # docstring inherited + # Creates a filename for saving using basename and counter. 
+ path = Path(self._base_temp_name() % self._frame_counter) + self._temp_paths.append(path) # Record the filename for later use. + self._frame_counter += 1 # Ensures each created name is unique. + _log.debug('FileMovieWriter.grab_frame: Grabbing frame %d to path=%s', + self._frame_counter, path) + with open(path, 'wb') as sink: # Save figure to the sink. + self.fig.savefig(sink, format=self.frame_format, dpi=self.dpi, + **savefig_kwargs) + + def finish(self): + # Call run here now that all frame grabbing is done. All temp files + # are available to be assembled. + self._run() + super().finish() # Will call clean-up + + def _cleanup(self): # Inline to finish() once cleanup() is removed. + super()._cleanup() + if self._tmpdir: + _log.debug('MovieWriter: clearing temporary path=%s', self._tmpdir) + self._tmpdir.cleanup() + + +@writers.register('pillow') +class PillowWriter(AbstractMovieWriter): + @classmethod + def isAvailable(cls): + return True + + def setup(self, fig, outfile, dpi=None): + super().setup(fig, outfile, dpi=dpi) + self._frames = [] + + def grab_frame(self, **savefig_kwargs): + buf = BytesIO() + self.fig.savefig( + buf, **{**savefig_kwargs, "format": "rgba", "dpi": self.dpi}) + self._frames.append(Image.frombuffer( + "RGBA", self.frame_size, buf.getbuffer(), "raw", "RGBA", 0, 1)) + + def finish(self): + self._frames[0].save( + self.outfile, save_all=True, append_images=self._frames[1:], + duration=int(1000 / self.fps), loop=0) + + +# Base class of ffmpeg information. Has the config keys and the common set +# of arguments that controls the *output* side of things. +class FFMpegBase: + """ + Mixin class for FFMpeg output. + + To be useful this must be multiply-inherited from with a + `MovieWriterBase` sub-class. 
+ """ + + _exec_key = 'animation.ffmpeg_path' + _args_key = 'animation.ffmpeg_args' + + @property + def output_args(self): + args = [] + if Path(self.outfile).suffix == '.gif': + self.codec = 'gif' + else: + args.extend(['-vcodec', self.codec]) + extra_args = (self.extra_args if self.extra_args is not None + else mpl.rcParams[self._args_key]) + # For h264, the default format is yuv444p, which is not compatible + # with quicktime (and others). Specifying yuv420p fixes playback on + # iOS, as well as HTML5 video in firefox and safari (on both Win and + # OSX). Also fixes internet explorer. This is as of 2015/10/29. + if self.codec == 'h264' and '-pix_fmt' not in extra_args: + args.extend(['-pix_fmt', 'yuv420p']) + # For GIF, we're telling FFMPEG to split the video stream, to generate + # a palette, and then use it for encoding. + elif self.codec == 'gif' and '-filter_complex' not in extra_args: + args.extend(['-filter_complex', + 'split [a][b];[a] palettegen [p];[b][p] paletteuse']) + if self.bitrate > 0: + args.extend(['-b', '%dk' % self.bitrate]) # %dk: bitrate in kbps. + args.extend(extra_args) + for k, v in self.metadata.items(): + args.extend(['-metadata', '%s=%s' % (k, v)]) + + return args + ['-y', self.outfile] + + +# Combine FFMpeg options with pipe-based writing +@writers.register('ffmpeg') +class FFMpegWriter(FFMpegBase, MovieWriter): + """ + Pipe-based ffmpeg writer. + + Frames are streamed directly to ffmpeg via a pipe and written in a single + pass. + """ + def _args(self): + # Returns the command line parameters for subprocess to use + # ffmpeg to create a movie using a pipe. + args = [self.bin_path(), '-f', 'rawvideo', '-vcodec', 'rawvideo', + '-s', '%dx%d' % self.frame_size, '-pix_fmt', self.frame_format, + '-r', str(self.fps)] + # Logging is quieted because subprocess.PIPE has limited buffer size. + # If you have a lot of frames in your animation and set logging to + # DEBUG, you will have a buffer overrun. 
+ if _log.getEffectiveLevel() > logging.DEBUG: + args += ['-loglevel', 'error'] + args += ['-i', 'pipe:'] + self.output_args + return args + + +# Combine FFMpeg options with temp file-based writing +@writers.register('ffmpeg_file') +class FFMpegFileWriter(FFMpegBase, FileMovieWriter): + """ + File-based ffmpeg writer. + + Frames are written to temporary files on disk and then stitched + together at the end. + """ + supported_formats = ['png', 'jpeg', 'tiff', 'raw', 'rgba'] + + def _args(self): + # Returns the command line parameters for subprocess to use + # ffmpeg to create a movie using a collection of temp images + args = [] + # For raw frames, we need to explicitly tell ffmpeg the metadata. + if self.frame_format in {'raw', 'rgba'}: + args += [ + '-f', 'image2', '-vcodec', 'rawvideo', + '-video_size', '%dx%d' % self.frame_size, + '-pixel_format', 'rgba', + '-framerate', str(self.fps), + ] + args += ['-r', str(self.fps), '-i', self._base_temp_name(), + '-vframes', str(self._frame_counter)] + # Logging is quieted because subprocess.PIPE has limited buffer size. + # If you have a lot of frames in your animation and set logging to + # DEBUG, you will have a buffer overrun. + if _log.getEffectiveLevel() > logging.DEBUG: + args += ['-loglevel', 'error'] + return [self.bin_path(), *args, *self.output_args] + + +# Base class for animated GIFs with ImageMagick +class ImageMagickBase: + """ + Mixin class for ImageMagick output. + + To be useful this must be multiply-inherited from with a + `MovieWriterBase` sub-class. + """ + + _exec_key = 'animation.convert_path' + _args_key = 'animation.convert_args' + + @property + def delay(self): + return 100. 
/ self.fps + + @property + def output_args(self): + extra_args = (self.extra_args if self.extra_args is not None + else mpl.rcParams[self._args_key]) + return [*extra_args, self.outfile] + + @classmethod + def bin_path(cls): + binpath = super().bin_path() + if binpath == 'convert': + binpath = mpl._get_executable_info('magick').executable + return binpath + + @classmethod + def isAvailable(cls): + try: + return super().isAvailable() + except mpl.ExecutableNotFoundError as _enf: + # May be raised by get_executable_info. + _log.debug('ImageMagick unavailable due to: %s', _enf) + return False + + +# Combine ImageMagick options with pipe-based writing +@writers.register('imagemagick') +class ImageMagickWriter(ImageMagickBase, MovieWriter): + """ + Pipe-based animated gif. + + Frames are streamed directly to ImageMagick via a pipe and written + in a single pass. + + """ + def _args(self): + return ([self.bin_path(), + '-size', '%ix%i' % self.frame_size, '-depth', '8', + '-delay', str(self.delay), '-loop', '0', + '%s:-' % self.frame_format] + + self.output_args) + + +# Combine ImageMagick options with temp file-based writing +@writers.register('imagemagick_file') +class ImageMagickFileWriter(ImageMagickBase, FileMovieWriter): + """ + File-based animated gif writer. + + Frames are written to temporary files on disk and then stitched + together at the end. + """ + + supported_formats = ['png', 'jpeg', 'tiff', 'raw', 'rgba'] + + def _args(self): + # Force format: ImageMagick does not recognize 'raw'. 
+ fmt = 'rgba:' if self.frame_format == 'raw' else '' + return ([self.bin_path(), + '-size', '%ix%i' % self.frame_size, '-depth', '8', + '-delay', str(self.delay), '-loop', '0', + '%s%s*.%s' % (fmt, self.temp_prefix, self.frame_format)] + + self.output_args) + + +# Taken directly from jakevdp's JSAnimation package at +# http://github.com/jakevdp/JSAnimation +def _included_frames(paths, frame_format): + """paths should be a list of Paths""" + return INCLUDED_FRAMES.format(Nframes=len(paths), + frame_dir=paths[0].parent, + frame_format=frame_format) + + +def _embedded_frames(frame_list, frame_format): + """frame_list should be a list of base64-encoded png files""" + if frame_format == 'svg': + # Fix MIME type for svg + frame_format = 'svg+xml' + template = ' frames[{0}] = "data:image/{1};base64,{2}"\n' + return "\n" + "".join( + template.format(i, frame_format, frame_data.replace('\n', '\\\n')) + for i, frame_data in enumerate(frame_list)) + + +@writers.register('html') +class HTMLWriter(FileMovieWriter): + """Writer for JavaScript-based HTML movies.""" + + supported_formats = ['png', 'jpeg', 'tiff', 'svg'] + + @classmethod + def isAvailable(cls): + return True + + def __init__(self, fps=30, codec=None, bitrate=None, extra_args=None, + metadata=None, embed_frames=False, default_mode='loop', + embed_limit=None): + + if extra_args: + _log.warning("HTMLWriter ignores 'extra_args'") + extra_args = () # Don't lookup nonexistent rcParam[args_key]. 
+ self.embed_frames = embed_frames + self.default_mode = default_mode.lower() + _api.check_in_list(['loop', 'once', 'reflect'], + default_mode=self.default_mode) + + # Save embed limit, which is given in MB + if embed_limit is None: + self._bytes_limit = mpl.rcParams['animation.embed_limit'] + else: + self._bytes_limit = embed_limit + # Convert from MB to bytes + self._bytes_limit *= 1024 * 1024 + + super().__init__(fps, codec, bitrate, extra_args, metadata) + + def setup(self, fig, outfile, dpi, frame_dir=None): + outfile = Path(outfile) + _api.check_in_list(['.html', '.htm'], outfile_extension=outfile.suffix) + + self._saved_frames = [] + self._total_bytes = 0 + self._hit_limit = False + + if not self.embed_frames: + if frame_dir is None: + frame_dir = outfile.with_name(outfile.stem + '_frames') + frame_dir.mkdir(parents=True, exist_ok=True) + frame_prefix = frame_dir / 'frame' + else: + frame_prefix = None + + super().setup(fig, outfile, dpi, frame_prefix) + self._clear_temp = False + + def grab_frame(self, **savefig_kwargs): + if self.embed_frames: + # Just stop processing if we hit the limit + if self._hit_limit: + return + f = BytesIO() + self.fig.savefig(f, format=self.frame_format, + dpi=self.dpi, **savefig_kwargs) + imgdata64 = base64.encodebytes(f.getvalue()).decode('ascii') + self._total_bytes += len(imgdata64) + if self._total_bytes >= self._bytes_limit: + _log.warning( + "Animation size has reached %s bytes, exceeding the limit " + "of %s. If you're sure you want a larger animation " + "embedded, set the animation.embed_limit rc parameter to " + "a larger value (in MB). 
This and further frames will be " + "dropped.", self._total_bytes, self._bytes_limit) + self._hit_limit = True + else: + self._saved_frames.append(imgdata64) + else: + return super().grab_frame(**savefig_kwargs) + + def finish(self): + # save the frames to an html file + if self.embed_frames: + fill_frames = _embedded_frames(self._saved_frames, + self.frame_format) + Nframes = len(self._saved_frames) + else: + # temp names is filled by FileMovieWriter + fill_frames = _included_frames(self._temp_paths, self.frame_format) + Nframes = len(self._temp_paths) + mode_dict = dict(once_checked='', + loop_checked='', + reflect_checked='') + mode_dict[self.default_mode + '_checked'] = 'checked' + + interval = 1000 // self.fps + + with open(self.outfile, 'w') as of: + of.write(JS_INCLUDE + STYLE_INCLUDE) + of.write(DISPLAY_TEMPLATE.format(id=uuid.uuid4().hex, + Nframes=Nframes, + fill_frames=fill_frames, + interval=interval, + **mode_dict)) + + # duplicate the temporary file clean up logic from + # FileMovieWriter.cleanup. We can not call the inherited + # versions of finish or cleanup because both assume that + # there is a subprocess that we either need to call to merge + # many frames together or that there is a subprocess call that + # we need to clean up. + if self._tmpdir: + _log.debug('MovieWriter: clearing temporary path=%s', self._tmpdir) + self._tmpdir.cleanup() + + +class Animation: + """ + A base class for Animations. + + This class is not usable as is, and should be subclassed to provide needed + behavior. + + .. note:: + + You must store the created Animation in a variable that lives as long + as the animation should run. Otherwise, the Animation object will be + garbage-collected and the animation stops. + + Parameters + ---------- + fig : `~matplotlib.figure.Figure` + The figure object used to get needed events, such as draw or resize. 
+ + event_source : object, optional + A class that can run a callback when desired events + are generated, as well as be stopped and started. + + Examples include timers (see `TimedAnimation`) and file + system notifications. + + blit : bool, default: False + Whether blitting is used to optimize drawing. + + See Also + -------- + FuncAnimation, ArtistAnimation + """ + + def __init__(self, fig, event_source=None, blit=False): + self._draw_was_started = False + + self._fig = fig + # Disables blitting for backends that don't support it. This + # allows users to request it if available, but still have a + # fallback that works if it is not. + self._blit = blit and fig.canvas.supports_blit + + # These are the basics of the animation. The frame sequence represents + # information for each frame of the animation and depends on how the + # drawing is handled by the subclasses. The event source fires events + # that cause the frame sequence to be iterated. + self.frame_seq = self.new_frame_seq() + self.event_source = event_source + + # Instead of starting the event source now, we connect to the figure's + # draw_event, so that we only start once the figure has been drawn. + self._first_draw_id = fig.canvas.mpl_connect('draw_event', self._start) + + # Connect to the figure's close_event so that we don't continue to + # fire events and try to draw to a deleted figure. + self._close_id = self._fig.canvas.mpl_connect('close_event', + self._stop) + if self._blit: + self._setup_blit() + + def __del__(self): + if not getattr(self, '_draw_was_started', True): + warnings.warn( + 'Animation was deleted without rendering anything. This is ' + 'most likely not intended. To prevent deletion, assign the ' + 'Animation to a variable, e.g. `anim`, that exists until you ' + 'have outputted the Animation using `plt.show()` or ' + '`anim.save()`.' + ) + + def _start(self, *args): + """ + Starts interactive animation. 
Adds the draw frame command to the GUI + handler, calls show to start the event loop. + """ + # Do not start the event source if saving() it. + if self._fig.canvas.is_saving(): + return + # First disconnect our draw event handler + self._fig.canvas.mpl_disconnect(self._first_draw_id) + + # Now do any initial draw + self._init_draw() + + # Add our callback for stepping the animation and + # actually start the event_source. + self.event_source.add_callback(self._step) + self.event_source.start() + + def _stop(self, *args): + # On stop we disconnect all of our events. + if self._blit: + self._fig.canvas.mpl_disconnect(self._resize_id) + self._fig.canvas.mpl_disconnect(self._close_id) + self.event_source.remove_callback(self._step) + self.event_source = None + + def save(self, filename, writer=None, fps=None, dpi=None, codec=None, + bitrate=None, extra_args=None, metadata=None, extra_anim=None, + savefig_kwargs=None, *, progress_callback=None): + """ + Save the animation as a movie file by drawing every frame. + + Parameters + ---------- + filename : str + The output filename, e.g., :file:`mymovie.mp4`. + + writer : `MovieWriter` or str, default: :rc:`animation.writer` + A `MovieWriter` instance to use or a key that identifies a + class to use, such as 'ffmpeg'. + + fps : int, optional + Movie frame rate (per second). If not set, the frame rate from the + animation's frame interval. + + dpi : float, default: :rc:`savefig.dpi` + Controls the dots per inch for the movie frames. Together with + the figure's size in inches, this controls the size of the movie. + + codec : str, default: :rc:`animation.codec`. + The video codec to use. Not all codecs are supported by a given + `MovieWriter`. + + bitrate : int, default: :rc:`animation.bitrate` + The bitrate of the movie, in kilobits per second. Higher values + means higher quality movies, but increase the file size. A value + of -1 lets the underlying movie encoder select the bitrate. 
+ + extra_args : list of str or None, optional + Extra command-line arguments passed to the underlying movie + encoder. The default, None, means to use + :rc:`animation.[name-of-encoder]_args` for the builtin writers. + + metadata : dict[str, str], default: {} + Dictionary of keys and values for metadata to include in + the output file. Some keys that may be of use include: + title, artist, genre, subject, copyright, srcform, comment. + + extra_anim : list, default: [] + Additional `Animation` objects that should be included + in the saved movie file. These need to be from the same + `matplotlib.figure.Figure` instance. Also, animation frames will + just be simply combined, so there should be a 1:1 correspondence + between the frames from the different animations. + + savefig_kwargs : dict, default: {} + Keyword arguments passed to each `~.Figure.savefig` call used to + save the individual frames. + + progress_callback : function, optional + A callback function that will be called for every frame to notify + the saving progress. It must have the signature :: + + def func(current_frame: int, total_frames: int) -> Any + + where *current_frame* is the current frame number and + *total_frames* is the total number of frames to be saved. + *total_frames* is set to None, if the total number of frames can + not be determined. Return values may exist but are ignored. + + Example code to write the progress to stdout:: + + progress_callback =\ + lambda i, n: print(f'Saving frame {i} of {n}') + + Notes + ----- + *fps*, *codec*, *bitrate*, *extra_args* and *metadata* are used to + construct a `.MovieWriter` instance and can only be passed if + *writer* is a string. If they are passed as non-*None* and *writer* + is a `.MovieWriter`, a `RuntimeError` will be raised. 
+ """ + + if writer is None: + writer = mpl.rcParams['animation.writer'] + elif (not isinstance(writer, str) and + any(arg is not None + for arg in (fps, codec, bitrate, extra_args, metadata))): + raise RuntimeError('Passing in values for arguments ' + 'fps, codec, bitrate, extra_args, or metadata ' + 'is not supported when writer is an existing ' + 'MovieWriter instance. These should instead be ' + 'passed as arguments when creating the ' + 'MovieWriter instance.') + + if savefig_kwargs is None: + savefig_kwargs = {} + + if fps is None and hasattr(self, '_interval'): + # Convert interval in ms to frames per second + fps = 1000. / self._interval + + # Re-use the savefig DPI for ours if none is given + if dpi is None: + dpi = mpl.rcParams['savefig.dpi'] + if dpi == 'figure': + dpi = self._fig.dpi + + writer_kwargs = {} + if codec is not None: + writer_kwargs['codec'] = codec + if bitrate is not None: + writer_kwargs['bitrate'] = bitrate + if extra_args is not None: + writer_kwargs['extra_args'] = extra_args + if metadata is not None: + writer_kwargs['metadata'] = metadata + + all_anim = [self] + if extra_anim is not None: + all_anim.extend(anim + for anim + in extra_anim if anim._fig is self._fig) + + # If we have the name of a writer, instantiate an instance of the + # registered class. + if isinstance(writer, str): + try: + writer_cls = writers[writer] + except RuntimeError: # Raised if not available. + writer_cls = PillowWriter # Always available. + _log.warning("MovieWriter %s unavailable; using Pillow " + "instead.", writer) + writer = writer_cls(fps, **writer_kwargs) + _log.info('Animation.save using %s', type(writer)) + + if 'bbox_inches' in savefig_kwargs: + _log.warning("Warning: discarding the 'bbox_inches' argument in " + "'savefig_kwargs' as it may cause frame size " + "to vary, which is inappropriate for animation.") + savefig_kwargs.pop('bbox_inches') + + # Create a new sequence of frames for saved data. 
This is different + # from new_frame_seq() to give the ability to save 'live' generated + # frame information to be saved later. + # TODO: Right now, after closing the figure, saving a movie won't work + # since GUI widgets are gone. Either need to remove extra code to + # allow for this non-existent use case or find a way to make it work. + if mpl.rcParams['savefig.bbox'] == 'tight': + _log.info("Disabling savefig.bbox = 'tight', as it may cause " + "frame size to vary, which is inappropriate for " + "animation.") + # canvas._is_saving = True makes the draw_event animation-starting + # callback a no-op; canvas.manager = None prevents resizing the GUI + # widget (both are likewise done in savefig()). + with mpl.rc_context({'savefig.bbox': None}), \ + writer.saving(self._fig, filename, dpi), \ + cbook._setattr_cm(self._fig.canvas, + _is_saving=True, manager=None): + for anim in all_anim: + anim._init_draw() # Clear the initial frame + frame_number = 0 + # TODO: Currently only FuncAnimation has a save_count + # attribute. Can we generalize this to all Animations? + save_count_list = [getattr(a, 'save_count', None) + for a in all_anim] + if None in save_count_list: + total_frames = None + else: + total_frames = sum(save_count_list) + for data in zip(*[a.new_saved_frame_seq() for a in all_anim]): + for anim, d in zip(all_anim, data): + # TODO: See if turning off blit is really necessary + anim._draw_next_frame(d, blit=False) + if progress_callback is not None: + progress_callback(frame_number, total_frames) + frame_number += 1 + writer.grab_frame(**savefig_kwargs) + + def _step(self, *args): + """ + Handler for getting events. By default, gets the next frame in the + sequence and hands the data off to be drawn. + """ + # Returns True to indicate that the event source should continue to + # call _step, until the frame sequence reaches the end of iteration, + # at which point False will be returned. 
+ try: + framedata = next(self.frame_seq) + self._draw_next_frame(framedata, self._blit) + return True + except StopIteration: + return False + + def new_frame_seq(self): + """Return a new sequence of frame information.""" + # Default implementation is just an iterator over self._framedata + return iter(self._framedata) + + def new_saved_frame_seq(self): + """Return a new sequence of saved/cached frame information.""" + # Default is the same as the regular frame sequence + return self.new_frame_seq() + + def _draw_next_frame(self, framedata, blit): + # Breaks down the drawing of the next frame into steps of pre- and + # post- draw, as well as the drawing of the frame itself. + self._pre_draw(framedata, blit) + self._draw_frame(framedata) + self._post_draw(framedata, blit) + + def _init_draw(self): + # Initial draw to clear the frame. Also used by the blitting code + # when a clean base is required. + self._draw_was_started = True + + def _pre_draw(self, framedata, blit): + # Perform any cleaning or whatnot before the drawing of the frame. + # This default implementation allows blit to clear the frame. + if blit: + self._blit_clear(self._drawn_artists) + + def _draw_frame(self, framedata): + # Performs actual drawing of the frame. + raise NotImplementedError('Needs to be implemented by subclasses to' + ' actually make an animation.') + + def _post_draw(self, framedata, blit): + # After the frame is rendered, this handles the actual flushing of + # the draw, which can be a direct draw_idle() or make use of the + # blitting. + if blit and self._drawn_artists: + self._blit_draw(self._drawn_artists) + else: + self._fig.canvas.draw_idle() + + # The rest of the code in this class is to facilitate easy blitting + def _blit_draw(self, artists): + # Handles blitted drawing, which renders only the artists given instead + # of the entire figure. + updated_ax = {a.axes for a in artists} + # Enumerate artists to cache axes' backgrounds. 
We do not draw + # artists yet to not cache foreground from plots with shared axes + for ax in updated_ax: + # If we haven't cached the background for the current view of this + # axes object, do so now. This might not always be reliable, but + # it's an attempt to automate the process. + cur_view = ax._get_view() + view, bg = self._blit_cache.get(ax, (object(), None)) + if cur_view != view: + self._blit_cache[ax] = ( + cur_view, ax.figure.canvas.copy_from_bbox(ax.bbox)) + # Make a separate pass to draw foreground. + for a in artists: + a.axes.draw_artist(a) + # After rendering all the needed artists, blit each axes individually. + for ax in updated_ax: + ax.figure.canvas.blit(ax.bbox) + + def _blit_clear(self, artists): + # Get a list of the axes that need clearing from the artists that + # have been drawn. Grab the appropriate saved background from the + # cache and restore. + axes = {a.axes for a in artists} + for ax in axes: + try: + view, bg = self._blit_cache[ax] + except KeyError: + continue + if ax._get_view() == view: + ax.figure.canvas.restore_region(bg) + else: + self._blit_cache.pop(ax) + + def _setup_blit(self): + # Setting up the blit requires: a cache of the background for the + # axes + self._blit_cache = dict() + self._drawn_artists = [] + self._resize_id = self._fig.canvas.mpl_connect('resize_event', + self._on_resize) + self._post_draw(None, self._blit) + + def _on_resize(self, event): + # On resize, we need to disable the resize event handling so we don't + # get too many events. Also stop the animation events, so that + # we're paused. Reset the cache and re-init. Set up an event handler + # to catch once the draw has actually taken place. 
+ self._fig.canvas.mpl_disconnect(self._resize_id) + self.event_source.stop() + self._blit_cache.clear() + self._init_draw() + self._resize_id = self._fig.canvas.mpl_connect('draw_event', + self._end_redraw) + + def _end_redraw(self, event): + # Now that the redraw has happened, do the post draw flushing and + # blit handling. Then re-enable all of the original events. + self._post_draw(None, False) + self.event_source.start() + self._fig.canvas.mpl_disconnect(self._resize_id) + self._resize_id = self._fig.canvas.mpl_connect('resize_event', + self._on_resize) + + def to_html5_video(self, embed_limit=None): + """ + Convert the animation to an HTML5 ``