Back to module index

Go to module by name

linecache

Cache lines from Python source files.

This is intended to read lines from modules imported -- hence if a filename
is not found, it will look down the module search path for a file by
that name.

Functions

checkcache

checkcache(filename=None)

  Discard cache entries that are out of date.
      (This is not checked upon each call!)

clearcache

clearcache()

  Clear the cache entirely.

getline

getline(filename, lineno, module_globals=None)

  Get a line for a Python source file from the cache.
      Update the cache if it doesn't contain an entry for this file already.

getlines

getlines(filename, module_globals=None)

  Get the lines for a Python source file from the cache.
      Update the cache if it doesn't contain an entry for this file already.

lazycache

lazycache(filename, module_globals)

  Seed the cache for filename with module_globals.

      The module loader will be asked for the source only when getlines is
      called, not immediately.

      If there is an entry in the cache already, it is not altered.

      :return: True if a lazy load is registered in the cache,
          otherwise False. To register such a load a module loader with a
          get_source method must be found, the filename must be a cachable
          filename, and the filename must not be already cached.
    

updatecache

updatecache(filename, module_globals=None)

  Update a cache entry and return its list of lines.
      If something's wrong, print a message, discard the cache entry,
      and return an empty list.

Other members

cache = {'/nix/store/42a8c7fk04zjmk0ckvf6ljiggn0hmf4f-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/server.py': (2412, 1.0, ['#!/usr/bin/env python3\n', '\n', 'import asyncio\n', 'import logging\n', 'import signal\n', 'import traceback\n', 'from typing import TYPE_CHECKING\n', '\n', 'from .response import Response, Status\n', 'from .tls import make_sni_context\n', '\n', 'if TYPE_CHECKING:\n', '    from .config import Config\n', '\n', '\n', 'class Server():\n', '    def __init__(\n', '        self,\n', '        config: "Config",\n', '    ):\n', '        self.log = logging.getLogger("amethyst.server")\n', '        self.access_log = logging.getLogger("amethyst.access")\n', '\n', '        self.server = None\n', '        self.config = config\n', '\n', '        self.ssl_context = make_sni_context(config)\n', '        self.server = self.get_server()\n', '\n', '    def get_server(self):\n', '        loop = asyncio.get_event_loop()\n', '\n', '        return asyncio.start_server(\n', '            self.handle_connection, port=self.config.port,\n', '            ssl=self.ssl_context, loop=loop,\n', '        )\n', '\n', '    async def handle_connection(self, reader, writer):\n', '        from .request import Connection\n', '\n', '        peer_addr = writer.get_extra_info("peername")\n', '        peer_cert = writer.get_extra_info("peercert")\n', '\n', '        self.log.debug(f"Received connection from {peer_addr}")\n', '\n', '        url = "-"\n', '        try:\n', '            url = (await reader.readuntil(b"\\r\\n")).rstrip(b"\\r\\n").decode()\n', '\n', '            if len(url) > 1024:\n', '                response = Response(Status.BAD_REQUEST, "URL too long!")\n', '            else:\n', '                response = await self.config.handler(\n', '                    url, Connection(self, peer_addr, peer_cert)\n', '                )\n', '\n', '        except UnicodeDecodeError:\n', '            response = Response(Status.BAD_REQUEST, "URL must be UTF-8")\n', '\n', '        except Exception:\n', '            self.log.error(f"While generating response; {traceback.format_exc()}")\n', '\n', '            response = Response(\n', '                Status.TEMPORARY_FAILURE,\n', '                "Exception thrown during request processing; see server logs for details."\n', '            )\n', '\n', '        self.access_log.info(\n', '            f"{url} {response.status_code.value}[{response.status_code.name}]"\n', '            f" {response.meta}"\n', '        )\n', '\n', '        try:\n', '            line = f"{response.status_code.value} {response.meta}\\r\\n".encode()\n', '            writer.write(line)\n', '\n', '            if response.status_code.is_success() and response.content is not None:\n', '                writer.write(response.content)\n', '\n', '        except Exception:\n', '            self.log.error(f"While writing response; {traceback.format_exc()}")\n', '\n', '        finally:\n', '            writer.close()\n'], '/nix/store/42a8c7fk04zjmk0ckvf6ljiggn0hmf4f-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/server.py'), '/nix/store/42a8c7fk04zjmk0ckvf6ljiggn0hmf4f-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/handler.py': (2535, 1.0, ['from .resource import Resource\n', 'from .response import Status, Response\n', 'from .request import Connection, Context\n', 'from .util import get_path_components\n', 'from urllib.parse import urlparse\n', 'from typing import Dict, Callable, Awaitable\n', '\n', 'import logging\n', 'import re\n', '\n', 'Handler = Callable[[str, Connection], Awaitable[Response]]\n', 'PORT_RE = re.compile(r":([0-9]{1,5})$")\n', '\n', '\n', 'class GenericHandler():\n', '    def __init__(self, url_map: Dict[str, Dict[str, Resource]]):\n', '        self.url_map = url_map\n', '        self.log = logging.getLogger("amethyst.handler.GenericHandler")\n', '\n', '    async def __call__(self, url: str, conn: Connection) -> Response:\n', '        result = urlparse(url)\n', '\n', '        if not result.scheme:\n', '            return Response(\n', '                Status.BAD_REQUEST,\n', '                f"Requested URL must have a scheme."\n', '            )\n', '\n', '        if result.scheme != "gemini":\n', '            # This is exclusively a Gemini server.\n', '            return Response(\n', '                Status.PROXY_REQUEST_REFUSED,\n', '                f"This server does not proxy non-Gemini URLs."\n', '            )\n', '\n', '        host = result.netloc\n', '\n', '        if (port_match := PORT_RE.search(host)):\n', '            if int(port_match.group(1)) != conn.server.config.port:\n', '                return Response(\n', '                    Status.PROXY_REQUEST_REFUSED,\n', '                    f"{host} is not served here."\n', '                )\n', '\n', '            host = PORT_RE.sub("", host) \n', '\n', '        if host not in self.url_map:\n', '            self.log.warn(f"Received request for host {host} not in URL map")\n', '\n', '            return Response(\n', '                Status.PROXY_REQUEST_REFUSED,\n', '                f"{host} is not served here.",\n', '            )\n', '\n', '        req_path = result.path\n', '        try:\n', '            req_path = get_path_components(req_path)\n', '        except ValueError:\n', '            return Response(Status.BAD_REQUEST, "Invalid URL")\n', '\n', '        paths = [\n', '            (get_path_components(i), v) for i, v in self.url_map[host].items()\n', '        ]\n', '\n', '        for path, resource in sorted(paths, key=lambda k: len(k[0]), reverse=True):\n', '            if len(req_path) < len(path) or req_path[:len(path)] != path:\n', '                continue\n', '\n', '            truncated_path = "/".join(req_path[len(path):])\n', '            if result.path.endswith("/"):\n', '                truncated_path += "/"\n', '\n', '            return await resource(Context(\n', '                result.netloc, result.path, truncated_path,\n', '                result.query, conn\n', '            ))\n', '\n', '        return Response(\n', '            Status.NOT_FOUND, f"{req_path} was not found on this server."\n', '        )\n'], '/nix/store/42a8c7fk04zjmk0ckvf6ljiggn0hmf4f-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/handler.py'), '/nix/store/inim7ysrgjydvfwzdlh373mr5yg5993m-python3.9-amethyst_extensions-0.0.1/lib/python3.9/site-packages/amethyst_ext/pydoc.py': (6841, 1.0, ['from amethyst.response import Response, Status\n', '\n', 'import importlib\n', 'import inspect\n', 'import pkgutil\n', 'import re\n', 'import sys\n', 'import textwrap\n', '\n', 'SITE_PACKAGES_RE = re.compile(r"lib/python[^/]+/site-packages")\n', 'PYTHON3_RE = re.compile(r"python3[^-]*")\n', '\n', '\n', 'class PydocResource():\n', '    @staticmethod\n', '    def classify(thing):\n', '        if inspect.ismodule(thing):\n', '            return "module"\n', '        elif inspect.isclass(thing):\n', '            return "class"\n', '        elif (inspect.isfunction(thing) or inspect.ismethod(thing) or\n', '              inspect.ismethoddescriptor(thing) or inspect.isroutine(thing)):\n', '            return "function"\n', '        else:\n', '            return "other"\n', '\n', '    def doc_class(self, cls, name=None):\n', '        lines = []\n', '\n', '        if name is None:\n', '            name = cls.__name__\n', '        else:\n', '            name = f"{name}.{cls.__name__}"\n', '\n', '        lines.append(f"### {name}")\n', '        if (clsdoc := getattr(cls, "__doc__")):\n', '            lines.append(f"```\\n{clsdoc}\\n```\\n")\n', '\n', '        members = {}\n', '        members = {"class": [], "function": [], "other": []}\n', '\n', '        for name, member in inspect.getmembers(cls):\n', '            if name.startswith("_"):\n', '                continue\n', '\n', '            if (classification := self.classify(member)) in {"class", "function", "other"}:\n', '                members[classification].append((name, member))\n', '\n', '        members["class"].sort()\n', '        for _, scls in members["class"]:\n', '            lines.append(self.doc_class(scls, name))\n', '\n', '        members["function"].sort()\n', '        for name, func in members["function"]:\n', '            lines.append(self.doc_func(func))\n', '\n', '        members["other"].sort()\n', '        for name, other in members["other"]:\n', '            lines.append(self.doc_other(name, other))\n', '\n', '        return "\\n".join(lines)\n', '\n', '    def doc_func(self, func):\n', '        lines = []\n', '\n', '        lines.append("```")\n', '        try:\n', '            lines.append(f"{func.__name__}{inspect.signature(func)}")\n', '        except ValueError:\n', '            lines.append(f"{func.__name__}(...)")\n', '\n', '        if (funcdoc := getattr(func, "__doc__")):\n', '            lines.append(f"\\n{textwrap.indent(funcdoc, \'  \')}\\n```\\n")\n', '        else:\n', '            lines.append("```\\n")\n', '\n', '        return "\\n".join(lines)\n', '\n', '    def doc_other(self, name, other):\n', '        doc = getattr(other, "__doc__", "")\n', '        if doc and doc != type(other).__doc__:\n', '            doc = textwrap.indent(doc, "  ")\n', '            doc += "\\n```\\n"\n', '        else:\n', '            doc = "```"\n', '\n', '        return f"```\\n{name} = {other!r}\\n{doc}"\n', '\n', '    def doc_mod(self, modname):\n', '        lines = []\n', '\n', '        try:\n', '            module = importlib.import_module(modname)\n', '        except ImportError:\n', '            return None\n', '\n', '        ispkg = (getattr(module, "__package__", "") == modname)\n', '\n', '        lines.append("=> _ Back to module index")\n', '        lines.append("=> _/search Go to module by name")\n', '        if "." in modname:\n', '            components = modname.split(".")\n', '            for i in range(len(components) - 1, 0, -1):\n', '                lines.append("=> " + ".".join(components[:i]))\n', '\n', '        if ispkg:\n', '            lines.append(f"# {modname} (package)")\n', '        else:\n', '            lines.append(f"# {modname}")\n', '\n', '        if (moddoc := getattr(module, "__doc__")):\n', '            lines.append(f"```\\n{moddoc}\\n```")\n', '        else:\n', '            lines.append("This module has no docstring.")\n', '\n', '        members = {"module": [], "class": [], "function": [], "other": []}\n', '        for name, member in inspect.getmembers(module):\n', '            if name.startswith("_"):\n', '                continue\n', '\n', '            members[self.classify(member)].append((name, member))\n', '\n', '        if members["class"]:\n', '            members["class"].sort()\n', '            lines.append("## Classes")\n', '            for name, cls in members["class"]:\n', '                lines.append(self.doc_class(cls))\n', '\n', '        if members["function"]:\n', '            members["function"].sort()\n', '            lines.append("## Functions")\n', '            for name, func in members["function"]:\n', '                lines.append(f"### {name}")\n', '                lines.append(self.doc_func(func))\n', '\n', '        if members["other"]:\n', '            lines.append("## Other members")\n', '            members["other"].sort()\n', '            for name, other in members["other"]:\n', '                lines.append(self.doc_other(name, other))\n', '\n', '        if members["module"]:\n', '            members["module"].sort()\n', '            lines.append("## Modules")\n', '            for name, mod in members["module"]:\n', '                lines.append(f"=> {mod.__name__} {name}")\n', '\n', '        return "\\n".join(lines)\n', '\n', '    def index(self):\n', '        lines = []\n', '\n', '        lines.append("=> _/search Go to module by name")\n', '\n', '        lines.append("# Built-in modules")\n', '        names = [name for name in sys.builtin_module_names if name != "__main__"]\n', '        for name in sorted(names):\n', '            lines.append(f"=> {name}")\n', '\n', '        lines.append("# Python modules")\n', '        for dirname in sorted(sys.path):\n', '            display = dirname\n', '            if display.startswith("/nix/store/"):\n', '                display = f"(nix)/{display[44:]}"\n', '\n', '            display = SITE_PACKAGES_RE.sub("l/p/s-p", display)\n', '            display = PYTHON3_RE.sub("p3", display)\n', '\n', '            modpkgs = []\n', '            for importer, name, ispkg in pkgutil.iter_modules([dirname]):\n', '                if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name):\n', '                    # Ignore modules that contain surrogate characters\n', '                    # (pydoc does this)\n', '                    continue\n', '\n', '                if name == "setup":\n', '                    # never import "setup.py"\n', '                    continue\n', '\n', '                modpkgs.append((name, ispkg))\n', '\n', '            if modpkgs:\n', '                lines.append(f"## {display}")\n', '                for name, ispkg in sorted(modpkgs):\n', '                    if ispkg:\n', '                        lines.append(f"=> {name} {name} (package)")\n', '                    else:\n', '                        lines.append(f"=> {name}")\n', '\n', '        return "\\n".join(lines)\n', '\n', '\n', '    async def __call__(self, ctx):\n', '        path = ctx.path\n', '        if not path:\n', '            return Response(Status.REDIRECT_PERMANENT, ctx.orig_path + "/")\n', '\n', '        path = path.strip("/")\n', '        if not path or path == "_":\n', '            text = self.index()\n', '\n', '        elif path == "_/search":\n', '            if ctx.query:\n', '                try:\n', '                    importlib.import_module(ctx.query)\n', '                    return Response(Status.REDIRECT_TEMPORARY, "../" + ctx.query)\n', '                except ImportError:\n', '                    return Response(Status.INPUT, f"Sorry, I don\'t know about {ctx.query}. Module name?")\n', '\n', '            return Response(Status.INPUT, "Module name?")\n', '        else:\n', '            text = self.doc_mod(path)\n', '\n', '        if text is not None:\n', '            return Response(\n', '                Status.SUCCESS, "text/gemini", text.encode()\n', '            )\n', '\n', '        return Response(Status.NOT_FOUND, "text/gemini")\n'], '/nix/store/inim7ysrgjydvfwzdlh373mr5yg5993m-python3.9-amethyst_extensions-0.0.1/lib/python3.9/site-packages/amethyst_ext/pydoc.py'), '/nix/store/l466cxk1dkpn3jbx3wirraf8abfl183l-python3.9-pyparsing-2.4.7/lib/python3.9/site-packages/pyparsing.py': (273365, 1.0, ['# -*- coding: utf-8 -*-\n', '# module pyparsing.py\n', '#\n', '# Copyright (c) 2003-2019  Paul T. McGuire\n', '#\n', '# Permission is hereby granted, free of charge, to any person obtaining\n', '# a copy of this software and associated documentation files (the\n', '# "Software"), to deal in the Software without restriction, including\n', '# without limitation the rights to use, copy, modify, merge, publish,\n', '# distribute, sublicense, and/or sell copies of the Software, and to\n', '# permit persons to whom the Software is furnished to do so, subject to\n', '# the following conditions:\n', '#\n', '# The above copyright notice and this permission notice shall be\n', '# included in all copies or substantial portions of the Software.\n', '#\n', '# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\n', '# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n', '# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n', '# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n', '# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n', '# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n', '# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n', '#\n', '\n', '__doc__ = \\\n', '"""\n', 'pyparsing module - Classes and methods to define and execute parsing grammars\n', '=============================================================================\n', '\n', 'The pyparsing module is an alternative approach to creating and\n', 'executing simple grammars, vs. the traditional lex/yacc approach, or the\n', "use of regular expressions.  With pyparsing, you don't need to learn\n", 'a new syntax for defining grammars or matching expressions - the parsing\n', 'module provides a library of classes that you use to construct the\n', 'grammar directly in Python.\n', '\n', 'Here is a program to parse "Hello, World!" (or any greeting of the form\n', '``"<salutation>, <addressee>!"``), built up using :class:`Word`,\n', ':class:`Literal`, and :class:`And` elements\n', "(the :class:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,\n", 'and the strings are auto-converted to :class:`Literal` expressions)::\n', '\n', '    from pyparsing import Word, alphas\n', '\n', '    # define grammar of a greeting\n', '    greet = Word(alphas) + "," + Word(alphas) + "!"\n', '\n', '    hello = "Hello, World!"\n', '    print (hello, "->", greet.parseString(hello))\n', '\n', 'The program outputs the following::\n', '\n', "    Hello, World! -> ['Hello', ',', 'World', '!']\n", '\n', 'The Python representation of the grammar is quite readable, owing to the\n', "self-explanatory class names, and the use of '+', '|' and '^' operators.\n", '\n', 'The :class:`ParseResults` object returned from\n', ':class:`ParserElement.parseString` can be\n', 'accessed as a nested list, a dictionary, or an object with named\n', 'attributes.\n', '\n', 'The pyparsing module handles some of the problems that are typically\n', 'vexing when writing text parsers:\n', '\n', '  - extra or missing whitespace (the above program will also handle\n', '    "Hello,World!", "Hello  ,  World  !", etc.)\n', '  - quoted strings\n', '  - embedded comments\n', '\n', '\n', 'Getting Started -\n', '-----------------\n', 'Visit the classes :class:`ParserElement` and :class:`ParseResults` to\n', 'see the base classes that most other pyparsing\n', 'classes inherit from. Use the docstrings for examples of how to:\n', '\n', ' - construct literal match expressions from :class:`Literal` and\n', '   :class:`CaselessLiteral` classes\n', ' - construct character word-group expressions using the :class:`Word`\n', '   class\n', ' - see how to create repetitive expressions using :class:`ZeroOrMore`\n', '   and :class:`OneOrMore` classes\n', " - use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,\n", "   and :class:`'&'<Each>` operators to combine simple expressions into\n", '   more complex ones\n', ' - associate names with your parsed results using\n', '   :class:`ParserElement.setResultsName`\n', ' - access the parsed data, which is returned as a :class:`ParseResults`\n', '   object\n', ' - find some helpful expression short-cuts like :class:`delimitedList`\n', '   and :class:`oneOf`\n', ' - find more useful common expressions in the :class:`pyparsing_common`\n', '   namespace class\n', '"""\n', '\n', '__version__ = "2.4.7"\n', '__versionTime__ = "30 Mar 2020 00:43 UTC"\n', '__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"\n', '\n', 'import string\n', 'from weakref import ref as wkref\n', 'import copy\n', 'import sys\n', 'import warnings\n', 'import re\n', 'import sre_constants\n', 'import collections\n', 'import pprint\n', 'import traceback\n', 'import types\n', 'from datetime import datetime\n', 'from operator import itemgetter\n', 'import itertools\n', 'from functools import wraps\n', 'from contextlib import contextmanager\n', '\n', 'try:\n', '    # Python 3\n', '    from itertools import filterfalse\n', 'except ImportError:\n', '    from itertools import ifilterfalse as filterfalse\n', '\n', 'try:\n', '    from _thread import RLock\n', 'except ImportError:\n', '    from threading import RLock\n', '\n', 'try:\n', '    # Python 3\n', '    from collections.abc import Iterable\n', '    from collections.abc import MutableMapping, Mapping\n', 'except ImportError:\n', '    # Python 2.7\n', '    from collections import Iterable\n', '    from collections import MutableMapping, Mapping\n', '\n', 'try:\n', '    from collections import OrderedDict as _OrderedDict\n', 'except ImportError:\n', '    try:\n', '        from ordereddict import OrderedDict as _OrderedDict\n', '    except ImportError:\n', '        _OrderedDict = None\n', '\n', 'try:\n', '    from types import SimpleNamespace\n', 'except ImportError:\n', '    class SimpleNamespace: pass\n', '\n', '# version compatibility configuration\n', '__compat__ = SimpleNamespace()\n', '__compat__.__doc__ = """\n', '    A cross-version compatibility configuration for pyparsing features that will be\n', '    released in a future version. By setting values in this configuration to True,\n', '    those features can be enabled in prior versions for compatibility development\n', '    and testing.\n', '\n', '     - collect_all_And_tokens - flag to enable fix for Issue #63 that fixes erroneous grouping\n', '       of results names when an And expression is nested within an Or or MatchFirst; set to\n', '       True to enable bugfix released in pyparsing 2.3.0, or False to preserve\n', '       pre-2.3.0 handling of named results\n', '"""\n', '__compat__.collect_all_And_tokens = True\n', '\n', '__diag__ = SimpleNamespace()\n', '__diag__.__doc__ = """\n', 'Diagnostic configuration (all default to False)\n', '     - warn_multiple_tokens_in_named_alternation - flag to enable warnings when a results\n', '       name is defined on a MatchFirst or Or expression with one or more And subexpressions\n', '       (only warns if __compat__.collect_all_And_tokens is False)\n', '     - warn_ungrouped_named_tokens_in_collection - flag to enable warnings when a results\n', '       name is defined on a containing expression with ungrouped subexpressions that also\n', '       have results names\n', '     - warn_name_set_on_empty_Forward - flag to enable warnings whan a Forward is defined\n', '       with a results name, but has no contents defined\n', '     - warn_on_multiple_string_args_to_oneof - flag to enable warnings whan oneOf is\n', '       incorrectly called with multiple str arguments\n', '     - enable_debug_on_named_expressions - flag to auto-enable debug on all subsequent\n', '       calls to ParserElement.setName()\n', '"""\n', '__diag__.warn_multiple_tokens_in_named_alternation = False\n', '__diag__.warn_ungrouped_named_tokens_in_collection = False\n', '__diag__.warn_name_set_on_empty_Forward = False\n', '__diag__.warn_on_multiple_string_args_to_oneof = False\n', '__diag__.enable_debug_on_named_expressions = False\n', '__diag__._all_names = [nm for nm in vars(__diag__) if nm.startswith("enable_") or nm.startswith("warn_")]\n', '\n', 'def _enable_all_warnings():\n', '    __diag__.warn_multiple_tokens_in_named_alternation = True\n', '    __diag__.warn_ungrouped_named_tokens_in_collection = True\n', '    __diag__.warn_name_set_on_empty_Forward = True\n', '    __diag__.warn_on_multiple_string_args_to_oneof = True\n', '__diag__.enable_all_warnings = _enable_all_warnings\n', '\n', '\n', "__all__ = ['__version__', '__versionTime__', '__author__', '__compat__', '__diag__',\n", "           'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',\n", "           'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',\n", "           'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',\n", "           'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',\n", "           'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',\n", "           'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',\n", "           'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char',\n", "           'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',\n", "           'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',\n", "           'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',\n", "           'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',\n", "           'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',\n", "           'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',\n", "           'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',\n", "           'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',\n", "           'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',\n", "           'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation', 'locatedExpr', 'withClass',\n", "           'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set',\n", "           'conditionAsParseAction', 're',\n", '           ]\n', '\n', 'system_version = tuple(sys.version_info)[:3]\n', 'PY_3 = system_version[0] == 3\n', 'if PY_3:\n', '    _MAX_INT = sys.maxsize\n', '    basestring = str\n', '    unichr = chr\n', '    unicode = str\n', '    _ustr = str\n', '\n', '    # build list of single arg builtins, that can be used as parse actions\n', '    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]\n', '\n', 'else:\n', '    _MAX_INT = sys.maxint\n', '    range = xrange\n', '\n', '    def _ustr(obj):\n', '        """Drop-in replacement for str(obj) that tries to be Unicode\n', '        friendly. It first tries str(obj). If that fails with\n', '        a UnicodeEncodeError, then it tries unicode(obj). It then\n', '        < returns the unicode object | encodes it with the default\n', '        encoding | ... >.\n', '        """\n', '        if isinstance(obj, unicode):\n', '            return obj\n', '\n', '        try:\n', '            # If this works, then _ustr(obj) has the same behaviour as str(obj), so\n', "            # it won't break any existing code.\n", '            return str(obj)\n', '\n', '        except UnicodeEncodeError:\n', '            # Else encode it\n', "            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')\n", "            xmlcharref = Regex(r'&#\\d+;')\n", "            xmlcharref.setParseAction(lambda t: '\\\\u' + hex(int(t[0][2:-1]))[2:])\n", '            return xmlcharref.transformString(ret)\n', '\n', '    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions\n', '    singleArgBuiltins = []\n', '    import __builtin__\n', '\n', '    for fname in "sum len sorted reversed list tuple set any all min max".split():\n', '        try:\n', '            singleArgBuiltins.append(getattr(__builtin__, fname))\n', '        except AttributeError:\n', '            continue\n', '\n', '_generatorType = type((y for y in range(1)))\n', '\n', 'def _xml_escape(data):\n', '    """Escape &, <, >, ", \', etc. in a string of data."""\n', '\n', '    # ampersand must be replaced first\n', '    from_symbols = \'&><"\\\'\'\n', '    to_symbols = (\'&\' + s + \';\' for s in "amp gt lt quot apos".split())\n', '    for from_, to_ in zip(from_symbols, to_symbols):\n', '        data = data.replace(from_, to_)\n', '    return data\n', '\n', 'alphas = string.ascii_uppercase + string.ascii_lowercase\n', 'nums = "0123456789"\n', 'hexnums = nums + "ABCDEFabcdef"\n', 'alphanums = alphas + nums\n', '_bslash = chr(92)\n', 'printables = "".join(c for c in string.printable if c not in string.whitespace)\n', '\n', '\n', 'def conditionAsParseAction(fn, message=None, fatal=False):\n', '    msg = message if message is not None else "failed user-defined condition"\n', '    exc_type = ParseFatalException if fatal else ParseException\n', '    fn = _trim_arity(fn)\n', '\n', '    @wraps(fn)\n', '    def pa(s, l, t):\n', '        if not bool(fn(s, l, t)):\n', '            raise exc_type(s, l, msg)\n', '\n', '    return pa\n', '\n', 'class ParseBaseException(Exception):\n', '    """base exception class for all parsing runtime exceptions"""\n', '    # Performance tuning: we construct a *lot* of these, so keep this\n', '    # constructor as small and fast as possible\n', '    def __init__(self, pstr, loc=0, msg=None, elem=None):\n', '        self.loc = loc\n', '        if msg is None:\n', '            self.msg = pstr\n', '            self.pstr = ""\n', '        else:\n', '            self.msg = msg\n', '            self.pstr = pstr\n', '        self.parserElement = elem\n', '        self.args = (pstr, loc, msg)\n', '\n', '    @classmethod\n', '    def _from_exception(cls, pe):\n', '        """\n', '        internal factory method to simplify creating one type of ParseException\n', '        from another - avoids having __init__ signature conflicts among subclasses\n', '        """\n', '        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)\n', '\n', '    def __getattr__(self, aname):\n', '        """supported attributes by name are:\n', '           - lineno - returns the line number of the exception text\n', '           - col - returns the column number of the exception text\n', '           - line - returns the line containing the exception text\n', '        """\n', '        if aname == "lineno":\n', '            return lineno(self.loc, self.pstr)\n', '        elif aname in ("col", "column"):\n', '            return col(self.loc, self.pstr)\n', '        elif aname == "line":\n', '            return line(self.loc, self.pstr)\n', '        else:\n', '            raise AttributeError(aname)\n', '\n', '    def __str__(self):\n', '        if self.pstr:\n', '            if self.loc >= len(self.pstr):\n', "                foundstr = ', found end of text'\n", '            else:\n', "                foundstr = (', found %r' % self.pstr[self.loc:self.loc + 1]).replace(r'\\\\', '\\\\')\n", '        else:\n', "            foundstr = ''\n", '        return ("%s%s  (at char %d), (line:%d, col:%d)" %\n', '                   (self.msg, foundstr, self.loc, self.lineno, self.column))\n', '    def __repr__(self):\n', '        return _ustr(self)\n', '    def markInputline(self, markerString=">!<"):\n', '        """Extracts the exception line from the input string, and marks\n', '           the location of the exception with a special symbol.\n', '        """\n', '        line_str = self.line\n', '        line_column = self.column - 1\n', '        if markerString:\n', '            line_str = "".join((line_str[:line_column],\n', '                                markerString, line_str[line_column:]))\n', '        return line_str.strip()\n', '    def __dir__(self):\n', '        return "lineno col line".split() + dir(type(self))\n', '\n', 'class ParseException(ParseBaseException):\n', '    """\n', "    Exception thrown when parse expressions don't match class;\n", '    supported attributes by name are:\n', '    - lineno - returns the line number of the exception text\n', '    - col - returns the column number of the exception text\n', '    - line - returns the line containing the exception text\n', '\n', '    Example::\n', '\n', '        try:\n', '            Word(nums).setName("integer").parseString("ABC")\n', '        except ParseException as pe:\n', '            print(pe)\n', '            print("column: {}".format(pe.col))\n', '\n', '    prints::\n', '\n', '       Expected integer (at char 0), (line:1, col:1)\n', '        column: 1\n', '\n', '    """\n', '\n', '    @staticmethod\n', '    def explain(exc, depth=16):\n', '        """\n', '        Method to take an exception and translate the Python internal traceback into a list\n', '        of the pyparsing expressions that caused the exception to be raised.\n', '\n', '        Parameters:\n', '\n', '         - exc - exception raised during parsing (need not be a ParseException, in support\n', '           of Python exceptions that might be raised in a parse action)\n', '         - depth (default=16) - number of levels back in the stack trace to list expression\n', '           and function names; if None, the full stack trace names will be listed; if 0, only\n', '           the failing input line, marker, and exception string will be shown\n', '\n', '        Returns a multi-line string listing the ParserElements and/or function names in the\n', "        exception's stack trace.\n", '\n', '        Note: the diagnostic output will include string representations of the expressions\n', '        that failed to parse. These representations will be more helpful if you use `setName` to\n', '        give identifiable names to your expressions. Otherwise they will use the default string\n', '        forms, which may be cryptic to read.\n', '\n', '        explain() is only supported under Python 3.\n', '        """\n', '        import inspect\n', '\n', '        if depth is None:\n', '            depth = sys.getrecursionlimit()\n', '        ret = []\n', '        if isinstance(exc, ParseBaseException):\n', '            ret.append(exc.line)\n', "            ret.append(' ' * (exc.col - 1) + '^')\n", '        ret.append("{0}: {1}".format(type(exc).__name__, exc))\n', '\n', '        if depth > 0:\n', '            callers = inspect.getinnerframes(exc.__traceback__, context=depth)\n', '            seen = set()\n', '            for i, ff in enumerate(callers[-depth:]):\n', '                frm = ff[0]\n', '\n', "                f_self = frm.f_locals.get('self', None)\n", '                if isinstance(f_self, ParserElement):\n', "                    if frm.f_code.co_name not in ('parseImpl', '_parseNoCache'):\n", '                        continue\n', '                    if f_self in seen:\n', '                        continue\n', '                    seen.add(f_self)\n', '\n', '                    self_type = type(f_self)\n', '                    ret.append("{0}.{1} - {2}".format(self_type.__module__,\n', '                                                      self_type.__name__,\n', '                                                      f_self))\n', '                elif f_self is not None:\n', '                    self_type = type(f_self)\n', '                    ret.append("{0}.{1}".format(self_type.__module__,\n', '                                                self_type.__name__))\n', '                else:\n', '                    code = frm.f_code\n', "                    if code.co_name in ('wrapper', '<module>'):\n", '                        continue\n', '\n', '                    ret.append("{0}".format(code.co_name))\n', '\n', '                depth -= 1\n', '                if not depth:\n', '                    break\n', '\n', "        return '\\n'.join(ret)\n", '\n', '\n', 'class ParseFatalException(ParseBaseException):\n', '    """user-throwable exception thrown when inconsistent parse content\n', '       is found; stops all parsing immediately"""\n', '    pass\n', '\n', 'class ParseSyntaxException(ParseFatalException):\n', '    """just like :class:`ParseFatalException`, but thrown internally\n', "    when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates\n", '    that parsing is to stop immediately because an unbacktrackable\n', '    syntax error has been found.\n', '    """\n', '    pass\n', '\n', '#~ class ReparseException(ParseBaseException):\n', '    #~ """Experimental class - parse actions can raise this exception to cause\n', '       #~ pyparsing to reparse the input string:\n', '        #~ - with a modified input string, and/or\n', '        #~ - with a modified start location\n', '       #~ Set the values of the ReparseException in the constructor, and raise the\n', '       #~ exception in a parse action to cause pyparsing to use the new string/location.\n', '       #~ Setting the values as None causes no change to be made.\n', '       #~ """\n', '    #~ def __init_( self, newstring, restartLoc ):\n', '        #~ self.newParseText = newstring\n', '        #~ self.reparseLoc = restartLoc\n', '\n', 'class RecursiveGrammarException(Exception):\n', '    """exception thrown by :class:`ParserElement.validate` if the\n', '    grammar could be improperly recursive\n', '    """\n', '    def __init__(self, parseElementList):\n', '        self.parseElementTrace = parseElementList\n', '\n', '    def __str__(self):\n', '        return "RecursiveGrammarException: %s" % self.parseElementTrace\n', '\n', 'class _ParseResultsWithOffset(object):\n', '    def __init__(self, p1, p2):\n', '        self.tup = (p1, p2)\n', '    def __getitem__(self, i):\n', '        return self.tup[i]\n', '    def __repr__(self):\n', '        return repr(self.tup[0])\n', '    def setOffset(self, i):\n', '        self.tup = (self.tup[0], i)\n', '\n', 'class ParseResults(object):\n', '    """Structured parse results, to provide multiple means of access to\n', '    the parsed data:\n', '\n', '       - as a list (``len(results)``)\n', '       - by list index (``results[0], results[1]``, etc.)\n', '       - by attribute (``results.<resultsName>`` - see :class:`ParserElement.setResultsName`)\n', '\n', '    Example::\n', '\n', '        integer = Word(nums)\n', '        date_str = (integer.setResultsName("year") + \'/\'\n', '                        + integer.setResultsName("month") + \'/\'\n', '                        + integer.setResultsName("day"))\n', '        # equivalent form:\n', '        # date_str = integer("year") + \'/\' + integer("month") + \'/\' + integer("day")\n', '\n', '        # parseString returns a ParseResults object\n', '        result = date_str.parseString("1999/12/31")\n', '\n', '        def test(s, fn=repr):\n', '            print("%s -> %s" % (s, fn(eval(s))))\n', '        test("list(result)")\n', '        test("result[0]")\n', '        test("result[\'month\']")\n', '        test("result.day")\n', '        test("\'month\' in result")\n', '        test("\'minutes\' in result")\n', '        test("result.dump()", str)\n', '\n', '    prints::\n', '\n', "        list(result) -> ['1999', '/', '12', '/', '31']\n", "        result[0] -> '1999'\n", "        result['month'] -> '12'\n", "        result.day -> '31'\n", "        'month' in result -> True\n", "        'minutes' in result -> False\n", "        result.dump() -> ['1999', '/', '12', '/', '31']\n", '        - day: 31\n', '        - month: 12\n', '        - year: 1999\n', '    """\n', '    def __new__(cls, toklist=None, name=None, asList=True, modal=True):\n', '        if isinstance(toklist, cls):\n', '            return toklist\n', '        retobj = object.__new__(cls)\n', '        retobj.__doinit = True\n', '        return retobj\n', '\n', '    # Performance tuning: we construct a *lot* of these, so keep this\n', '    # constructor as small and fast as possible\n', '    def __init__(self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance):\n', '        if self.__doinit:\n', '            self.__doinit = False\n', '            self.__name = None\n', '            self.__parent = None\n', '            self.__accumNames = {}\n', '            self.__asList = asList\n', '            self.__modal = modal\n', '            if toklist is None:\n', '                toklist = []\n', '            if isinstance(toklist, list):\n', '                self.__toklist = toklist[:]\n', '            elif isinstance(toklist, _generatorType):\n', '                self.__toklist = list(toklist)\n', '            else:\n', '                self.__toklist = [toklist]\n', '            self.__tokdict = dict()\n', '\n', '        if name is not None and name:\n', '            if not modal:\n', '                self.__accumNames[name] = 0\n', '            if isinstance(name, int):\n', '                name = _ustr(name)  # will always return a str, but use _ustr for consistency\n', '            self.__name = name\n', "            if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None, '', [])):\n", '                if isinstance(toklist, basestring):\n', '                    toklist = [toklist]\n', '                if asList:\n', '                    if isinstance(toklist, ParseResults):\n', '                        self[name] = _ParseResultsWithOffset(ParseResults(toklist.__toklist), 0)\n', '                    else:\n', '                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0)\n', '                    self[name].__name = name\n', '                else:\n', '                    try:\n', '                        self[name] = toklist[0]\n', '                    except (KeyError, TypeError, IndexError):\n', '                        self[name] = toklist\n', '\n', '    def __getitem__(self, i):\n', '        if isinstance(i, (int, slice)):\n', '            return self.__toklist[i]\n', '        else:\n', '            if i not in self.__accumNames:\n', '                return self.__tokdict[i][-1][0]\n', '            else:\n', '                return ParseResults([v[0] for v in self.__tokdict[i]])\n', '\n', '    def __setitem__(self, k, v, isinstance=isinstance):\n', '        if isinstance(v, _ParseResultsWithOffset):\n', '            self.__tokdict[k] = self.__tokdict.get(k, list()) + [v]\n', '            sub = v[0]\n', '        elif isinstance(k, (int, slice)):\n', '            self.__toklist[k] = v\n', '            sub = v\n', '        else:\n', '            self.__tokdict[k] = self.__tokdict.get(k, list()) + [_ParseResultsWithOffset(v, 0)]\n', '            sub = v\n', '        if isinstance(sub, ParseResults):\n', '            sub.__parent = wkref(self)\n', '\n', '    def __delitem__(self, i):\n', '        if isinstance(i, (int, slice)):\n', '            mylen = len(self.__toklist)\n', '            del self.__toklist[i]\n', '\n', '            # convert int to slice\n', '            if isinstance(i, int):\n', '                if i < 0:\n', '                    i += mylen\n', '                i = slice(i, i + 1)\n', '            # get removed indices\n', '            removed = list(range(*i.indices(mylen)))\n', '            removed.reverse()\n', '            # fixup indices in token dictionary\n', '            for name, occurrences in self.__tokdict.items():\n', '                for j in removed:\n', '                    for k, (value, position) in enumerate(occurrences):\n', '                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))\n', '        else:\n', '            del self.__tokdict[i]\n', '\n', '    def __contains__(self, k):\n', '        return k in self.__tokdict\n', '\n', '    def __len__(self):\n', '        return len(self.__toklist)\n', '\n', '    def __bool__(self):\n', '        return (not not self.__toklist)\n', '    __nonzero__ = __bool__\n', '\n', '    def __iter__(self):\n', '        return iter(self.__toklist)\n', '\n', '    def __reversed__(self):\n', '        return iter(self.__toklist[::-1])\n', '\n', '    def _iterkeys(self):\n', '        if hasattr(self.__tokdict, "iterkeys"):\n', '            return self.__tokdict.iterkeys()\n', '        else:\n', '            return iter(self.__tokdict)\n', '\n', '    def _itervalues(self):\n', '        return (self[k] for k in self._iterkeys())\n', '\n', '    def _iteritems(self):\n', '        return ((k, self[k]) for k in self._iterkeys())\n', '\n', '    if PY_3:\n', '        keys = _iterkeys\n', '        """Returns an iterator of all named result keys."""\n', '\n', '        values = _itervalues\n', '        """Returns an iterator of all named result values."""\n', '\n', '        items = _iteritems\n', '        """Returns an iterator of all named result key-value tuples."""\n', '\n', '    else:\n', '        iterkeys = _iterkeys\n', '        """Returns an iterator of all named result keys (Python 2.x only)."""\n', '\n', '        itervalues = _itervalues\n', '        """Returns an iterator of all named result values (Python 2.x only)."""\n', '\n', '        iteritems = _iteritems\n', '        """Returns an iterator of all named result key-value tuples (Python 2.x only)."""\n', '\n', '        def keys(self):\n', '            """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""\n', '            return list(self.iterkeys())\n', '\n', '        def values(self):\n', '            """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""\n', '            return list(self.itervalues())\n', '\n', '        def items(self):\n', '            """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""\n', '            return list(self.iteritems())\n', '\n', '    def haskeys(self):\n', '        """Since keys() returns an iterator, this method is helpful in bypassing\n', '           code that looks for the existence of any defined results names."""\n', '        return bool(self.__tokdict)\n', '\n', '    def pop(self, *args, **kwargs):\n', '        """\n', '        Removes and returns item at specified index (default= ``last``).\n', '        Supports both ``list`` and ``dict`` semantics for ``pop()``. If\n', '        passed no argument or an integer argument, it will use ``list``\n', '        semantics and pop tokens from the list of parsed tokens. If passed\n', '        a non-integer argument (most likely a string), it will use ``dict``\n', '        semantics and pop the corresponding value from any defined results\n', '        names. A second default return value argument is supported, just as in\n', '        ``dict.pop()``.\n', '\n', '        Example::\n', '\n', '            def remove_first(tokens):\n', '                tokens.pop(0)\n', '            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> [\'0\', \'123\', \'321\']\n', '            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> [\'123\', \'321\']\n', '\n', '            label = Word(alphas)\n', '            patt = label("LABEL") + OneOrMore(Word(nums))\n', '            print(patt.parseString("AAB 123 321").dump())\n', '\n', '            # Use pop() in a parse action to remove named result (note that corresponding value is not\n', '            # removed from list form of results)\n', '            def remove_LABEL(tokens):\n', '                tokens.pop("LABEL")\n', '                return tokens\n', '            patt.addParseAction(remove_LABEL)\n', '            print(patt.parseString("AAB 123 321").dump())\n', '\n', '        prints::\n', '\n', "            ['AAB', '123', '321']\n", '            - LABEL: AAB\n', '\n', "            ['AAB', '123', '321']\n", '        """\n', '        if not args:\n', '            args = [-1]\n', '        for k, v in kwargs.items():\n', "            if k == 'default':\n", '                args = (args[0], v)\n', '            else:\n', '                raise TypeError("pop() got an unexpected keyword argument \'%s\'" % k)\n', '        if (isinstance(args[0], int)\n', '                or len(args) == 1\n', '                or args[0] in self):\n', '            index = args[0]\n', '            ret = self[index]\n', '            del self[index]\n', '            return ret\n', '        else:\n', '            defaultvalue = args[1]\n', '            return defaultvalue\n', '\n', '    def get(self, key, defaultValue=None):\n', '        """\n', '        Returns named result matching the given key, or if there is no\n', '        such name, then returns the given ``defaultValue`` or ``None`` if no\n', '        ``defaultValue`` is specified.\n', '\n', '        Similar to ``dict.get()``.\n', '\n', '        Example::\n', '\n', '            integer = Word(nums)\n', '            date_str = integer("year") + \'/\' + integer("month") + \'/\' + integer("day")\n', '\n', '            result = date_str.parseString("1999/12/31")\n', '            print(result.get("year")) # -> \'1999\'\n', '            print(result.get("hour", "not specified")) # -> \'not specified\'\n', '            print(result.get("hour")) # -> None\n', '        """\n', '        if key in self:\n', '            return self[key]\n', '        else:\n', '            return defaultValue\n', '\n', '    def insert(self, index, insStr):\n', '        """\n', '        Inserts new element at location index in the list of parsed tokens.\n', '\n', '        Similar to ``list.insert()``.\n', '\n', '        Example::\n', '\n', '            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> [\'0\', \'123\', \'321\']\n', '\n', '            # use a parse action to insert the parse location in the front of the parsed results\n', '            def insert_locn(locn, tokens):\n', '                tokens.insert(0, locn)\n', '            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, \'0\', \'123\', \'321\']\n', '        """\n', '        self.__toklist.insert(index, insStr)\n', '        # fixup indices in token dictionary\n', '        for name, occurrences in self.__tokdict.items():\n', '            for k, (value, position) in enumerate(occurrences):\n', '                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))\n', '\n', '    def append(self, item):\n', '        """\n', '        Add single element to end of ParseResults list of elements.\n', '\n', '        Example::\n', '\n', '            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> [\'0\', \'123\', \'321\']\n', '\n', '            # use a parse action to compute the sum of the parsed integers, and add it to the end\n', '            def append_sum(tokens):\n', '                tokens.append(sum(map(int, tokens)))\n', '            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> [\'0\', \'123\', \'321\', 444]\n', '        """\n', '        self.__toklist.append(item)\n', '\n', '    def extend(self, itemseq):\n', '        """\n', '        Add sequence of elements to end of ParseResults list of elements.\n', '\n', '        Example::\n', '\n', '            patt = OneOrMore(Word(alphas))\n', '\n', '            # use a parse action to append the reverse of the matched strings, to make a palindrome\n', '            def make_palindrome(tokens):\n', '                tokens.extend(reversed([t[::-1] for t in tokens]))\n', "                return ''.join(tokens)\n", '            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> \'lskdjsdlkjflksddsklfjkldsjdksl\'\n', '        """\n', '        if isinstance(itemseq, ParseResults):\n', '            self.__iadd__(itemseq)\n', '        else:\n', '            self.__toklist.extend(itemseq)\n', '\n', '    def clear(self):\n', '        """\n', '        Clear all elements and results names.\n', '        """\n', '        del self.__toklist[:]\n', '        self.__tokdict.clear()\n', '\n', '    def __getattr__(self, name):\n', '        try:\n', '            return self[name]\n', '        except KeyError:\n', '            return ""\n', '\n', '    def __add__(self, other):\n', '        ret = self.copy()\n', '        ret += other\n', '        return ret\n', '\n', '    def __iadd__(self, other):\n', '        if other.__tokdict:\n', '            offset = len(self.__toklist)\n', '            addoffset = lambda a: offset if a < 0 else a + offset\n', '            otheritems = other.__tokdict.items()\n', '            otherdictitems = [(k, _ParseResultsWithOffset(v[0], addoffset(v[1])))\n', '                              for k, vlist in otheritems for v in vlist]\n', '            for k, v in otherdictitems:\n', '                self[k] = v\n', '                if isinstance(v[0], ParseResults):\n', '                    v[0].__parent = wkref(self)\n', '\n', '        self.__toklist += other.__toklist\n', '        self.__accumNames.update(other.__accumNames)\n', '        return self\n', '\n', '    def __radd__(self, other):\n', '        if isinstance(other, int) and other == 0:\n', '            # useful for merging many ParseResults using sum() builtin\n', '            return self.copy()\n', '        else:\n', '            # this may raise a TypeError - so be it\n', '            return other + self\n', '\n', '    def __repr__(self):\n', '        return "(%s, %s)" % (repr(self.__toklist), repr(self.__tokdict))\n', '\n', '    def __str__(self):\n', "        return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'\n", '\n', "    def _asStringList(self, sep=''):\n", '        out = []\n', '        for item in self.__toklist:\n', '            if out and sep:\n', '                out.append(sep)\n', '            if isinstance(item, ParseResults):\n', '                out += item._asStringList()\n', '            else:\n', '                out.append(_ustr(item))\n', '        return out\n', '\n', '    def asList(self):\n', '        """\n', '        Returns the parse results as a nested list of matching tokens, all converted to strings.\n', '\n', '        Example::\n', '\n', '            patt = OneOrMore(Word(alphas))\n', '            result = patt.parseString("sldkj lsdkj sldkj")\n', '            # even though the result prints in string-like form, it is actually a pyparsing ParseResults\n', "            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']\n", '\n', '            # Use asList() to create an actual list\n', '            result_list = result.asList()\n', "            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']\n", '        """\n', '        return [res.asList() if isinstance(res, ParseResults) else res for res in self.__toklist]\n', '\n', '    def asDict(self):\n', '        """\n', '        Returns the named parse results as a nested dictionary.\n', '\n', '        Example::\n', '\n', '            integer = Word(nums)\n', '            date_str = integer("year") + \'/\' + integer("month") + \'/\' + integer("day")\n', '\n', "            result = date_str.parseString('12/31/1999')\n", "            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})\n", '\n', '            result_dict = result.asDict()\n', "            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}\n", '\n', '            # even though a ParseResults supports dict-like access, sometime you just need to have a dict\n', '            import json\n', '            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable\n', '            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}\n', '        """\n', '        if PY_3:\n', '            item_fn = self.items\n', '        else:\n', '            item_fn = self.iteritems\n', '\n', '        def toItem(obj):\n', '            if isinstance(obj, ParseResults):\n', '                if obj.haskeys():\n', '                    return obj.asDict()\n', '                else:\n', '                    return [toItem(v) for v in obj]\n', '            else:\n', '                return obj\n', '\n', '        return dict((k, toItem(v)) for k, v in item_fn())\n', '\n', '    def copy(self):\n', '        """\n', '        Returns a new copy of a :class:`ParseResults` object.\n', '        """\n', '        ret = ParseResults(self.__toklist)\n', '        ret.__tokdict = dict(self.__tokdict.items())\n', '        ret.__parent = self.__parent\n', '        ret.__accumNames.update(self.__accumNames)\n', '        ret.__name = self.__name\n', '        return ret\n', '\n', '    def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True):\n', '        """\n', '        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.\n', '        """\n', '        nl = "\\n"\n', '        out = []\n', '        namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items()\n', '                          for v in vlist)\n', '        nextLevelIndent = indent + "  "\n', '\n', '        # collapse out indents if formatting is not desired\n', '        if not formatted:\n', '            indent = ""\n', '            nextLevelIndent = ""\n', '            nl = ""\n', '\n', '        selfTag = None\n', '        if doctag is not None:\n', '            selfTag = doctag\n', '        else:\n', '            if self.__name:\n', '                selfTag = self.__name\n', '\n', '        if not selfTag:\n', '            if namedItemsOnly:\n', '                return ""\n', '            else:\n', '                selfTag = "ITEM"\n', '\n', '        out += [nl, indent, "<", selfTag, ">"]\n', '\n', '        for i, res in enumerate(self.__toklist):\n', '            if isinstance(res, ParseResults):\n', '                if i in namedItems:\n', '                    out += [res.asXML(namedItems[i],\n', '                                      namedItemsOnly and doctag is None,\n', '                                      nextLevelIndent,\n', '                                      formatted)]\n', '                else:\n', '                    out += [res.asXML(None,\n', '                                      namedItemsOnly and doctag is None,\n', '                                      nextLevelIndent,\n', '                                      formatted)]\n', '            else:\n', '                # individual token, see if there is a name for it\n', '                resTag = None\n', '                if i in namedItems:\n', '                    resTag = namedItems[i]\n', '                if not resTag:\n', '                    if namedItemsOnly:\n', '                        continue\n', '                    else:\n', '                        resTag = "ITEM"\n', '                xmlBodyText = _xml_escape(_ustr(res))\n', '                out += [nl, nextLevelIndent, "<", resTag, ">",\n', '                        xmlBodyText,\n', '                                                "</", resTag, ">"]\n', '\n', '        out += [nl, indent, "</", selfTag, ">"]\n', '        return "".join(out)\n', '\n', '    def __lookup(self, sub):\n', '        for k, vlist in self.__tokdict.items():\n', '            for v, loc in vlist:\n', '                if sub is v:\n', '                    return k\n', '        return None\n', '\n', '    def getName(self):\n', '        r"""\n', '        Returns the results name for this token expression. Useful when several\n', '        different expressions might match at a particular location.\n', '\n', '        Example::\n', '\n', '            integer = Word(nums)\n', '            ssn_expr = Regex(r"\\d\\d\\d-\\d\\d-\\d\\d\\d\\d")\n', "            house_number_expr = Suppress('#') + Word(nums, alphanums)\n", '            user_data = (Group(house_number_expr)("house_number")\n', '                        | Group(ssn_expr)("ssn")\n', '                        | Group(integer)("age"))\n', '            user_info = OneOrMore(user_data)\n', '\n', '            result = user_info.parseString("22 111-22-3333 #221B")\n', '            for item in result:\n', "                print(item.getName(), ':', item[0])\n", '\n', '        prints::\n', '\n', '            age : 22\n', '            ssn : 111-22-3333\n', '            house_number : 221B\n', '        """\n', '        if self.__name:\n', '            return self.__name\n', '        elif self.__parent:\n', '            par = self.__parent()\n', '            if par:\n', '                return par.__lookup(self)\n', '            else:\n', '                return None\n', '        elif (len(self) == 1\n', '              and len(self.__tokdict) == 1\n', '              and next(iter(self.__tokdict.values()))[0][1] in (0, -1)):\n', '            return next(iter(self.__tokdict.keys()))\n', '        else:\n', '            return None\n', '\n', "    def dump(self, indent='', full=True, include_list=True, _depth=0):\n", '        """\n', '        Diagnostic method for listing out the contents of\n', '        a :class:`ParseResults`. Accepts an optional ``indent`` argument so\n', '        that this string can be embedded in a nested display of other data.\n', '\n', '        Example::\n', '\n', '            integer = Word(nums)\n', '            date_str = integer("year") + \'/\' + integer("month") + \'/\' + integer("day")\n', '\n', "            result = date_str.parseString('12/31/1999')\n", '            print(result.dump())\n', '\n', '        prints::\n', '\n', "            ['12', '/', '31', '/', '1999']\n", '            - day: 1999\n', '            - month: 31\n', '            - year: 12\n', '        """\n', '        out = []\n', "        NL = '\\n'\n", '        if include_list:\n', '            out.append(indent + _ustr(self.asList()))\n', '        else:\n', "            out.append('')\n", '\n', '        if full:\n', '            if self.haskeys():\n', '                items = sorted((str(k), v) for k, v in self.items())\n', '                for k, v in items:\n', '                    if out:\n', '                        out.append(NL)\n', '                    out.append("%s%s- %s: " % (indent, (\'  \' * _depth), k))\n', '                    if isinstance(v, ParseResults):\n', '                        if v:\n', '                            out.append(v.dump(indent=indent, full=full, include_list=include_list, _depth=_depth + 1))\n', '                        else:\n', '                            out.append(_ustr(v))\n', '                    else:\n', '                        out.append(repr(v))\n', '            elif any(isinstance(vv, ParseResults) for vv in self):\n', '                v = self\n', '                for i, vv in enumerate(v):\n', '                    if isinstance(vv, ParseResults):\n', '                        out.append("\\n%s%s[%d]:\\n%s%s%s" % (indent,\n', "                                                            ('  ' * (_depth)),\n", '                                                            i,\n', '                                                            indent,\n', "                                                            ('  ' * (_depth + 1)),\n", '                                                            vv.dump(indent=indent,\n', '                                                                    full=full,\n', '                                                                    include_list=include_list,\n', '                                                                    _depth=_depth + 1)))\n', '                    else:\n', '                        out.append("\\n%s%s[%d]:\\n%s%s%s" % (indent,\n', "                                                            ('  ' * (_depth)),\n", '                                                            i,\n', '                                                            indent,\n', "                                                            ('  ' * (_depth + 1)),\n", '                                                            _ustr(vv)))\n', '\n', '        return "".join(out)\n', '\n', '    def pprint(self, *args, **kwargs):\n', '        """\n', '        Pretty-printer for parsed results as a list, using the\n', '        `pprint <https://docs.python.org/3/library/pprint.html>`_ module.\n', '        Accepts additional positional or keyword args as defined for\n', '        `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .\n', '\n', '        Example::\n', '\n', '            ident = Word(alphas, alphanums)\n', '            num = Word(nums)\n', '            func = Forward()\n', "            term = ident | num | Group('(' + func + ')')\n", '            func <<= ident + Group(Optional(delimitedList(term)))\n', '            result = func.parseString("fna a,b,(fnb c,d,200),100")\n', '            result.pprint(width=40)\n', '\n', '        prints::\n', '\n', "            ['fna',\n", "             ['a',\n", "              'b',\n", "              ['(', 'fnb', ['c', 'd', '200'], ')'],\n", "              '100']]\n", '        """\n', '        pprint.pprint(self.asList(), *args, **kwargs)\n', '\n', '    # add support for pickle protocol\n', '    def __getstate__(self):\n', '        return (self.__toklist,\n', '                (self.__tokdict.copy(),\n', '                 self.__parent is not None and self.__parent() or None,\n', '                 self.__accumNames,\n', '                 self.__name))\n', '\n', '    def __setstate__(self, state):\n', '        self.__toklist = state[0]\n', '        self.__tokdict, par, inAccumNames, self.__name = state[1]\n', '        self.__accumNames = {}\n', '        self.__accumNames.update(inAccumNames)\n', '        if par is not None:\n', '            self.__parent = wkref(par)\n', '        else:\n', '            self.__parent = None\n', '\n', '    def __getnewargs__(self):\n', '        return self.__toklist, self.__name, self.__asList, self.__modal\n', '\n', '    def __dir__(self):\n', '        return dir(type(self)) + list(self.keys())\n', '\n', '    @classmethod\n', '    def from_dict(cls, other, name=None):\n', '        """\n', '        Helper classmethod to construct a ParseResults from a dict, preserving the\n', "        name-value relations as results names. If an optional 'name' argument is\n", '        given, a nested ParseResults will be returned\n', '        """\n', '        def is_iterable(obj):\n', '            try:\n', '                iter(obj)\n', '            except Exception:\n', '                return False\n', '            else:\n', '                if PY_3:\n', '                    return not isinstance(obj, (str, bytes))\n', '                else:\n', '                    return not isinstance(obj, basestring)\n', '\n', '        ret = cls([])\n', '        for k, v in other.items():\n', '            if isinstance(v, Mapping):\n', '                ret += cls.from_dict(v, name=k)\n', '            else:\n', '                ret += cls([v], name=k, asList=is_iterable(v))\n', '        if name is not None:\n', '            ret = cls([ret], name=name)\n', '        return ret\n', '\n', 'MutableMapping.register(ParseResults)\n', '\n', 'def col (loc, strg):\n', '    """Returns current column within a string, counting newlines as line separators.\n', '   The first column is number 1.\n', '\n', '   Note: the default parsing behavior is to expand tabs in the input string\n', '   before starting the parsing process.  See\n', '   :class:`ParserElement.parseString` for more\n', '   information on parsing strings containing ``<TAB>`` s, and suggested\n', '   methods to maintain a consistent view of the parsed string, the parse\n', '   location, and line and column positions within the parsed string.\n', '   """\n', '    s = strg\n', '    return 1 if 0 < loc < len(s) and s[loc-1] == \'\\n\' else loc - s.rfind("\\n", 0, loc)\n', '\n', 'def lineno(loc, strg):\n', '    """Returns current line number within a string, counting newlines as line separators.\n', '    The first line is number 1.\n', '\n', '    Note - the default parsing behavior is to expand tabs in the input string\n', '    before starting the parsing process.  See :class:`ParserElement.parseString`\n', '    for more information on parsing strings containing ``<TAB>`` s, and\n', '    suggested methods to maintain a consistent view of the parsed string, the\n', '    parse location, and line and column positions within the parsed string.\n', '    """\n', '    return strg.count("\\n", 0, loc) + 1\n', '\n', 'def line(loc, strg):\n', '    """Returns the line of text containing loc within a string, counting newlines as line separators.\n', '       """\n', '    lastCR = strg.rfind("\\n", 0, loc)\n', '    nextCR = strg.find("\\n", loc)\n', '    if nextCR >= 0:\n', '        return strg[lastCR + 1:nextCR]\n', '    else:\n', '        return strg[lastCR + 1:]\n', '\n', 'def _defaultStartDebugAction(instring, loc, expr):\n', '    print(("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % (lineno(loc, instring), col(loc, instring))))\n', '\n', 'def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks):\n', '    print("Matched " + _ustr(expr) + " -> " + str(toks.asList()))\n', '\n', 'def _defaultExceptionDebugAction(instring, loc, expr, exc):\n', '    print("Exception raised:" + _ustr(exc))\n', '\n', 'def nullDebugAction(*args):\n', '    """\'Do-nothing\' debug action, to suppress debugging output during parsing."""\n', '    pass\n', '\n', '# Only works on Python 3.x - nonlocal is toxic to Python 2 installs\n', "#~ 'decorator to trim function calls to match the arity of the target'\n", '#~ def _trim_arity(func, maxargs=3):\n', '    #~ if func in singleArgBuiltins:\n', '        #~ return lambda s,l,t: func(t)\n', '    #~ limit = 0\n', '    #~ foundArity = False\n', '    #~ def wrapper(*args):\n', '        #~ nonlocal limit,foundArity\n', '        #~ while 1:\n', '            #~ try:\n', '                #~ ret = func(*args[limit:])\n', '                #~ foundArity = True\n', '                #~ return ret\n', '            #~ except TypeError:\n', '                #~ if limit == maxargs or foundArity:\n', '                    #~ raise\n', '                #~ limit += 1\n', '                #~ continue\n', '    #~ return wrapper\n', '\n', '# this version is Python 2.x-3.x cross-compatible\n', "'decorator to trim function calls to match the arity of the target'\n", 'def _trim_arity(func, maxargs=2):\n', '    if func in singleArgBuiltins:\n', '        return lambda s, l, t: func(t)\n', '    limit = [0]\n', '    foundArity = [False]\n', '\n', '    # traceback return data structure changed in Py3.5 - normalize back to plain tuples\n', '    if system_version[:2] >= (3, 5):\n', '        def extract_stack(limit=0):\n', '            # special handling for Python 3.5.0 - extra deep call stack by 1\n', '            offset = -3 if system_version == (3, 5, 0) else -2\n', '            frame_summary = traceback.extract_stack(limit=-offset + limit - 1)[offset]\n', '            return [frame_summary[:2]]\n', '        def extract_tb(tb, limit=0):\n', '            frames = traceback.extract_tb(tb, limit=limit)\n', '            frame_summary = frames[-1]\n', '            return [frame_summary[:2]]\n', '    else:\n', '        extract_stack = traceback.extract_stack\n', '        extract_tb = traceback.extract_tb\n', '\n', '    # synthesize what would be returned by traceback.extract_stack at the call to\n', "    # user's parse action 'func', so that we don't incur call penalty at parse time\n", '\n', '    LINE_DIFF = 6\n', '    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND\n', '    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!\n', '    this_line = extract_stack(limit=2)[-1]\n', '    pa_call_line_synth = (this_line[0], this_line[1] + LINE_DIFF)\n', '\n', '    def wrapper(*args):\n', '        while 1:\n', '            try:\n', '                ret = func(*args[limit[0]:])\n', '                foundArity[0] = True\n', '                return ret\n', '            except TypeError:\n', '                # re-raise TypeErrors if they did not come from our arity testing\n', '                if foundArity[0]:\n', '                    raise\n', '                else:\n', '                    try:\n', '                        tb = sys.exc_info()[-1]\n', '                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:\n', '                            raise\n', '                    finally:\n', '                        try:\n', '                            del tb\n', '                        except NameError:\n', '                            pass\n', '\n', '                if limit[0] <= maxargs:\n', '                    limit[0] += 1\n', '                    continue\n', '                raise\n', '\n', '    # copy func name to wrapper for sensible debug output\n', '    func_name = "<parse action>"\n', '    try:\n', "        func_name = getattr(func, '__name__',\n", "                            getattr(func, '__class__').__name__)\n", '    except Exception:\n', '        func_name = str(func)\n', '    wrapper.__name__ = func_name\n', '\n', '    return wrapper\n', '\n', '\n', 'class ParserElement(object):\n', '    """Abstract base level parser element class."""\n', '    DEFAULT_WHITE_CHARS = " \\n\\t\\r"\n', '    verbose_stacktrace = False\n', '\n', '    @staticmethod\n', '    def setDefaultWhitespaceChars(chars):\n', '        r"""\n', '        Overrides the default whitespace chars\n', '\n', '        Example::\n', '\n', '            # default whitespace chars are space, <TAB> and newline\n', '            OneOrMore(Word(alphas)).parseString("abc def\\nghi jkl")  # -> [\'abc\', \'def\', \'ghi\', \'jkl\']\n', '\n', '            # change to just treat newline as significant\n', '            ParserElement.setDefaultWhitespaceChars(" \\t")\n', '            OneOrMore(Word(alphas)).parseString("abc def\\nghi jkl")  # -> [\'abc\', \'def\']\n', '        """\n', '        ParserElement.DEFAULT_WHITE_CHARS = chars\n', '\n', '    @staticmethod\n', '    def inlineLiteralsUsing(cls):\n', '        """\n', '        Set class to be used for inclusion of string literals into a parser.\n', '\n', '        Example::\n', '\n', '            # default literal class used is Literal\n', '            integer = Word(nums)\n', '            date_str = integer("year") + \'/\' + integer("month") + \'/\' + integer("day")\n', '\n', '            date_str.parseString("1999/12/31")  # -> [\'1999\', \'/\', \'12\', \'/\', \'31\']\n', '\n', '\n', '            # change to Suppress\n', '            ParserElement.inlineLiteralsUsing(Suppress)\n', '            date_str = integer("year") + \'/\' + integer("month") + \'/\' + integer("day")\n', '\n', '            date_str.parseString("1999/12/31")  # -> [\'1999\', \'12\', \'31\']\n', '        """\n', '        ParserElement._literalStringClass = cls\n', '\n', '    @classmethod\n', '    def _trim_traceback(cls, tb):\n', '        while tb.tb_next:\n', '            tb = tb.tb_next\n', '        return tb\n', '\n', '    def __init__(self, savelist=False):\n', '        self.parseAction = list()\n', '        self.failAction = None\n', '        # ~ self.name = "<unknown>"  # don\'t define self.name, let subclasses try/except upcall\n', '        self.strRepr = None\n', '        self.resultsName = None\n', '        self.saveAsList = savelist\n', '        self.skipWhitespace = True\n', '        self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)\n', '        self.copyDefaultWhiteChars = True\n', '        self.mayReturnEmpty = False # used when checking for left-recursion\n', '        self.keepTabs = False\n', '        self.ignoreExprs = list()\n', '        self.debug = False\n', '        self.streamlined = False\n', "        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index\n", '        self.errmsg = ""\n', '        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)\n', '        self.debugActions = (None, None, None)  # custom debug actions\n', '        self.re = None\n', '        self.callPreparse = True # used to avoid redundant calls to preParse\n', '        self.callDuringTry = False\n', '\n', '    def copy(self):\n', '        """\n', '        Make a copy of this :class:`ParserElement`.  Useful for defining\n', '        different parse actions for the same parsing pattern, using copies of\n', '        the original parse element.\n', '\n', '        Example::\n', '\n', '            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))\n', '            integerK = integer.copy().addParseAction(lambda toks: toks[0] * 1024) + Suppress("K")\n', '            integerM = integer.copy().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")\n', '\n', '            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))\n', '\n', '        prints::\n', '\n', '            [5120, 100, 655360, 268435456]\n', '\n', '        Equivalent form of ``expr.copy()`` is just ``expr()``::\n', '\n', '            integerM = integer().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")\n', '        """\n', '        cpy = copy.copy(self)\n', '        cpy.parseAction = self.parseAction[:]\n', '        cpy.ignoreExprs = self.ignoreExprs[:]\n', '        if self.copyDefaultWhiteChars:\n', '            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS\n', '        return cpy\n', '\n', '    def setName(self, name):\n', '        """\n', '        Define name for this expression, makes debugging and exception messages clearer.\n', '\n', '        Example::\n', '\n', '            Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)\n', '            Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)\n', '        """\n', '        self.name = name\n', '        self.errmsg = "Expected " + self.name\n', '        if __diag__.enable_debug_on_named_expressions:\n', '            self.setDebug()\n', '        return self\n', '\n', '    def setResultsName(self, name, listAllMatches=False):\n', '        """\n', '        Define name for referencing matching tokens as a nested attribute\n', '        of the returned parse results.\n', '        NOTE: this returns a *copy* of the original :class:`ParserElement` object;\n', '        this is so that the client can define a basic element, such as an\n', '        integer, and reference it in multiple places with different names.\n', '\n', '        You can also set results names using the abbreviated syntax,\n', '        ``expr("name")`` in place of ``expr.setResultsName("name")``\n', '        - see :class:`__call__`.\n', '\n', '        Example::\n', '\n', '            date_str = (integer.setResultsName("year") + \'/\'\n', '                        + integer.setResultsName("month") + \'/\'\n', '                        + integer.setResultsName("day"))\n', '\n', '            # equivalent form:\n', '            date_str = integer("year") + \'/\' + integer("month") + \'/\' + integer("day")\n', '        """\n', '        return self._setResultsName(name, listAllMatches)\n', '\n', '    def _setResultsName(self, name, listAllMatches=False):\n', '        newself = self.copy()\n', '        if name.endswith("*"):\n', '            name = name[:-1]\n', '            listAllMatches = True\n', '        newself.resultsName = name\n', '        newself.modalResults = not listAllMatches\n', '        return newself\n', '\n', '    def setBreak(self, breakFlag=True):\n', '        """Method to invoke the Python pdb debugger when this element is\n', '           about to be parsed. Set ``breakFlag`` to True to enable, False to\n', '           disable.\n', '        """\n', '        if breakFlag:\n', '            _parseMethod = self._parse\n', '            def breaker(instring, loc, doActions=True, callPreParse=True):\n', '                import pdb\n', '                # this call to pdb.set_trace() is intentional, not a checkin error\n', '                pdb.set_trace()\n', '                return _parseMethod(instring, loc, doActions, callPreParse)\n', '            breaker._originalParseMethod = _parseMethod\n', '            self._parse = breaker\n', '        else:\n', '            if hasattr(self._parse, "_originalParseMethod"):\n', '                self._parse = self._parse._originalParseMethod\n', '        return self\n', '\n', '    def setParseAction(self, *fns, **kwargs):\n', '        """\n', '        Define one or more actions to perform when successfully matching parse element definition.\n', '        Parse action fn is a callable method with 0-3 arguments, called as ``fn(s, loc, toks)`` ,\n', '        ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where:\n', '\n', '        - s   = the original string being parsed (see note below)\n', '        - loc = the location of the matching substring\n', '        - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object\n', '\n', '        If the functions in fns modify the tokens, they can return them as the return\n', '        value from fn, and the modified list of tokens will replace the original.\n', '        Otherwise, fn does not need to return any value.\n', '\n', '        If None is passed as the parse action, all previously added parse actions for this\n', '        expression are cleared.\n', '\n', '        Optional keyword arguments:\n', '        - callDuringTry = (default= ``False``) indicate if parse action should be run during lookaheads and alternate testing\n', '\n', '        Note: the default parsing behavior is to expand tabs in the input string\n', '        before starting the parsing process.  See :class:`parseString for more\n', '        information on parsing strings containing ``<TAB>`` s, and suggested\n', '        methods to maintain a consistent view of the parsed string, the parse\n', '        location, and line and column positions within the parsed string.\n', '\n', '        Example::\n', '\n', '            integer = Word(nums)\n', "            date_str = integer + '/' + integer + '/' + integer\n", '\n', '            date_str.parseString("1999/12/31")  # -> [\'1999\', \'/\', \'12\', \'/\', \'31\']\n', '\n', '            # use parse action to convert to ints at parse time\n', '            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))\n', "            date_str = integer + '/' + integer + '/' + integer\n", '\n', '            # note that integer fields are now ints, not strings\n', '            date_str.parseString("1999/12/31")  # -> [1999, \'/\', 12, \'/\', 31]\n', '        """\n', '        if list(fns) == [None,]:\n', '            self.parseAction = []\n', '        else:\n', '            if not all(callable(fn) for fn in fns):\n', '                raise TypeError("parse actions must be callable")\n', '            self.parseAction = list(map(_trim_arity, list(fns)))\n', '            self.callDuringTry = kwargs.get("callDuringTry", False)\n', '        return self\n', '\n', '    def addParseAction(self, *fns, **kwargs):\n', '        """\n', "        Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`.\n", '\n', '        See examples in :class:`copy`.\n', '        """\n', '        self.parseAction += list(map(_trim_arity, list(fns)))\n', '        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)\n', '        return self\n', '\n', '    def addCondition(self, *fns, **kwargs):\n', '        """Add a boolean predicate function to expression\'s list of parse actions. See\n', '        :class:`setParseAction` for function call signatures. Unlike ``setParseAction``,\n', '        functions passed to ``addCondition`` need to return boolean success/fail of the condition.\n', '\n', '        Optional keyword arguments:\n', '        - message = define a custom message to be used in the raised exception\n', '        - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException\n', '\n', '        Example::\n', '\n', '            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))\n', '            year_int = integer.copy()\n', '            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")\n', "            date_str = year_int + '/' + integer + '/' + integer\n", '\n', '            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)\n', '        """\n', '        for fn in fns:\n', "            self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'),\n", "                                                           fatal=kwargs.get('fatal', False)))\n", '\n', '        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)\n', '        return self\n', '\n', '    def setFailAction(self, fn):\n', '        """Define action to perform if parsing fails at this expression.\n', '           Fail acton fn is a callable function that takes the arguments\n', '           ``fn(s, loc, expr, err)`` where:\n', '           - s = string being parsed\n', '           - loc = location where expression match was attempted and failed\n', '           - expr = the parse expression that failed\n', '           - err = the exception thrown\n', '           The function returns no value.  It may throw :class:`ParseFatalException`\n', '           if it is desired to stop parsing immediately."""\n', '        self.failAction = fn\n', '        return self\n', '\n', '    def _skipIgnorables(self, instring, loc):\n', '        exprsFound = True\n', '        while exprsFound:\n', '            exprsFound = False\n', '            for e in self.ignoreExprs:\n', '                try:\n', '                    while 1:\n', '                        loc, dummy = e._parse(instring, loc)\n', '                        exprsFound = True\n', '                except ParseException:\n', '                    pass\n', '        return loc\n', '\n', '    def preParse(self, instring, loc):\n', '        if self.ignoreExprs:\n', '            loc = self._skipIgnorables(instring, loc)\n', '\n', '        if self.skipWhitespace:\n', '            wt = self.whiteChars\n', '            instrlen = len(instring)\n', '            while loc < instrlen and instring[loc] in wt:\n', '                loc += 1\n', '\n', '        return loc\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        return loc, []\n', '\n', '    def postParse(self, instring, loc, tokenlist):\n', '        return tokenlist\n', '\n', '    # ~ @profile\n', '    def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True):\n', '        TRY, MATCH, FAIL = 0, 1, 2\n', '        debugging = (self.debug)  # and doActions)\n', '\n', '        if debugging or self.failAction:\n', '            # ~ print ("Match", self, "at loc", loc, "(%d, %d)" % (lineno(loc, instring), col(loc, instring)))\n', '            if self.debugActions[TRY]:\n', '                self.debugActions[TRY](instring, loc, self)\n', '            try:\n', '                if callPreParse and self.callPreparse:\n', '                    preloc = self.preParse(instring, loc)\n', '                else:\n', '                    preloc = loc\n', '                tokensStart = preloc\n', '                if self.mayIndexError or preloc >= len(instring):\n', '                    try:\n', '                        loc, tokens = self.parseImpl(instring, preloc, doActions)\n', '                    except IndexError:\n', '                        raise ParseException(instring, len(instring), self.errmsg, self)\n', '                else:\n', '                    loc, tokens = self.parseImpl(instring, preloc, doActions)\n', '            except Exception as err:\n', '                # ~ print ("Exception raised:", err)\n', '                if self.debugActions[FAIL]:\n', '                    self.debugActions[FAIL](instring, tokensStart, self, err)\n', '                if self.failAction:\n', '                    self.failAction(instring, tokensStart, self, err)\n', '                raise\n', '        else:\n', '            if callPreParse and self.callPreparse:\n', '                preloc = self.preParse(instring, loc)\n', '            else:\n', '                preloc = loc\n', '            tokensStart = preloc\n', '            if self.mayIndexError or preloc >= len(instring):\n', '                try:\n', '                    loc, tokens = self.parseImpl(instring, preloc, doActions)\n', '                except IndexError:\n', '                    raise ParseException(instring, len(instring), self.errmsg, self)\n', '            else:\n', '                loc, tokens = self.parseImpl(instring, preloc, doActions)\n', '\n', '        tokens = self.postParse(instring, loc, tokens)\n', '\n', '        retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults)\n', '        if self.parseAction and (doActions or self.callDuringTry):\n', '            if debugging:\n', '                try:\n', '                    for fn in self.parseAction:\n', '                        try:\n', '                            tokens = fn(instring, tokensStart, retTokens)\n', '                        except IndexError as parse_action_exc:\n', '                            exc = ParseException("exception raised in parse action")\n', '                            exc.__cause__ = parse_action_exc\n', '                            raise exc\n', '\n', '                        if tokens is not None and tokens is not retTokens:\n', '                            retTokens = ParseResults(tokens,\n', '                                                      self.resultsName,\n', '                                                      asList=self.saveAsList and isinstance(tokens, (ParseResults, list)),\n', '                                                      modal=self.modalResults)\n', '                except Exception as err:\n', '                    # ~ print "Exception raised in user parse action:", err\n', '                    if self.debugActions[FAIL]:\n', '                        self.debugActions[FAIL](instring, tokensStart, self, err)\n', '                    raise\n', '            else:\n', '                for fn in self.parseAction:\n', '                    try:\n', '                        tokens = fn(instring, tokensStart, retTokens)\n', '                    except IndexError as parse_action_exc:\n', '                        exc = ParseException("exception raised in parse action")\n', '                        exc.__cause__ = parse_action_exc\n', '                        raise exc\n', '\n', '                    if tokens is not None and tokens is not retTokens:\n', '                        retTokens = ParseResults(tokens,\n', '                                                  self.resultsName,\n', '                                                  asList=self.saveAsList and isinstance(tokens, (ParseResults, list)),\n', '                                                  modal=self.modalResults)\n', '        if debugging:\n', '            # ~ print ("Matched", self, "->", retTokens.asList())\n', '            if self.debugActions[MATCH]:\n', '                self.debugActions[MATCH](instring, tokensStart, loc, self, retTokens)\n', '\n', '        return loc, retTokens\n', '\n', '    def tryParse(self, instring, loc):\n', '        try:\n', '            return self._parse(instring, loc, doActions=False)[0]\n', '        except ParseFatalException:\n', '            raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '    def canParseNext(self, instring, loc):\n', '        try:\n', '            self.tryParse(instring, loc)\n', '        except (ParseException, IndexError):\n', '            return False\n', '        else:\n', '            return True\n', '\n', '    class _UnboundedCache(object):\n', '        def __init__(self):\n', '            cache = {}\n', '            self.not_in_cache = not_in_cache = object()\n', '\n', '            def get(self, key):\n', '                return cache.get(key, not_in_cache)\n', '\n', '            def set(self, key, value):\n', '                cache[key] = value\n', '\n', '            def clear(self):\n', '                cache.clear()\n', '\n', '            def cache_len(self):\n', '                return len(cache)\n', '\n', '            self.get = types.MethodType(get, self)\n', '            self.set = types.MethodType(set, self)\n', '            self.clear = types.MethodType(clear, self)\n', '            self.__len__ = types.MethodType(cache_len, self)\n', '\n', '    if _OrderedDict is not None:\n', '        class _FifoCache(object):\n', '            def __init__(self, size):\n', '                self.not_in_cache = not_in_cache = object()\n', '\n', '                cache = _OrderedDict()\n', '\n', '                def get(self, key):\n', '                    return cache.get(key, not_in_cache)\n', '\n', '                def set(self, key, value):\n', '                    cache[key] = value\n', '                    while len(cache) > size:\n', '                        try:\n', '                            cache.popitem(False)\n', '                        except KeyError:\n', '                            pass\n', '\n', '                def clear(self):\n', '                    cache.clear()\n', '\n', '                def cache_len(self):\n', '                    return len(cache)\n', '\n', '                self.get = types.MethodType(get, self)\n', '                self.set = types.MethodType(set, self)\n', '                self.clear = types.MethodType(clear, self)\n', '                self.__len__ = types.MethodType(cache_len, self)\n', '\n', '    else:\n', '        class _FifoCache(object):\n', '            def __init__(self, size):\n', '                self.not_in_cache = not_in_cache = object()\n', '\n', '                cache = {}\n', '                key_fifo = collections.deque([], size)\n', '\n', '                def get(self, key):\n', '                    return cache.get(key, not_in_cache)\n', '\n', '                def set(self, key, value):\n', '                    cache[key] = value\n', '                    while len(key_fifo) > size:\n', '                        cache.pop(key_fifo.popleft(), None)\n', '                    key_fifo.append(key)\n', '\n', '                def clear(self):\n', '                    cache.clear()\n', '                    key_fifo.clear()\n', '\n', '                def cache_len(self):\n', '                    return len(cache)\n', '\n', '                self.get = types.MethodType(get, self)\n', '                self.set = types.MethodType(set, self)\n', '                self.clear = types.MethodType(clear, self)\n', '                self.__len__ = types.MethodType(cache_len, self)\n', '\n', '    # argument cache for optimizing repeated calls when backtracking through recursive expressions\n', "    packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail\n", '    packrat_cache_lock = RLock()\n', '    packrat_cache_stats = [0, 0]\n', '\n', '    # this method gets repeatedly called during backtracking with the same arguments -\n', '    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression\n', '    def _parseCache(self, instring, loc, doActions=True, callPreParse=True):\n', '        HIT, MISS = 0, 1\n', '        lookup = (self, instring, loc, callPreParse, doActions)\n', '        with ParserElement.packrat_cache_lock:\n', '            cache = ParserElement.packrat_cache\n', '            value = cache.get(lookup)\n', '            if value is cache.not_in_cache:\n', '                ParserElement.packrat_cache_stats[MISS] += 1\n', '                try:\n', '                    value = self._parseNoCache(instring, loc, doActions, callPreParse)\n', '                except ParseBaseException as pe:\n', '                    # cache a copy of the exception, without the traceback\n', '                    cache.set(lookup, pe.__class__(*pe.args))\n', '                    raise\n', '                else:\n', '                    cache.set(lookup, (value[0], value[1].copy()))\n', '                    return value\n', '            else:\n', '                ParserElement.packrat_cache_stats[HIT] += 1\n', '                if isinstance(value, Exception):\n', '                    raise value\n', '                return value[0], value[1].copy()\n', '\n', '    _parse = _parseNoCache\n', '\n', '    @staticmethod\n', '    def resetCache():\n', '        ParserElement.packrat_cache.clear()\n', '        ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)\n', '\n', '    _packratEnabled = False\n', '    @staticmethod\n', '    def enablePackrat(cache_size_limit=128):\n', '        """Enables "packrat" parsing, which adds memoizing to the parsing logic.\n', '           Repeated parse attempts at the same string location (which happens\n', '           often in many complex grammars) can immediately return a cached value,\n', '           instead of re-executing parsing/validating code.  Memoizing is done of\n', '           both valid results and parsing exceptions.\n', '\n', '           Parameters:\n', '\n', '           - cache_size_limit - (default= ``128``) - if an integer value is provided\n', '             will limit the size of the packrat cache; if None is passed, then\n', '             the cache size will be unbounded; if 0 is passed, the cache will\n', '             be effectively disabled.\n', '\n', '           This speedup may break existing programs that use parse actions that\n', '           have side-effects.  For this reason, packrat parsing is disabled when\n', '           you first import pyparsing.  To activate the packrat feature, your\n', '           program must call the class method :class:`ParserElement.enablePackrat`.\n', '           For best results, call ``enablePackrat()`` immediately after\n', '           importing pyparsing.\n', '\n', '           Example::\n', '\n', '               import pyparsing\n', '               pyparsing.ParserElement.enablePackrat()\n', '        """\n', '        if not ParserElement._packratEnabled:\n', '            ParserElement._packratEnabled = True\n', '            if cache_size_limit is None:\n', '                ParserElement.packrat_cache = ParserElement._UnboundedCache()\n', '            else:\n', '                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)\n', '            ParserElement._parse = ParserElement._parseCache\n', '\n', '    def parseString(self, instring, parseAll=False):\n', '        """\n', '        Execute the parse expression with the given string.\n', '        This is the main interface to the client code, once the complete\n', '        expression has been built.\n', '\n', '        Returns the parsed data as a :class:`ParseResults` object, which may be\n', '        accessed as a list, or as a dict or object with attributes if the given parser\n', '        includes results names.\n', '\n', '        If you want the grammar to require that the entire input string be\n', '        successfully parsed, then set ``parseAll`` to True (equivalent to ending\n', '        the grammar with ``StringEnd()``).\n', '\n', '        Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string,\n', '        in order to report proper column numbers in parse actions.\n', '        If the input string contains tabs and\n', '        the grammar uses parse actions that use the ``loc`` argument to index into the\n', '        string being parsed, you can ensure you have a consistent view of the input\n', '        string by:\n', '\n', '        - calling ``parseWithTabs`` on your grammar before calling ``parseString``\n', '          (see :class:`parseWithTabs`)\n', '        - define your parse action using the full ``(s, loc, toks)`` signature, and\n', "          reference the input string using the parse action's ``s`` argument\n", '        - explictly expand the tabs in your input string before calling\n', '          ``parseString``\n', '\n', '        Example::\n', '\n', "            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']\n", "            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text\n", '        """\n', '        ParserElement.resetCache()\n', '        if not self.streamlined:\n', '            self.streamline()\n', '            # ~ self.saveAsList = True\n', '        for e in self.ignoreExprs:\n', '            e.streamline()\n', '        if not self.keepTabs:\n', '            instring = instring.expandtabs()\n', '        try:\n', '            loc, tokens = self._parse(instring, 0)\n', '            if parseAll:\n', '                loc = self.preParse(instring, loc)\n', '                se = Empty() + StringEnd()\n', '                se._parse(instring, loc)\n', '        except ParseBaseException as exc:\n', '            if ParserElement.verbose_stacktrace:\n', '                raise\n', '            else:\n', '                # catch and re-raise exception from here, clearing out pyparsing internal stack trace\n', "                if getattr(exc, '__traceback__', None) is not None:\n", '                    exc.__traceback__ = self._trim_traceback(exc.__traceback__)\n', '                raise exc\n', '        else:\n', '            return tokens\n', '\n', '    def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):\n', '        """\n', '        Scan the input string for expression matches.  Each match will return the\n', '        matching tokens, start location, and end location.  May be called with optional\n', "        ``maxMatches`` argument, to clip scanning after 'n' matches are found.  If\n", '        ``overlap`` is specified, then overlapping matches will be reported.\n', '\n', '        Note that the start and end locations are reported relative to the string\n', '        being parsed.  See :class:`parseString` for more information on parsing\n', '        strings with embedded tabs.\n', '\n', '        Example::\n', '\n', '            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"\n', '            print(source)\n', '            for tokens, start, end in Word(alphas).scanString(source):\n', "                print(' '*start + '^'*(end-start))\n", "                print(' '*start + tokens[0])\n", '\n', '        prints::\n', '\n', '            sldjf123lsdjjkf345sldkjf879lkjsfd987\n', '            ^^^^^\n', '            sldjf\n', '                    ^^^^^^^\n', '                    lsdjjkf\n', '                              ^^^^^^\n', '                              sldkjf\n', '                                       ^^^^^^\n', '                                       lkjsfd\n', '        """\n', '        if not self.streamlined:\n', '            self.streamline()\n', '        for e in self.ignoreExprs:\n', '            e.streamline()\n', '\n', '        if not self.keepTabs:\n', '            instring = _ustr(instring).expandtabs()\n', '        instrlen = len(instring)\n', '        loc = 0\n', '        preparseFn = self.preParse\n', '        parseFn = self._parse\n', '        ParserElement.resetCache()\n', '        matches = 0\n', '        try:\n', '            while loc <= instrlen and matches < maxMatches:\n', '                try:\n', '                    preloc = preparseFn(instring, loc)\n', '                    nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)\n', '                except ParseException:\n', '                    loc = preloc + 1\n', '                else:\n', '                    if nextLoc > loc:\n', '                        matches += 1\n', '                        yield tokens, preloc, nextLoc\n', '                        if overlap:\n', '                            nextloc = preparseFn(instring, loc)\n', '                            if nextloc > loc:\n', '                                loc = nextLoc\n', '                            else:\n', '                                loc += 1\n', '                        else:\n', '                            loc = nextLoc\n', '                    else:\n', '                        loc = preloc + 1\n', '        except ParseBaseException as exc:\n', '            if ParserElement.verbose_stacktrace:\n', '                raise\n', '            else:\n', '                # catch and re-raise exception from here, clearing out pyparsing internal stack trace\n', "                if getattr(exc, '__traceback__', None) is not None:\n", '                    exc.__traceback__ = self._trim_traceback(exc.__traceback__)\n', '                raise exc\n', '\n', '    def transformString(self, instring):\n', '        """\n', '        Extension to :class:`scanString`, to modify matching text with modified tokens that may\n', '        be returned from a parse action.  To use ``transformString``, define a grammar and\n', '        attach a parse action to it that modifies the returned token list.\n', '        Invoking ``transformString()`` on a target string will then scan for matches,\n', '        and replace the matched text patterns according to the logic in the parse\n', '        action.  ``transformString()`` returns the resulting transformed string.\n', '\n', '        Example::\n', '\n', '            wd = Word(alphas)\n', '            wd.setParseAction(lambda toks: toks[0].title())\n', '\n', '            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))\n', '\n', '        prints::\n', '\n', '            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.\n', '        """\n', '        out = []\n', '        lastE = 0\n', '        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to\n', '        # keep string locs straight between transformString and scanString\n', '        self.keepTabs = True\n', '        try:\n', '            for t, s, e in self.scanString(instring):\n', '                out.append(instring[lastE:s])\n', '                if t:\n', '                    if isinstance(t, ParseResults):\n', '                        out += t.asList()\n', '                    elif isinstance(t, list):\n', '                        out += t\n', '                    else:\n', '                        out.append(t)\n', '                lastE = e\n', '            out.append(instring[lastE:])\n', '            out = [o for o in out if o]\n', '            return "".join(map(_ustr, _flatten(out)))\n', '        except ParseBaseException as exc:\n', '            if ParserElement.verbose_stacktrace:\n', '                raise\n', '            else:\n', '                # catch and re-raise exception from here, clearing out pyparsing internal stack trace\n', "                if getattr(exc, '__traceback__', None) is not None:\n", '                    exc.__traceback__ = self._trim_traceback(exc.__traceback__)\n', '                raise exc\n', '\n', '    def searchString(self, instring, maxMatches=_MAX_INT):\n', '        """\n', '        Another extension to :class:`scanString`, simplifying the access to the tokens found\n', '        to match the given parse expression.  May be called with optional\n', "        ``maxMatches`` argument, to clip searching after 'n' matches are found.\n", '\n', '        Example::\n', '\n', '            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters\n', '            cap_word = Word(alphas.upper(), alphas.lower())\n', '\n', '            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))\n', '\n', '            # the sum() builtin can be used to merge results into a single ParseResults object\n', '            print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))\n', '\n', '        prints::\n', '\n', "            [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]\n", "            ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']\n", '        """\n', '        try:\n', '            return ParseResults([t for t, s, e in self.scanString(instring, maxMatches)])\n', '        except ParseBaseException as exc:\n', '            if ParserElement.verbose_stacktrace:\n', '                raise\n', '            else:\n', '                # catch and re-raise exception from here, clearing out pyparsing internal stack trace\n', "                if getattr(exc, '__traceback__', None) is not None:\n", '                    exc.__traceback__ = self._trim_traceback(exc.__traceback__)\n', '                raise exc\n', '\n', '    def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):\n', '        """\n', '        Generator method to split a string using the given expression as a separator.\n', '        May be called with optional ``maxsplit`` argument, to limit the number of splits;\n', '        and the optional ``includeSeparators`` argument (default= ``False``), if the separating\n', '        matching text should be included in the split results.\n', '\n', '        Example::\n', '\n', '            punc = oneOf(list(".,;:/-!?"))\n', '            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))\n', '\n', '        prints::\n', '\n', "            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']\n", '        """\n', '        splits = 0\n', '        last = 0\n', '        for t, s, e in self.scanString(instring, maxMatches=maxsplit):\n', '            yield instring[last:s]\n', '            if includeSeparators:\n', '                yield t[0]\n', '            last = e\n', '        yield instring[last:]\n', '\n', '    def __add__(self, other):\n', '        """\n', '        Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement\n', '        converts them to :class:`Literal`s by default.\n', '\n', '        Example::\n', '\n', '            greet = Word(alphas) + "," + Word(alphas) + "!"\n', '            hello = "Hello, World!"\n', '            print (hello, "->", greet.parseString(hello))\n', '\n', '        prints::\n', '\n', "            Hello, World! -> ['Hello', ',', 'World', '!']\n", '\n', '        ``...`` may be used as a parse expression as a short form of :class:`SkipTo`.\n', '\n', "            Literal('start') + ... + Literal('end')\n", '\n', '        is equivalent to:\n', '\n', '            Literal(\'start\') + SkipTo(\'end\')("_skipped*") + Literal(\'end\')\n', '\n', "        Note that the skipped text is returned with '_skipped' as a results name,\n", '        and to support having multiple skips in the same parser, the value returned is\n', '        a list of all skipped text.\n', '        """\n', '        if other is Ellipsis:\n', '            return _PendingSkip(self)\n', '\n', '        if isinstance(other, basestring):\n', '            other = self._literalStringClass(other)\n', '        if not isinstance(other, ParserElement):\n', '            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', '                          SyntaxWarning, stacklevel=2)\n', '            return None\n', '        return And([self, other])\n', '\n', '    def __radd__(self, other):\n', '        """\n', '        Implementation of + operator when left operand is not a :class:`ParserElement`\n', '        """\n', '        if other is Ellipsis:\n', '            return SkipTo(self)("_skipped*") + self\n', '\n', '        if isinstance(other, basestring):\n', '            other = self._literalStringClass(other)\n', '        if not isinstance(other, ParserElement):\n', '            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', '                          SyntaxWarning, stacklevel=2)\n', '            return None\n', '        return other + self\n', '\n', '    def __sub__(self, other):\n', '        """\n', '        Implementation of - operator, returns :class:`And` with error stop\n', '        """\n', '        if isinstance(other, basestring):\n', '            other = self._literalStringClass(other)\n', '        if not isinstance(other, ParserElement):\n', '            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', '                          SyntaxWarning, stacklevel=2)\n', '            return None\n', '        return self + And._ErrorStop() + other\n', '\n', '    def __rsub__(self, other):\n', '        """\n', '        Implementation of - operator when left operand is not a :class:`ParserElement`\n', '        """\n', '        if isinstance(other, basestring):\n', '            other = self._literalStringClass(other)\n', '        if not isinstance(other, ParserElement):\n', '            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', '                          SyntaxWarning, stacklevel=2)\n', '            return None\n', '        return other - self\n', '\n', '    def __mul__(self, other):\n', '        """\n', '        Implementation of * operator, allows use of ``expr * 3`` in place of\n', '        ``expr + expr + expr``.  Expressions may also me multiplied by a 2-integer\n', '        tuple, similar to ``{min, max}`` multipliers in regular expressions.  Tuples\n', '        may also include ``None`` as in:\n', '         - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent\n', '              to ``expr*n + ZeroOrMore(expr)``\n', '              (read as "at least n instances of ``expr``")\n', '         - ``expr*(None, n)`` is equivalent to ``expr*(0, n)``\n', '              (read as "0 to n instances of ``expr``")\n', '         - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)``\n', '         - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)``\n', '\n', '        Note that ``expr*(None, n)`` does not raise an exception if\n', '        more than n exprs exist in the input stream; that is,\n', '        ``expr*(None, n)`` does not enforce a maximum number of expr\n', '        occurrences.  If this behavior is desired, then write\n', '        ``expr*(None, n) + ~expr``\n', '        """\n', '        if other is Ellipsis:\n', '            other = (0, None)\n', '        elif isinstance(other, tuple) and other[:1] == (Ellipsis,):\n', '            other = ((0, ) + other[1:] + (None,))[:2]\n', '\n', '        if isinstance(other, int):\n', '            minElements, optElements = other, 0\n', '        elif isinstance(other, tuple):\n', '            other = tuple(o if o is not Ellipsis else None for o in other)\n', '            other = (other + (None, None))[:2]\n', '            if other[0] is None:\n', '                other = (0, other[1])\n', '            if isinstance(other[0], int) and other[1] is None:\n', '                if other[0] == 0:\n', '                    return ZeroOrMore(self)\n', '                if other[0] == 1:\n', '                    return OneOrMore(self)\n', '                else:\n', '                    return self * other[0] + ZeroOrMore(self)\n', '            elif isinstance(other[0], int) and isinstance(other[1], int):\n', '                minElements, optElements = other\n', '                optElements -= minElements\n', '            else:\n', '                raise TypeError("cannot multiply \'ParserElement\' and (\'%s\', \'%s\') objects", type(other[0]), type(other[1]))\n', '        else:\n', '            raise TypeError("cannot multiply \'ParserElement\' and \'%s\' objects", type(other))\n', '\n', '        if minElements < 0:\n', '            raise ValueError("cannot multiply ParserElement by negative value")\n', '        if optElements < 0:\n', '            raise ValueError("second tuple value must be greater or equal to first tuple value")\n', '        if minElements == optElements == 0:\n', '            raise ValueError("cannot multiply ParserElement by 0 or (0, 0)")\n', '\n', '        if optElements:\n', '            def makeOptionalList(n):\n', '                if n > 1:\n', '                    return Optional(self + makeOptionalList(n - 1))\n', '                else:\n', '                    return Optional(self)\n', '            if minElements:\n', '                if minElements == 1:\n', '                    ret = self + makeOptionalList(optElements)\n', '                else:\n', '                    ret = And([self] * minElements) + makeOptionalList(optElements)\n', '            else:\n', '                ret = makeOptionalList(optElements)\n', '        else:\n', '            if minElements == 1:\n', '                ret = self\n', '            else:\n', '                ret = And([self] * minElements)\n', '        return ret\n', '\n', '    def __rmul__(self, other):\n', '        return self.__mul__(other)\n', '\n', '    def __or__(self, other):\n', '        """\n', '        Implementation of | operator - returns :class:`MatchFirst`\n', '        """\n', '        if other is Ellipsis:\n', '            return _PendingSkip(self, must_skip=True)\n', '\n', '        if isinstance(other, basestring):\n', '            other = self._literalStringClass(other)\n', '        if not isinstance(other, ParserElement):\n', '            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', '                          SyntaxWarning, stacklevel=2)\n', '            return None\n', '        return MatchFirst([self, other])\n', '\n', '    def __ror__(self, other):\n', '        """\n', '        Implementation of | operator when left operand is not a :class:`ParserElement`\n', '        """\n', '        if isinstance(other, basestring):\n', '            other = self._literalStringClass(other)\n', '        if not isinstance(other, ParserElement):\n', '            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', '                          SyntaxWarning, stacklevel=2)\n', '            return None\n', '        return other | self\n', '\n', '    def __xor__(self, other):\n', '        """\n', '        Implementation of ^ operator - returns :class:`Or`\n', '        """\n', '        if isinstance(other, basestring):\n', '            other = self._literalStringClass(other)\n', '        if not isinstance(other, ParserElement):\n', '            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', '                          SyntaxWarning, stacklevel=2)\n', '            return None\n', '        return Or([self, other])\n', '\n', '    def __rxor__(self, other):\n', '        """\n', '        Implementation of ^ operator when left operand is not a :class:`ParserElement`\n', '        """\n', '        if isinstance(other, basestring):\n', '            other = self._literalStringClass(other)\n', '        if not isinstance(other, ParserElement):\n', '            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', '                          SyntaxWarning, stacklevel=2)\n', '            return None\n', '        return other ^ self\n', '\n', '    def __and__(self, other):\n', '        """\n', '        Implementation of & operator - returns :class:`Each`\n', '        """\n', '        if isinstance(other, basestring):\n', '            other = self._literalStringClass(other)\n', '        if not isinstance(other, ParserElement):\n', '            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', '                          SyntaxWarning, stacklevel=2)\n', '            return None\n', '        return Each([self, other])\n', '\n', '    def __rand__(self, other):\n', '        """\n', '        Implementation of & operator when left operand is not a :class:`ParserElement`\n', '        """\n', '        if isinstance(other, basestring):\n', '            other = self._literalStringClass(other)\n', '        if not isinstance(other, ParserElement):\n', '            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', '                          SyntaxWarning, stacklevel=2)\n', '            return None\n', '        return other & self\n', '\n', '    def __invert__(self):\n', '        """\n', '        Implementation of ~ operator - returns :class:`NotAny`\n', '        """\n', '        return NotAny(self)\n', '\n', '    def __iter__(self):\n', '        # must implement __iter__ to override legacy use of sequential access to __getitem__ to\n', '        # iterate over a sequence\n', "        raise TypeError('%r object is not iterable' % self.__class__.__name__)\n", '\n', '    def __getitem__(self, key):\n', '        """\n', '        use ``[]`` indexing notation as a short form for expression repetition:\n', '         - ``expr[n]`` is equivalent to ``expr*n``\n', '         - ``expr[m, n]`` is equivalent to ``expr*(m, n)``\n', '         - ``expr[n, ...]`` or ``expr[n,]`` is equivalent\n', '              to ``expr*n + ZeroOrMore(expr)``\n', '              (read as "at least n instances of ``expr``")\n', '         - ``expr[..., n]`` is equivalent to ``expr*(0, n)``\n', '              (read as "0 to n instances of ``expr``")\n', '         - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)``\n', '         - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)``\n', '         ``None`` may be used in place of ``...``.\n', '\n', '        Note that ``expr[..., n]`` and ``expr[m, n]``do not raise an exception\n', '        if more than ``n`` ``expr``s exist in the input stream.  If this behavior is\n', '        desired, then write ``expr[..., n] + ~expr``.\n', '       """\n', '\n', '        # convert single arg keys to tuples\n', '        try:\n', '            if isinstance(key, str):\n', '                key = (key,)\n', '            iter(key)\n', '        except TypeError:\n', '            key = (key, key)\n', '\n', '        if len(key) > 2:\n', '            warnings.warn("only 1 or 2 index arguments supported ({0}{1})".format(key[:5],\n', "                                                                                '... [{0}]'.format(len(key))\n", "                                                                                if len(key) > 5 else ''))\n", '\n', '        # clip to 2 elements\n', '        ret = self * tuple(key[:2])\n', '        return ret\n', '\n', '    def __call__(self, name=None):\n', '        """\n', '        Shortcut for :class:`setResultsName`, with ``listAllMatches=False``.\n', '\n', "        If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be\n", '        passed as ``True``.\n', '\n', '        If ``name` is omitted, same as calling :class:`copy`.\n', '\n', '        Example::\n', '\n', '            # these are equivalent\n', '            userdata = Word(alphas).setResultsName("name") + Word(nums + "-").setResultsName("socsecno")\n', '            userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")\n', '        """\n', '        if name is not None:\n', '            return self._setResultsName(name)\n', '        else:\n', '            return self.copy()\n', '\n', '    def suppress(self):\n', '        """\n', '        Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from\n', '        cluttering up returned output.\n', '        """\n', '        return Suppress(self)\n', '\n', '    def leaveWhitespace(self):\n', '        """\n', '        Disables the skipping of whitespace before matching the characters in the\n', "        :class:`ParserElement`'s defined pattern.  This is normally only used internally by\n", '        the pyparsing module, but may be needed in some whitespace-sensitive grammars.\n', '        """\n', '        self.skipWhitespace = False\n', '        return self\n', '\n', '    def setWhitespaceChars(self, chars):\n', '        """\n', '        Overrides the default whitespace chars\n', '        """\n', '        self.skipWhitespace = True\n', '        self.whiteChars = chars\n', '        self.copyDefaultWhiteChars = False\n', '        return self\n', '\n', '    def parseWithTabs(self):\n', '        """\n', '        Overrides default behavior to expand ``<TAB>``s to spaces before parsing the input string.\n', '        Must be called before ``parseString`` when the input grammar contains elements that\n', '        match ``<TAB>`` characters.\n', '        """\n', '        self.keepTabs = True\n', '        return self\n', '\n', '    def ignore(self, other):\n', '        """\n', '        Define expression to be ignored (e.g., comments) while doing pattern\n', '        matching; may be called repeatedly, to define multiple comment or other\n', '        ignorable patterns.\n', '\n', '        Example::\n', '\n', '            patt = OneOrMore(Word(alphas))\n', "            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']\n", '\n', '            patt.ignore(cStyleComment)\n', "            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']\n", '        """\n', '        if isinstance(other, basestring):\n', '            other = Suppress(other)\n', '\n', '        if isinstance(other, Suppress):\n', '            if other not in self.ignoreExprs:\n', '                self.ignoreExprs.append(other)\n', '        else:\n', '            self.ignoreExprs.append(Suppress(other.copy()))\n', '        return self\n', '\n', '    def setDebugActions(self, startAction, successAction, exceptionAction):\n', '        """\n', '        Enable display of debugging messages while doing pattern matching.\n', '        """\n', '        self.debugActions = (startAction or _defaultStartDebugAction,\n', '                             successAction or _defaultSuccessDebugAction,\n', '                             exceptionAction or _defaultExceptionDebugAction)\n', '        self.debug = True\n', '        return self\n', '\n', '    def setDebug(self, flag=True):\n', '        """\n', '        Enable display of debugging messages while doing pattern matching.\n', '        Set ``flag`` to True to enable, False to disable.\n', '\n', '        Example::\n', '\n', '            wd = Word(alphas).setName("alphaword")\n', '            integer = Word(nums).setName("numword")\n', '            term = wd | integer\n', '\n', '            # turn on debugging for wd\n', '            wd.setDebug()\n', '\n', '            OneOrMore(term).parseString("abc 123 xyz 890")\n', '\n', '        prints::\n', '\n', '            Match alphaword at loc 0(1,1)\n', "            Matched alphaword -> ['abc']\n", '            Match alphaword at loc 3(1,4)\n', '            Exception raised:Expected alphaword (at char 4), (line:1, col:5)\n', '            Match alphaword at loc 7(1,8)\n', "            Matched alphaword -> ['xyz']\n", '            Match alphaword at loc 11(1,12)\n', '            Exception raised:Expected alphaword (at char 12), (line:1, col:13)\n', '            Match alphaword at loc 15(1,16)\n', '            Exception raised:Expected alphaword (at char 15), (line:1, col:16)\n', '\n', '        The output shown is that produced by the default debug actions - custom debug actions can be\n', '        specified using :class:`setDebugActions`. Prior to attempting\n', '        to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``\n', '        is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"``\n', '        message is shown. Also note the use of :class:`setName` to assign a human-readable name to the expression,\n', '        which makes debugging and exception messages easier to understand - for instance, the default\n', '        name created for the :class:`Word` expression without calling ``setName`` is ``"W:(ABCD...)"``.\n', '        """\n', '        if flag:\n', '            self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction)\n', '        else:\n', '            self.debug = False\n', '        return self\n', '\n', '    def __str__(self):\n', '        return self.name\n', '\n', '    def __repr__(self):\n', '        return _ustr(self)\n', '\n', '    def streamline(self):\n', '        self.streamlined = True\n', '        self.strRepr = None\n', '        return self\n', '\n', '    def checkRecursion(self, parseElementList):\n', '        pass\n', '\n', '    def validate(self, validateTrace=None):\n', '        """\n', '        Check defined expressions for valid structure, check for infinite recursive definitions.\n', '        """\n', '        self.checkRecursion([])\n', '\n', '    def parseFile(self, file_or_filename, parseAll=False):\n', '        """\n', '        Execute the parse expression on the given file or filename.\n', '        If a filename is specified (instead of a file object),\n', '        the entire file is opened, read, and closed before parsing.\n', '        """\n', '        try:\n', '            file_contents = file_or_filename.read()\n', '        except AttributeError:\n', '            with open(file_or_filename, "r") as f:\n', '                file_contents = f.read()\n', '        try:\n', '            return self.parseString(file_contents, parseAll)\n', '        except ParseBaseException as exc:\n', '            if ParserElement.verbose_stacktrace:\n', '                raise\n', '            else:\n', '                # catch and re-raise exception from here, clearing out pyparsing internal stack trace\n', "                if getattr(exc, '__traceback__', None) is not None:\n", '                    exc.__traceback__ = self._trim_traceback(exc.__traceback__)\n', '                raise exc\n', '\n', '    def __eq__(self, other):\n', '        if self is other:\n', '            return True\n', '        elif isinstance(other, basestring):\n', '            return self.matches(other)\n', '        elif isinstance(other, ParserElement):\n', '            return vars(self) == vars(other)\n', '        return False\n', '\n', '    def __ne__(self, other):\n', '        return not (self == other)\n', '\n', '    def __hash__(self):\n', '        return id(self)\n', '\n', '    def __req__(self, other):\n', '        return self == other\n', '\n', '    def __rne__(self, other):\n', '        return not (self == other)\n', '\n', '    def matches(self, testString, parseAll=True):\n', '        """\n', '        Method for quick testing of a parser against a test string. Good for simple\n', '        inline microtests of sub expressions while building up larger parser.\n', '\n', '        Parameters:\n', '         - testString - to test against this expression for a match\n', '         - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests\n', '\n', '        Example::\n', '\n', '            expr = Word(nums)\n', '            assert expr.matches("100")\n', '        """\n', '        try:\n', '            self.parseString(_ustr(testString), parseAll=parseAll)\n', '            return True\n', '        except ParseBaseException:\n', '            return False\n', '\n', "    def runTests(self, tests, parseAll=True, comment='#',\n", '                 fullDump=True, printResults=True, failureTests=False, postParse=None,\n', '                 file=None):\n', '        """\n', '        Execute the parse expression on a series of test strings, showing each\n', '        test, the parsed results or where the parse failed. Quick and easy way to\n', '        run a parse expression against a list of sample strings.\n', '\n', '        Parameters:\n', '         - tests - a list of separate test strings, or a multiline string of test strings\n', '         - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests\n', "         - comment - (default= ``'#'``) - expression for indicating embedded comments in the test\n", '              string; pass None to disable comment filtering\n', '         - fullDump - (default= ``True``) - dump results as list followed by results names in nested outline;\n', '              if False, only dump nested list\n', '         - printResults - (default= ``True``) prints test output to stdout\n', '         - failureTests - (default= ``False``) indicates if these tests are expected to fail parsing\n', '         - postParse - (default= ``None``) optional callback for successful parse results; called as\n', '              `fn(test_string, parse_results)` and returns a string to be added to the test output\n', '         - file - (default=``None``) optional file-like object to which test output will be written;\n', '              if None, will default to ``sys.stdout``\n', '\n', '        Returns: a (success, results) tuple, where success indicates that all tests succeeded\n', '        (or failed if ``failureTests`` is True), and the results contain a list of lines of each\n', "        test's output\n", '\n', '        Example::\n', '\n', '            number_expr = pyparsing_common.number.copy()\n', '\n', "            result = number_expr.runTests('''\n", '                # unsigned integer\n', '                100\n', '                # negative integer\n', '                -100\n', '                # float with scientific notation\n', '                6.02e23\n', '                # integer with scientific notation\n', '                1e-12\n', "                ''')\n", '            print("Success" if result[0] else "Failed!")\n', '\n', "            result = number_expr.runTests('''\n", '                # stray character\n', '                100Z\n', "                # missing leading digit before '.'\n", '                -.100\n', "                # too many '.'\n", '                3.14.159\n', "                ''', failureTests=True)\n", '            print("Success" if result[0] else "Failed!")\n', '\n', '        prints::\n', '\n', '            # unsigned integer\n', '            100\n', '            [100]\n', '\n', '            # negative integer\n', '            -100\n', '            [-100]\n', '\n', '            # float with scientific notation\n', '            6.02e23\n', '            [6.02e+23]\n', '\n', '            # integer with scientific notation\n', '            1e-12\n', '            [1e-12]\n', '\n', '            Success\n', '\n', '            # stray character\n', '            100Z\n', '               ^\n', '            FAIL: Expected end of text (at char 3), (line:1, col:4)\n', '\n', "            # missing leading digit before '.'\n", '            -.100\n', '            ^\n', '            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)\n', '\n', "            # too many '.'\n", '            3.14.159\n', '                ^\n', '            FAIL: Expected end of text (at char 4), (line:1, col:5)\n', '\n', '            Success\n', '\n', '        Each test string must be on a single line. If you want to test a string that spans multiple\n', '        lines, create a test like this::\n', '\n', '            expr.runTest(r"this is a test\\\\n of strings that spans \\\\n 3 lines")\n', '\n', "        (Note that this is a raw string literal, you must include the leading 'r'.)\n", '        """\n', '        if isinstance(tests, basestring):\n', '            tests = list(map(str.strip, tests.rstrip().splitlines()))\n', '        if isinstance(comment, basestring):\n', '            comment = Literal(comment)\n', '        if file is None:\n', '            file = sys.stdout\n', '        print_ = file.write\n', '\n', '        allResults = []\n', '        comments = []\n', '        success = True\n', "        NL = Literal(r'\\n').addParseAction(replaceWith('\\n')).ignore(quotedString)\n", "        BOM = u'\\ufeff'\n", '        for t in tests:\n', '            if comment is not None and comment.matches(t, False) or comments and not t:\n', '                comments.append(t)\n', '                continue\n', '            if not t:\n', '                continue\n', "            out = ['\\n' + '\\n'.join(comments) if comments else '', t]\n", '            comments = []\n', '            try:\n', '                # convert newline marks to actual newlines, and strip leading BOM if present\n', '                t = NL.transformString(t.lstrip(BOM))\n', '                result = self.parseString(t, parseAll=parseAll)\n', '            except ParseBaseException as pe:\n', '                fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""\n', "                if '\\n' in t:\n", '                    out.append(line(pe.loc, t))\n', "                    out.append(' ' * (col(pe.loc, t) - 1) + '^' + fatal)\n", '                else:\n', "                    out.append(' ' * pe.loc + '^' + fatal)\n", '                out.append("FAIL: " + str(pe))\n', '                success = success and failureTests\n', '                result = pe\n', '            except Exception as exc:\n', '                out.append("FAIL-EXCEPTION: " + str(exc))\n', '                success = success and failureTests\n', '                result = exc\n', '            else:\n', '                success = success and not failureTests\n', '                if postParse is not None:\n', '                    try:\n', '                        pp_value = postParse(t, result)\n', '                        if pp_value is not None:\n', '                            if isinstance(pp_value, ParseResults):\n', '                                out.append(pp_value.dump())\n', '                            else:\n', '                                out.append(str(pp_value))\n', '                        else:\n', '                            out.append(result.dump())\n', '                    except Exception as e:\n', '                        out.append(result.dump(full=fullDump))\n', '                        out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e))\n', '                else:\n', '                    out.append(result.dump(full=fullDump))\n', '\n', '            if printResults:\n', '                if fullDump:\n', "                    out.append('')\n", "                print_('\\n'.join(out))\n", '\n', '            allResults.append((t, result))\n', '\n', '        return success, allResults\n', '\n', '\n', 'class _PendingSkip(ParserElement):\n', "    # internal placeholder class to hold a place were '...' is added to a parser element,\n", '    # once another ParserElement is added, this placeholder will be replaced with a SkipTo\n', '    def __init__(self, expr, must_skip=False):\n', '        super(_PendingSkip, self).__init__()\n', "        self.strRepr = str(expr + Empty()).replace('Empty', '...')\n", '        self.name = self.strRepr\n', '        self.anchor = expr\n', '        self.must_skip = must_skip\n', '\n', '    def __add__(self, other):\n', '        skipper = SkipTo(other).setName("...")("_skipped*")\n', '        if self.must_skip:\n', '            def must_skip(t):\n', "                if not t._skipped or t._skipped.asList() == ['']:\n", '                    del t[0]\n', '                    t.pop("_skipped", None)\n', '            def show_skip(t):\n', "                if t._skipped.asList()[-1:] == ['']:\n", "                    skipped = t.pop('_skipped')\n", "                    t['_skipped'] = 'missing <' + repr(self.anchor) + '>'\n", '            return (self.anchor + skipper().addParseAction(must_skip)\n', '                    | skipper().addParseAction(show_skip)) + other\n', '\n', '        return self.anchor + skipper + other\n', '\n', '    def __repr__(self):\n', '        return self.strRepr\n', '\n', '    def parseImpl(self, *args):\n', '        raise Exception("use of `...` expression without following SkipTo target expression")\n', '\n', '\n', 'class Token(ParserElement):\n', '    """Abstract :class:`ParserElement` subclass, for defining atomic\n', '    matching patterns.\n', '    """\n', '    def __init__(self):\n', '        super(Token, self).__init__(savelist=False)\n', '\n', '\n', 'class Empty(Token):\n', '    """An empty token, will always match.\n', '    """\n', '    def __init__(self):\n', '        super(Empty, self).__init__()\n', '        self.name = "Empty"\n', '        self.mayReturnEmpty = True\n', '        self.mayIndexError = False\n', '\n', '\n', 'class NoMatch(Token):\n', '    """A token that will never match.\n', '    """\n', '    def __init__(self):\n', '        super(NoMatch, self).__init__()\n', '        self.name = "NoMatch"\n', '        self.mayReturnEmpty = True\n', '        self.mayIndexError = False\n', '        self.errmsg = "Unmatchable token"\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '\n', 'class Literal(Token):\n', '    """Token to exactly match a specified string.\n', '\n', '    Example::\n', '\n', "        Literal('blah').parseString('blah')  # -> ['blah']\n", "        Literal('blah').parseString('blahfooblah')  # -> ['blah']\n", '        Literal(\'blah\').parseString(\'bla\')  # -> Exception: Expected "blah"\n', '\n', '    For case-insensitive matching, use :class:`CaselessLiteral`.\n', '\n', '    For keyword matching (force word break before and after the matched string),\n', '    use :class:`Keyword` or :class:`CaselessKeyword`.\n', '    """\n', '    def __init__(self, matchString):\n', '        super(Literal, self).__init__()\n', '        self.match = matchString\n', '        self.matchLen = len(matchString)\n', '        try:\n', '            self.firstMatchChar = matchString[0]\n', '        except IndexError:\n', '            warnings.warn("null string passed to Literal; use Empty() instead",\n', '                            SyntaxWarning, stacklevel=2)\n', '            self.__class__ = Empty\n', '        self.name = \'"%s"\' % _ustr(self.match)\n', '        self.errmsg = "Expected " + self.name\n', '        self.mayReturnEmpty = False\n', '        self.mayIndexError = False\n', '\n', '        # Performance tuning: modify __class__ to select\n', '        # a parseImpl optimized for single-character check\n', '        if self.matchLen == 1 and type(self) is Literal:\n', '            self.__class__ = _SingleCharLiteral\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        if instring[loc] == self.firstMatchChar and instring.startswith(self.match, loc):\n', '            return loc + self.matchLen, self.match\n', '        raise ParseException(instring, loc, self.errmsg, self)\n', '\n', 'class _SingleCharLiteral(Literal):\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        if instring[loc] == self.firstMatchChar:\n', '            return loc + 1, self.match\n', '        raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '_L = Literal\n', 'ParserElement._literalStringClass = Literal\n', '\n', 'class Keyword(Token):\n', '    """Token to exactly match a specified string as a keyword, that is,\n', '    it must be immediately followed by a non-keyword character.  Compare\n', '    with :class:`Literal`:\n', '\n', '     - ``Literal("if")`` will match the leading ``\'if\'`` in\n', "       ``'ifAndOnlyIf'``.\n", '     - ``Keyword("if")`` will not; it will only match the leading\n', "       ``'if'`` in ``'if x=1'``, or ``'if(y==2)'``\n", '\n', '    Accepts two optional constructor arguments in addition to the\n', '    keyword string:\n', '\n', '     - ``identChars`` is a string of characters that would be valid\n', '       identifier characters, defaulting to all alphanumerics + "_" and\n', '       "$"\n', '     - ``caseless`` allows case-insensitive matching, default is ``False``.\n', '\n', '    Example::\n', '\n', '        Keyword("start").parseString("start")  # -> [\'start\']\n', '        Keyword("start").parseString("starting")  # -> Exception\n', '\n', '    For case-insensitive matching, use :class:`CaselessKeyword`.\n', '    """\n', '    DEFAULT_KEYWORD_CHARS = alphanums + "_$"\n', '\n', '    def __init__(self, matchString, identChars=None, caseless=False):\n', '        super(Keyword, self).__init__()\n', '        if identChars is None:\n', '            identChars = Keyword.DEFAULT_KEYWORD_CHARS\n', '        self.match = matchString\n', '        self.matchLen = len(matchString)\n', '        try:\n', '            self.firstMatchChar = matchString[0]\n', '        except IndexError:\n', '            warnings.warn("null string passed to Keyword; use Empty() instead",\n', '                          SyntaxWarning, stacklevel=2)\n', '        self.name = \'"%s"\' % self.match\n', '        self.errmsg = "Expected " + self.name\n', '        self.mayReturnEmpty = False\n', '        self.mayIndexError = False\n', '        self.caseless = caseless\n', '        if caseless:\n', '            self.caselessmatch = matchString.upper()\n', '            identChars = identChars.upper()\n', '        self.identChars = set(identChars)\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        if self.caseless:\n', '            if ((instring[loc:loc + self.matchLen].upper() == self.caselessmatch)\n', '                    and (loc >= len(instring) - self.matchLen\n', '                         or instring[loc + self.matchLen].upper() not in self.identChars)\n', '                    and (loc == 0\n', '                         or instring[loc - 1].upper() not in self.identChars)):\n', '                return loc + self.matchLen, self.match\n', '\n', '        else:\n', '            if instring[loc] == self.firstMatchChar:\n', '                if ((self.matchLen == 1 or instring.startswith(self.match, loc))\n', '                        and (loc >= len(instring) - self.matchLen\n', '                             or instring[loc + self.matchLen] not in self.identChars)\n', '                        and (loc == 0 or instring[loc - 1] not in self.identChars)):\n', '                    return loc + self.matchLen, self.match\n', '\n', '        raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '    def copy(self):\n', '        c = super(Keyword, self).copy()\n', '        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS\n', '        return c\n', '\n', '    @staticmethod\n', '    def setDefaultKeywordChars(chars):\n', '        """Overrides the default Keyword chars\n', '        """\n', '        Keyword.DEFAULT_KEYWORD_CHARS = chars\n', '\n', 'class CaselessLiteral(Literal):\n', '    """Token to match a specified string, ignoring case of letters.\n', '    Note: the matched results will always be in the case of the given\n', '    match string, NOT the case of the input text.\n', '\n', '    Example::\n', '\n', '        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> [\'CMD\', \'CMD\', \'CMD\']\n', '\n', '    (Contrast with example for :class:`CaselessKeyword`.)\n', '    """\n', '    def __init__(self, matchString):\n', '        super(CaselessLiteral, self).__init__(matchString.upper())\n', '        # Preserve the defining literal.\n', '        self.returnString = matchString\n', '        self.name = "\'%s\'" % self.returnString\n', '        self.errmsg = "Expected " + self.name\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        if instring[loc:loc + self.matchLen].upper() == self.match:\n', '            return loc + self.matchLen, self.returnString\n', '        raise ParseException(instring, loc, self.errmsg, self)\n', '\n', 'class CaselessKeyword(Keyword):\n', '    """\n', '    Caseless version of :class:`Keyword`.\n', '\n', '    Example::\n', '\n', '        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> [\'CMD\', \'CMD\']\n', '\n', '    (Contrast with example for :class:`CaselessLiteral`.)\n', '    """\n', '    def __init__(self, matchString, identChars=None):\n', '        super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True)\n', '\n', 'class CloseMatch(Token):\n', '    """A variation on :class:`Literal` which matches "close" matches,\n', "    that is, strings with at most 'n' mismatching characters.\n", '    :class:`CloseMatch` takes parameters:\n', '\n', '     - ``match_string`` - string to be matched\n', '     - ``maxMismatches`` - (``default=1``) maximum number of\n', '       mismatches allowed to count as a match\n', '\n', '    The results from a successful parse will contain the matched text\n', '    from the input string and the following named results:\n', '\n', '     - ``mismatches`` - a list of the positions within the\n', '       match_string where mismatches were found\n', '     - ``original`` - the original match_string used to compare\n', '       against the input string\n', '\n', '    If ``mismatches`` is an empty list, then the match was an exact\n', '    match.\n', '\n', '    Example::\n', '\n', '        patt = CloseMatch("ATCATCGAATGGA")\n', '        patt.parseString("ATCATCGAAXGGA") # -> ([\'ATCATCGAAXGGA\'], {\'mismatches\': [[9]], \'original\': [\'ATCATCGAATGGA\']})\n', '        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected \'ATCATCGAATGGA\' (with up to 1 mismatches) (at char 0), (line:1, col:1)\n', '\n', '        # exact match\n', '        patt.parseString("ATCATCGAATGGA") # -> ([\'ATCATCGAATGGA\'], {\'mismatches\': [[]], \'original\': [\'ATCATCGAATGGA\']})\n', '\n', '        # close match allowing up to 2 mismatches\n', '        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)\n', '        patt.parseString("ATCAXCGAAXGGA") # -> ([\'ATCAXCGAAXGGA\'], {\'mismatches\': [[4, 9]], \'original\': [\'ATCATCGAATGGA\']})\n', '    """\n', '    def __init__(self, match_string, maxMismatches=1):\n', '        super(CloseMatch, self).__init__()\n', '        self.name = match_string\n', '        self.match_string = match_string\n', '        self.maxMismatches = maxMismatches\n', '        self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)\n', '        self.mayIndexError = False\n', '        self.mayReturnEmpty = False\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        start = loc\n', '        instrlen = len(instring)\n', '        maxloc = start + len(self.match_string)\n', '\n', '        if maxloc <= instrlen:\n', '            match_string = self.match_string\n', '            match_stringloc = 0\n', '            mismatches = []\n', '            maxMismatches = self.maxMismatches\n', '\n', '            for match_stringloc, s_m in enumerate(zip(instring[loc:maxloc], match_string)):\n', '                src, mat = s_m\n', '                if src != mat:\n', '                    mismatches.append(match_stringloc)\n', '                    if len(mismatches) > maxMismatches:\n', '                        break\n', '            else:\n', '                loc = match_stringloc + 1\n', '                results = ParseResults([instring[start:loc]])\n', "                results['original'] = match_string\n", "                results['mismatches'] = mismatches\n", '                return loc, results\n', '\n', '        raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '\n', 'class Word(Token):\n', '    """Token for matching words composed of allowed character sets.\n', '    Defined with string containing all allowed initial characters, an\n', '    optional string containing allowed body characters (if omitted,\n', '    defaults to the initial character set), and an optional minimum,\n', '    maximum, and/or exact length.  The default value for ``min`` is\n', '    1 (a minimum value < 1 is not valid); the default values for\n', '    ``max`` and ``exact`` are 0, meaning no maximum or exact\n', '    length restriction. An optional ``excludeChars`` parameter can\n', '    list characters that might be found in the input ``bodyChars``\n', '    string; useful to define a word of all printables except for one or\n', '    two characters, for instance.\n', '\n', '    :class:`srange` is useful for defining custom character set strings\n', '    for defining ``Word`` expressions, using range notation from\n', '    regular expression character sets.\n', '\n', '    A common mistake is to use :class:`Word` to match a specific literal\n', '    string, as in ``Word("Address")``. Remember that :class:`Word`\n', '    uses the string argument to define *sets* of matchable characters.\n', '    This expression would match "Add", "AAA", "dAred", or any other word\n', "    made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an\n", '    exact literal string, use :class:`Literal` or :class:`Keyword`.\n', '\n', '    pyparsing includes helper strings for building Words:\n', '\n', '     - :class:`alphas`\n', '     - :class:`nums`\n', '     - :class:`alphanums`\n', '     - :class:`hexnums`\n', '     - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255\n', '       - accented, tilded, umlauted, etc.)\n', '     - :class:`punc8bit` (non-alphabetic characters in ASCII range\n', '       128-255 - currency, symbols, superscripts, diacriticals, etc.)\n', '     - :class:`printables` (any non-whitespace character)\n', '\n', '    Example::\n', '\n', '        # a word composed of digits\n', '        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))\n', '\n', '        # a word with a leading capital, and zero or more lowercase\n', '        capital_word = Word(alphas.upper(), alphas.lower())\n', '\n', "        # hostnames are alphanumeric, with leading alpha, and '-'\n", "        hostname = Word(alphas, alphanums + '-')\n", '\n', '        # roman numeral (not a strict parser, accepts invalid mix of characters)\n', '        roman = Word("IVXLCDM")\n', '\n', "        # any string of non-whitespace characters, except for ','\n", '        csv_value = Word(printables, excludeChars=",")\n', '    """\n', '    def __init__(self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None):\n', '        super(Word, self).__init__()\n', '        if excludeChars:\n', '            excludeChars = set(excludeChars)\n', "            initChars = ''.join(c for c in initChars if c not in excludeChars)\n", '            if bodyChars:\n', "                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)\n", '        self.initCharsOrig = initChars\n', '        self.initChars = set(initChars)\n', '        if bodyChars:\n', '            self.bodyCharsOrig = bodyChars\n', '            self.bodyChars = set(bodyChars)\n', '        else:\n', '            self.bodyCharsOrig = initChars\n', '            self.bodyChars = set(initChars)\n', '\n', '        self.maxSpecified = max > 0\n', '\n', '        if min < 1:\n', '            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")\n', '\n', '        self.minLen = min\n', '\n', '        if max > 0:\n', '            self.maxLen = max\n', '        else:\n', '            self.maxLen = _MAX_INT\n', '\n', '        if exact > 0:\n', '            self.maxLen = exact\n', '            self.minLen = exact\n', '\n', '        self.name = _ustr(self)\n', '        self.errmsg = "Expected " + self.name\n', '        self.mayIndexError = False\n', '        self.asKeyword = asKeyword\n', '\n', "        if ' ' not in self.initCharsOrig + self.bodyCharsOrig and (min == 1 and max == 0 and exact == 0):\n", '            if self.bodyCharsOrig == self.initCharsOrig:\n', '                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)\n', '            elif len(self.initCharsOrig) == 1:\n', '                self.reString = "%s[%s]*" % (re.escape(self.initCharsOrig),\n', '                                             _escapeRegexRangeChars(self.bodyCharsOrig),)\n', '            else:\n', '                self.reString = "[%s][%s]*" % (_escapeRegexRangeChars(self.initCharsOrig),\n', '                                               _escapeRegexRangeChars(self.bodyCharsOrig),)\n', '            if self.asKeyword:\n', '                self.reString = r"\\b" + self.reString + r"\\b"\n', '\n', '            try:\n', '                self.re = re.compile(self.reString)\n', '            except Exception:\n', '                self.re = None\n', '            else:\n', '                self.re_match = self.re.match\n', '                self.__class__ = _WordRegex\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        if instring[loc] not in self.initChars:\n', '            raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '        start = loc\n', '        loc += 1\n', '        instrlen = len(instring)\n', '        bodychars = self.bodyChars\n', '        maxloc = start + self.maxLen\n', '        maxloc = min(maxloc, instrlen)\n', '        while loc < maxloc and instring[loc] in bodychars:\n', '            loc += 1\n', '\n', '        throwException = False\n', '        if loc - start < self.minLen:\n', '            throwException = True\n', '        elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars:\n', '            throwException = True\n', '        elif self.asKeyword:\n', '            if (start > 0 and instring[start - 1] in bodychars\n', '                    or loc < instrlen and instring[loc] in bodychars):\n', '                throwException = True\n', '\n', '        if throwException:\n', '            raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '        return loc, instring[start:loc]\n', '\n', '    def __str__(self):\n', '        try:\n', '            return super(Word, self).__str__()\n', '        except Exception:\n', '            pass\n', '\n', '        if self.strRepr is None:\n', '\n', '            def charsAsStr(s):\n', '                if len(s) > 4:\n', '                    return s[:4] + "..."\n', '                else:\n', '                    return s\n', '\n', '            if self.initCharsOrig != self.bodyCharsOrig:\n', '                self.strRepr = "W:(%s, %s)" % (charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig))\n', '            else:\n', '                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)\n', '\n', '        return self.strRepr\n', '\n', 'class _WordRegex(Word):\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        result = self.re_match(instring, loc)\n', '        if not result:\n', '            raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '        loc = result.end()\n', '        return loc, result.group()\n', '\n', '\n', 'class Char(_WordRegex):\n', '    """A short-cut class for defining ``Word(characters, exact=1)``,\n', '    when defining a match of any single character in a string of\n', '    characters.\n', '    """\n', '    def __init__(self, charset, asKeyword=False, excludeChars=None):\n', '        super(Char, self).__init__(charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars)\n', '        self.reString = "[%s]" % _escapeRegexRangeChars(\'\'.join(self.initChars))\n', '        if asKeyword:\n', '            self.reString = r"\\b%s\\b" % self.reString\n', '        self.re = re.compile(self.reString)\n', '        self.re_match = self.re.match\n', '\n', '\n', 'class Regex(Token):\n', '    r"""Token for matching strings that match a given regular\n', '    expression. Defined with string specifying the regular expression in\n', '    a form recognized by the stdlib Python  `re module <https://docs.python.org/3/library/re.html>`_.\n', '    If the given regex contains named groups (defined using ``(?P<name>...)``),\n', '    these will be preserved as named parse results.\n', '\n', '    If instead of the Python stdlib re module you wish to use a different RE module\n', '    (such as the `regex` module), you can replace it by either building your\n', '    Regex object with a compiled RE that was compiled using regex:\n', '\n', '    Example::\n', '\n', '        realnum = Regex(r"[+-]?\\d+\\.\\d*")\n', "        date = Regex(r'(?P<year>\\d{4})-(?P<month>\\d\\d?)-(?P<day>\\d\\d?)')\n", '        # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression\n', '        roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")\n', '\n', '        # use regex module instead of stdlib re module to construct a Regex using\n', '        # a compiled regular expression\n', '        import regex\n', "        parser = pp.Regex(regex.compile(r'[0-9]'))\n", '\n', '    """\n', '    def __init__(self, pattern, flags=0, asGroupList=False, asMatch=False):\n', '        """The parameters ``pattern`` and ``flags`` are passed\n', '        to the ``re.compile()`` function as-is. See the Python\n', '        `re module <https://docs.python.org/3/library/re.html>`_ module for an\n', '        explanation of the acceptable patterns and flags.\n', '        """\n', '        super(Regex, self).__init__()\n', '\n', '        if isinstance(pattern, basestring):\n', '            if not pattern:\n', '                warnings.warn("null string passed to Regex; use Empty() instead",\n', '                              SyntaxWarning, stacklevel=2)\n', '\n', '            self.pattern = pattern\n', '            self.flags = flags\n', '\n', '            try:\n', '                self.re = re.compile(self.pattern, self.flags)\n', '                self.reString = self.pattern\n', '            except sre_constants.error:\n', '                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,\n', '                              SyntaxWarning, stacklevel=2)\n', '                raise\n', '\n', "        elif hasattr(pattern, 'pattern') and hasattr(pattern, 'match'):\n", '            self.re = pattern\n', '            self.pattern = self.reString = pattern.pattern\n', '            self.flags = flags\n', '\n', '        else:\n', '            raise TypeError("Regex may only be constructed with a string or a compiled RE object")\n', '\n', '        self.re_match = self.re.match\n', '\n', '        self.name = _ustr(self)\n', '        self.errmsg = "Expected " + self.name\n', '        self.mayIndexError = False\n', '        self.mayReturnEmpty = self.re_match("") is not None\n', '        self.asGroupList = asGroupList\n', '        self.asMatch = asMatch\n', '        if self.asGroupList:\n', '            self.parseImpl = self.parseImplAsGroupList\n', '        if self.asMatch:\n', '            self.parseImpl = self.parseImplAsMatch\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        result = self.re_match(instring, loc)\n', '        if not result:\n', '            raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '        loc = result.end()\n', '        ret = ParseResults(result.group())\n', '        d = result.groupdict()\n', '        if d:\n', '            for k, v in d.items():\n', '                ret[k] = v\n', '        return loc, ret\n', '\n', '    def parseImplAsGroupList(self, instring, loc, doActions=True):\n', '        result = self.re_match(instring, loc)\n', '        if not result:\n', '            raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '        loc = result.end()\n', '        ret = result.groups()\n', '        return loc, ret\n', '\n', '    def parseImplAsMatch(self, instring, loc, doActions=True):\n', '        result = self.re_match(instring, loc)\n', '        if not result:\n', '            raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '        loc = result.end()\n', '        ret = result\n', '        return loc, ret\n', '\n', '    def __str__(self):\n', '        try:\n', '            return super(Regex, self).__str__()\n', '        except Exception:\n', '            pass\n', '\n', '        if self.strRepr is None:\n', '            self.strRepr = "Re:(%s)" % repr(self.pattern)\n', '\n', '        return self.strRepr\n', '\n', '    def sub(self, repl):\n', '        r"""\n', '        Return Regex with an attached parse action to transform the parsed\n', '        result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.\n', '\n', '        Example::\n', '\n', '            make_html = Regex(r"(\\w+):(.*?):").sub(r"<\\1>\\2</\\1>")\n', '            print(make_html.transformString("h1:main title:"))\n', '            # prints "<h1>main title</h1>"\n', '        """\n', '        if self.asGroupList:\n', '            warnings.warn("cannot use sub() with Regex(asGroupList=True)",\n', '                          SyntaxWarning, stacklevel=2)\n', '            raise SyntaxError()\n', '\n', '        if self.asMatch and callable(repl):\n', '            warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)",\n', '                          SyntaxWarning, stacklevel=2)\n', '            raise SyntaxError()\n', '\n', '        if self.asMatch:\n', '            def pa(tokens):\n', '                return tokens[0].expand(repl)\n', '        else:\n', '            def pa(tokens):\n', '                return self.re.sub(repl, tokens[0])\n', '        return self.addParseAction(pa)\n', '\n', 'class QuotedString(Token):\n', '    r"""\n', '    Token for matching strings that are delimited by quoting characters.\n', '\n', '    Defined with the following parameters:\n', '\n', '        - quoteChar - string of one or more characters defining the\n', '          quote delimiting string\n', '        - escChar - character to escape quotes, typically backslash\n', '          (default= ``None``)\n', '        - escQuote - special quote sequence to escape an embedded quote\n', '          string (such as SQL\'s ``""`` to escape an embedded ``"``)\n', '          (default= ``None``)\n', '        - multiline - boolean indicating whether quotes can span\n', '          multiple lines (default= ``False``)\n', '        - unquoteResults - boolean indicating whether the matched text\n', '          should be unquoted (default= ``True``)\n', '        - endQuoteChar - string of one or more characters defining the\n', '          end of the quote delimited string (default= ``None``  => same as\n', '          quoteChar)\n', '        - convertWhitespaceEscapes - convert escaped whitespace\n', "          (``'\\t'``, ``'\\n'``, etc.) to actual whitespace\n", '          (default= ``True``)\n', '\n', '    Example::\n', '\n', '        qs = QuotedString(\'"\')\n', '        print(qs.searchString(\'lsjdf "This is the quote" sldjf\'))\n', "        complex_qs = QuotedString('{{', endQuoteChar='}}')\n", '        print(complex_qs.searchString(\'lsjdf {{This is the "quote"}} sldjf\'))\n', '        sql_qs = QuotedString(\'"\', escQuote=\'""\')\n', '        print(sql_qs.searchString(\'lsjdf "This is the quote with ""embedded"" quotes" sldjf\'))\n', '\n', '    prints::\n', '\n', "        [['This is the quote']]\n", '        [[\'This is the "quote"\']]\n', '        [[\'This is the quote with "embedded" quotes\']]\n', '    """\n', '    def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False,\n', '                 unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):\n', '        super(QuotedString, self).__init__()\n', '\n', '        # remove white space from quote chars - wont work anyway\n', '        quoteChar = quoteChar.strip()\n', '        if not quoteChar:\n', '            warnings.warn("quoteChar cannot be the empty string", SyntaxWarning, stacklevel=2)\n', '            raise SyntaxError()\n', '\n', '        if endQuoteChar is None:\n', '            endQuoteChar = quoteChar\n', '        else:\n', '            endQuoteChar = endQuoteChar.strip()\n', '            if not endQuoteChar:\n', '                warnings.warn("endQuoteChar cannot be the empty string", SyntaxWarning, stacklevel=2)\n', '                raise SyntaxError()\n', '\n', '        self.quoteChar = quoteChar\n', '        self.quoteCharLen = len(quoteChar)\n', '        self.firstQuoteChar = quoteChar[0]\n', '        self.endQuoteChar = endQuoteChar\n', '        self.endQuoteCharLen = len(endQuoteChar)\n', '        self.escChar = escChar\n', '        self.escQuote = escQuote\n', '        self.unquoteResults = unquoteResults\n', '        self.convertWhitespaceEscapes = convertWhitespaceEscapes\n', '\n', '        if multiline:\n', '            self.flags = re.MULTILINE | re.DOTALL\n', "            self.pattern = r'%s(?:[^%s%s]' % (re.escape(self.quoteChar),\n", '                                              _escapeRegexRangeChars(self.endQuoteChar[0]),\n', "                                              (escChar is not None and _escapeRegexRangeChars(escChar) or ''))\n", '        else:\n', '            self.flags = 0\n', "            self.pattern = r'%s(?:[^%s\\n\\r%s]' % (re.escape(self.quoteChar),\n", '                                                  _escapeRegexRangeChars(self.endQuoteChar[0]),\n', "                                                  (escChar is not None and _escapeRegexRangeChars(escChar) or ''))\n", '        if len(self.endQuoteChar) > 1:\n', '            self.pattern += (\n', '                \'|(?:\' + \')|(?:\'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),\n', '                                                   _escapeRegexRangeChars(self.endQuoteChar[i]))\n', "                                      for i in range(len(self.endQuoteChar) - 1, 0, -1)) + ')')\n", '\n', '        if escQuote:\n', "            self.pattern += (r'|(?:%s)' % re.escape(escQuote))\n", '        if escChar:\n', "            self.pattern += (r'|(?:%s.)' % re.escape(escChar))\n", '            self.escCharReplacePattern = re.escape(self.escChar) + "(.)"\n', "        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))\n", '\n', '        try:\n', '            self.re = re.compile(self.pattern, self.flags)\n', '            self.reString = self.pattern\n', '            self.re_match = self.re.match\n', '        except sre_constants.error:\n', '            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,\n', '                          SyntaxWarning, stacklevel=2)\n', '            raise\n', '\n', '        self.name = _ustr(self)\n', '        self.errmsg = "Expected " + self.name\n', '        self.mayIndexError = False\n', '        self.mayReturnEmpty = True\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        result = instring[loc] == self.firstQuoteChar and self.re_match(instring, loc) or None\n', '        if not result:\n', '            raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '        loc = result.end()\n', '        ret = result.group()\n', '\n', '        if self.unquoteResults:\n', '\n', '            # strip off quotes\n', '            ret = ret[self.quoteCharLen: -self.endQuoteCharLen]\n', '\n', '            if isinstance(ret, basestring):\n', '                # replace escaped whitespace\n', "                if '\\\\' in ret and self.convertWhitespaceEscapes:\n", '                    ws_map = {\n', "                        r'\\t': '\\t',\n", "                        r'\\n': '\\n',\n", "                        r'\\f': '\\f',\n", "                        r'\\r': '\\r',\n", '                    }\n', '                    for wslit, wschar in ws_map.items():\n', '                        ret = ret.replace(wslit, wschar)\n', '\n', '                # replace escaped characters\n', '                if self.escChar:\n', '                    ret = re.sub(self.escCharReplacePattern, r"\\g<1>", ret)\n', '\n', '                # replace escaped quotes\n', '                if self.escQuote:\n', '                    ret = ret.replace(self.escQuote, self.endQuoteChar)\n', '\n', '        return loc, ret\n', '\n', '    def __str__(self):\n', '        try:\n', '            return super(QuotedString, self).__str__()\n', '        except Exception:\n', '            pass\n', '\n', '        if self.strRepr is None:\n', '            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)\n', '\n', '        return self.strRepr\n', '\n', '\n', 'class CharsNotIn(Token):\n', '    """Token for matching words composed of characters *not* in a given\n', '    set (will include whitespace in matched characters if not listed in\n', '    the provided exclusion set - see example). Defined with string\n', '    containing all disallowed characters, and an optional minimum,\n', '    maximum, and/or exact length.  The default value for ``min`` is\n', '    1 (a minimum value < 1 is not valid); the default values for\n', '    ``max`` and ``exact`` are 0, meaning no maximum or exact\n', '    length restriction.\n', '\n', '    Example::\n', '\n', "        # define a comma-separated-value as anything that is not a ','\n", "        csv_value = CharsNotIn(',')\n", '        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))\n', '\n', '    prints::\n', '\n', "        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']\n", '    """\n', '    def __init__(self, notChars, min=1, max=0, exact=0):\n', '        super(CharsNotIn, self).__init__()\n', '        self.skipWhitespace = False\n', '        self.notChars = notChars\n', '\n', '        if min < 1:\n', '            raise ValueError("cannot specify a minimum length < 1; use "\n', '                             "Optional(CharsNotIn()) if zero-length char group is permitted")\n', '\n', '        self.minLen = min\n', '\n', '        if max > 0:\n', '            self.maxLen = max\n', '        else:\n', '            self.maxLen = _MAX_INT\n', '\n', '        if exact > 0:\n', '            self.maxLen = exact\n', '            self.minLen = exact\n', '\n', '        self.name = _ustr(self)\n', '        self.errmsg = "Expected " + self.name\n', '        self.mayReturnEmpty = (self.minLen == 0)\n', '        self.mayIndexError = False\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        if instring[loc] in self.notChars:\n', '            raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '        start = loc\n', '        loc += 1\n', '        notchars = self.notChars\n', '        maxlen = min(start + self.maxLen, len(instring))\n', '        while loc < maxlen and instring[loc] not in notchars:\n', '            loc += 1\n', '\n', '        if loc - start < self.minLen:\n', '            raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '        return loc, instring[start:loc]\n', '\n', '    def __str__(self):\n', '        try:\n', '            return super(CharsNotIn, self).__str__()\n', '        except Exception:\n', '            pass\n', '\n', '        if self.strRepr is None:\n', '            if len(self.notChars) > 4:\n', '                self.strRepr = "!W:(%s...)" % self.notChars[:4]\n', '            else:\n', '                self.strRepr = "!W:(%s)" % self.notChars\n', '\n', '        return self.strRepr\n', '\n', 'class White(Token):\n', '    """Special matching class for matching whitespace.  Normally,\n', '    whitespace is ignored by pyparsing grammars.  This class is included\n', '    when some whitespace structures are significant.  Define with\n', '    a string containing the whitespace characters to be matched; default\n', '    is ``" \\\\t\\\\r\\\\n"``.  Also takes optional ``min``,\n', '    ``max``, and ``exact`` arguments, as defined for the\n', '    :class:`Word` class.\n', '    """\n', '    whiteStrs = {\n', "        ' ' : '<SP>',\n", "        '\\t': '<TAB>',\n", "        '\\n': '<LF>',\n", "        '\\r': '<CR>',\n", "        '\\f': '<FF>',\n", "        u'\\u00A0': '<NBSP>',\n", "        u'\\u1680': '<OGHAM_SPACE_MARK>',\n", "        u'\\u180E': '<MONGOLIAN_VOWEL_SEPARATOR>',\n", "        u'\\u2000': '<EN_QUAD>',\n", "        u'\\u2001': '<EM_QUAD>',\n", "        u'\\u2002': '<EN_SPACE>',\n", "        u'\\u2003': '<EM_SPACE>',\n", "        u'\\u2004': '<THREE-PER-EM_SPACE>',\n", "        u'\\u2005': '<FOUR-PER-EM_SPACE>',\n", "        u'\\u2006': '<SIX-PER-EM_SPACE>',\n", "        u'\\u2007': '<FIGURE_SPACE>',\n", "        u'\\u2008': '<PUNCTUATION_SPACE>',\n", "        u'\\u2009': '<THIN_SPACE>',\n", "        u'\\u200A': '<HAIR_SPACE>',\n", "        u'\\u200B': '<ZERO_WIDTH_SPACE>',\n", "        u'\\u202F': '<NNBSP>',\n", "        u'\\u205F': '<MMSP>',\n", "        u'\\u3000': '<IDEOGRAPHIC_SPACE>',\n", '        }\n', '    def __init__(self, ws=" \\t\\r\\n", min=1, max=0, exact=0):\n', '        super(White, self).__init__()\n', '        self.matchWhite = ws\n', '        self.setWhitespaceChars("".join(c for c in self.whiteChars if c not in self.matchWhite))\n', '        # ~ self.leaveWhitespace()\n', '        self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))\n', '        self.mayReturnEmpty = True\n', '        self.errmsg = "Expected " + self.name\n', '\n', '        self.minLen = min\n', '\n', '        if max > 0:\n', '            self.maxLen = max\n', '        else:\n', '            self.maxLen = _MAX_INT\n', '\n', '        if exact > 0:\n', '            self.maxLen = exact\n', '            self.minLen = exact\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        if instring[loc] not in self.matchWhite:\n', '            raise ParseException(instring, loc, self.errmsg, self)\n', '        start = loc\n', '        loc += 1\n', '        maxloc = start + self.maxLen\n', '        maxloc = min(maxloc, len(instring))\n', '        while loc < maxloc and instring[loc] in self.matchWhite:\n', '            loc += 1\n', '\n', '        if loc - start < self.minLen:\n', '            raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '        return loc, instring[start:loc]\n', '\n', '\n', 'class _PositionToken(Token):\n', '    def __init__(self):\n', '        super(_PositionToken, self).__init__()\n', '        self.name = self.__class__.__name__\n', '        self.mayReturnEmpty = True\n', '        self.mayIndexError = False\n', '\n', 'class GoToColumn(_PositionToken):\n', '    """Token to advance to a specific column of input text; useful for\n', '    tabular report scraping.\n', '    """\n', '    def __init__(self, colno):\n', '        super(GoToColumn, self).__init__()\n', '        self.col = colno\n', '\n', '    def preParse(self, instring, loc):\n', '        if col(loc, instring) != self.col:\n', '            instrlen = len(instring)\n', '            if self.ignoreExprs:\n', '                loc = self._skipIgnorables(instring, loc)\n', '            while loc < instrlen and instring[loc].isspace() and col(loc, instring) != self.col:\n', '                loc += 1\n', '        return loc\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        thiscol = col(loc, instring)\n', '        if thiscol > self.col:\n', '            raise ParseException(instring, loc, "Text not in expected column", self)\n', '        newloc = loc + self.col - thiscol\n', '        ret = instring[loc: newloc]\n', '        return newloc, ret\n', '\n', '\n', 'class LineStart(_PositionToken):\n', '    r"""Matches if current position is at the beginning of a line within\n', '    the parse string\n', '\n', '    Example::\n', '\n', "        test = '''\\\n", '        AAA this line\n', '        AAA and this line\n', '          AAA but not this one\n', '        B AAA and definitely not this one\n', "        '''\n", '\n', "        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):\n", '            print(t)\n', '\n', '    prints::\n', '\n', "        ['AAA', ' this line']\n", "        ['AAA', ' and this line']\n", '\n', '    """\n', '    def __init__(self):\n', '        super(LineStart, self).__init__()\n', '        self.errmsg = "Expected start of line"\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        if col(loc, instring) == 1:\n', '            return loc, []\n', '        raise ParseException(instring, loc, self.errmsg, self)\n', '\n', 'class LineEnd(_PositionToken):\n', '    """Matches if current position is at the end of a line within the\n', '    parse string\n', '    """\n', '    def __init__(self):\n', '        super(LineEnd, self).__init__()\n', '        self.setWhitespaceChars(ParserElement.DEFAULT_WHITE_CHARS.replace("\\n", ""))\n', '        self.errmsg = "Expected end of line"\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        if loc < len(instring):\n', '            if instring[loc] == "\\n":\n', '                return loc + 1, "\\n"\n', '            else:\n', '                raise ParseException(instring, loc, self.errmsg, self)\n', '        elif loc == len(instring):\n', '            return loc + 1, []\n', '        else:\n', '            raise ParseException(instring, loc, self.errmsg, self)\n', '\n', 'class StringStart(_PositionToken):\n', '    """Matches if current position is at the beginning of the parse\n', '    string\n', '    """\n', '    def __init__(self):\n', '        super(StringStart, self).__init__()\n', '        self.errmsg = "Expected start of text"\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        if loc != 0:\n', '            # see if entire string up to here is just whitespace and ignoreables\n', '            if loc != self.preParse(instring, 0):\n', '                raise ParseException(instring, loc, self.errmsg, self)\n', '        return loc, []\n', '\n', 'class StringEnd(_PositionToken):\n', '    """Matches if current position is at the end of the parse string\n', '    """\n', '    def __init__(self):\n', '        super(StringEnd, self).__init__()\n', '        self.errmsg = "Expected end of text"\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        if loc < len(instring):\n', '            raise ParseException(instring, loc, self.errmsg, self)\n', '        elif loc == len(instring):\n', '            return loc + 1, []\n', '        elif loc > len(instring):\n', '            return loc, []\n', '        else:\n', '            raise ParseException(instring, loc, self.errmsg, self)\n', '\n', 'class WordStart(_PositionToken):\n', '    """Matches if the current position is at the beginning of a Word,\n', '    and is not preceded by any character in a given set of\n', '    ``wordChars`` (default= ``printables``). To emulate the\n', '    ``\\b`` behavior of regular expressions, use\n', '    ``WordStart(alphanums)``. ``WordStart`` will also match at\n', '    the beginning of the string being parsed, or at the beginning of\n', '    a line.\n', '    """\n', '    def __init__(self, wordChars=printables):\n', '        super(WordStart, self).__init__()\n', '        self.wordChars = set(wordChars)\n', '        self.errmsg = "Not at the start of a word"\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        if loc != 0:\n', '            if (instring[loc - 1] in self.wordChars\n', '                    or instring[loc] not in self.wordChars):\n', '                raise ParseException(instring, loc, self.errmsg, self)\n', '        return loc, []\n', '\n', 'class WordEnd(_PositionToken):\n', '    """Matches if the current position is at the end of a Word, and is\n', '    not followed by any character in a given set of ``wordChars``\n', '    (default= ``printables``). To emulate the ``\\b`` behavior of\n', '    regular expressions, use ``WordEnd(alphanums)``. ``WordEnd``\n', '    will also match at the end of the string being parsed, or at the end\n', '    of a line.\n', '    """\n', '    def __init__(self, wordChars=printables):\n', '        super(WordEnd, self).__init__()\n', '        self.wordChars = set(wordChars)\n', '        self.skipWhitespace = False\n', '        self.errmsg = "Not at the end of a word"\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        instrlen = len(instring)\n', '        if instrlen > 0 and loc < instrlen:\n', '            if (instring[loc] in self.wordChars or\n', '                    instring[loc - 1] not in self.wordChars):\n', '                raise ParseException(instring, loc, self.errmsg, self)\n', '        return loc, []\n', '\n', '\n', 'class ParseExpression(ParserElement):\n', '    """Abstract subclass of ParserElement, for combining and\n', '    post-processing parsed tokens.\n', '    """\n', '    def __init__(self, exprs, savelist=False):\n', '        super(ParseExpression, self).__init__(savelist)\n', '        if isinstance(exprs, _generatorType):\n', '            exprs = list(exprs)\n', '\n', '        if isinstance(exprs, basestring):\n', '            self.exprs = [self._literalStringClass(exprs)]\n', '        elif isinstance(exprs, ParserElement):\n', '            self.exprs = [exprs]\n', '        elif isinstance(exprs, Iterable):\n', '            exprs = list(exprs)\n', '            # if sequence of strings provided, wrap with Literal\n', '            if any(isinstance(expr, basestring) for expr in exprs):\n', '                exprs = (self._literalStringClass(e) if isinstance(e, basestring) else e for e in exprs)\n', '            self.exprs = list(exprs)\n', '        else:\n', '            try:\n', '                self.exprs = list(exprs)\n', '            except TypeError:\n', '                self.exprs = [exprs]\n', '        self.callPreparse = False\n', '\n', '    def append(self, other):\n', '        self.exprs.append(other)\n', '        self.strRepr = None\n', '        return self\n', '\n', '    def leaveWhitespace(self):\n', '        """Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on\n', '           all contained expressions."""\n', '        self.skipWhitespace = False\n', '        self.exprs = [e.copy() for e in self.exprs]\n', '        for e in self.exprs:\n', '            e.leaveWhitespace()\n', '        return self\n', '\n', '    def ignore(self, other):\n', '        if isinstance(other, Suppress):\n', '            if other not in self.ignoreExprs:\n', '                super(ParseExpression, self).ignore(other)\n', '                for e in self.exprs:\n', '                    e.ignore(self.ignoreExprs[-1])\n', '        else:\n', '            super(ParseExpression, self).ignore(other)\n', '            for e in self.exprs:\n', '                e.ignore(self.ignoreExprs[-1])\n', '        return self\n', '\n', '    def __str__(self):\n', '        try:\n', '            return super(ParseExpression, self).__str__()\n', '        except Exception:\n', '            pass\n', '\n', '        if self.strRepr is None:\n', '            self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.exprs))\n', '        return self.strRepr\n', '\n', '    def streamline(self):\n', '        super(ParseExpression, self).streamline()\n', '\n', '        for e in self.exprs:\n', '            e.streamline()\n', '\n', "        # collapse nested And's of the form And(And(And(a, b), c), d) to And(a, b, c, d)\n", "        # but only if there are no parse actions or resultsNames on the nested And's\n", "        # (likewise for Or's and MatchFirst's)\n", '        if len(self.exprs) == 2:\n', '            other = self.exprs[0]\n', '            if (isinstance(other, self.__class__)\n', '                    and not other.parseAction\n', '                    and other.resultsName is None\n', '                    and not other.debug):\n', '                self.exprs = other.exprs[:] + [self.exprs[1]]\n', '                self.strRepr = None\n', '                self.mayReturnEmpty |= other.mayReturnEmpty\n', '                self.mayIndexError  |= other.mayIndexError\n', '\n', '            other = self.exprs[-1]\n', '            if (isinstance(other, self.__class__)\n', '                    and not other.parseAction\n', '                    and other.resultsName is None\n', '                    and not other.debug):\n', '                self.exprs = self.exprs[:-1] + other.exprs[:]\n', '                self.strRepr = None\n', '                self.mayReturnEmpty |= other.mayReturnEmpty\n', '                self.mayIndexError  |= other.mayIndexError\n', '\n', '        self.errmsg = "Expected " + _ustr(self)\n', '\n', '        return self\n', '\n', '    def validate(self, validateTrace=None):\n', '        tmp = (validateTrace if validateTrace is not None else [])[:] + [self]\n', '        for e in self.exprs:\n', '            e.validate(tmp)\n', '        self.checkRecursion([])\n', '\n', '    def copy(self):\n', '        ret = super(ParseExpression, self).copy()\n', '        ret.exprs = [e.copy() for e in self.exprs]\n', '        return ret\n', '\n', '    def _setResultsName(self, name, listAllMatches=False):\n', '        if __diag__.warn_ungrouped_named_tokens_in_collection:\n', '            for e in self.exprs:\n', '                if isinstance(e, ParserElement) and e.resultsName:\n', '                    warnings.warn("{0}: setting results name {1!r} on {2} expression "\n', '                                  "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection",\n', '                                                                                       name,\n', '                                                                                       type(self).__name__,\n', '                                                                                       e.resultsName),\n', '                                  stacklevel=3)\n', '\n', '        return super(ParseExpression, self)._setResultsName(name, listAllMatches)\n', '\n', '\n', 'class And(ParseExpression):\n', '    """\n', '    Requires all given :class:`ParseExpression` s to be found in the given order.\n', '    Expressions may be separated by whitespace.\n', "    May be constructed using the ``'+'`` operator.\n", "    May also be constructed using the ``'-'`` operator, which will\n", '    suppress backtracking.\n', '\n', '    Example::\n', '\n', '        integer = Word(nums)\n', '        name_expr = OneOrMore(Word(alphas))\n', '\n', '        expr = And([integer("id"), name_expr("name"), integer("age")])\n', '        # more easily written as:\n', '        expr = integer("id") + name_expr("name") + integer("age")\n', '    """\n', '\n', '    class _ErrorStop(Empty):\n', '        def __init__(self, *args, **kwargs):\n', '            super(And._ErrorStop, self).__init__(*args, **kwargs)\n', "            self.name = '-'\n", '            self.leaveWhitespace()\n', '\n', '    def __init__(self, exprs, savelist=True):\n', '        exprs = list(exprs)\n', '        if exprs and Ellipsis in exprs:\n', '            tmp = []\n', '            for i, expr in enumerate(exprs):\n', '                if expr is Ellipsis:\n', '                    if i < len(exprs) - 1:\n', '                        skipto_arg = (Empty() + exprs[i + 1]).exprs[-1]\n', '                        tmp.append(SkipTo(skipto_arg)("_skipped*"))\n', '                    else:\n', '                        raise Exception("cannot construct And with sequence ending in ...")\n', '                else:\n', '                    tmp.append(expr)\n', '            exprs[:] = tmp\n', '        super(And, self).__init__(exprs, savelist)\n', '        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)\n', '        self.setWhitespaceChars(self.exprs[0].whiteChars)\n', '        self.skipWhitespace = self.exprs[0].skipWhitespace\n', '        self.callPreparse = True\n', '\n', '    def streamline(self):\n', "        # collapse any _PendingSkip's\n", '        if self.exprs:\n', '            if any(isinstance(e, ParseExpression) and e.exprs and isinstance(e.exprs[-1], _PendingSkip)\n', '                   for e in self.exprs[:-1]):\n', '                for i, e in enumerate(self.exprs[:-1]):\n', '                    if e is None:\n', '                        continue\n', '                    if (isinstance(e, ParseExpression)\n', '                            and e.exprs and isinstance(e.exprs[-1], _PendingSkip)):\n', '                        e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1]\n', '                        self.exprs[i + 1] = None\n', '                self.exprs = [e for e in self.exprs if e is not None]\n', '\n', '        super(And, self).streamline()\n', '        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)\n', '        return self\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        # pass False as last arg to _parse for first element, since we already\n', '        # pre-parsed the string as part of our And pre-parsing\n', '        loc, resultlist = self.exprs[0]._parse(instring, loc, doActions, callPreParse=False)\n', '        errorStop = False\n', '        for e in self.exprs[1:]:\n', '            if isinstance(e, And._ErrorStop):\n', '                errorStop = True\n', '                continue\n', '            if errorStop:\n', '                try:\n', '                    loc, exprtokens = e._parse(instring, loc, doActions)\n', '                except ParseSyntaxException:\n', '                    raise\n', '                except ParseBaseException as pe:\n', '                    pe.__traceback__ = None\n', '                    raise ParseSyntaxException._from_exception(pe)\n', '                except IndexError:\n', '                    raise ParseSyntaxException(instring, len(instring), self.errmsg, self)\n', '            else:\n', '                loc, exprtokens = e._parse(instring, loc, doActions)\n', '            if exprtokens or exprtokens.haskeys():\n', '                resultlist += exprtokens\n', '        return loc, resultlist\n', '\n', '    def __iadd__(self, other):\n', '        if isinstance(other, basestring):\n', '            other = self._literalStringClass(other)\n', '        return self.append(other)  # And([self, other])\n', '\n', '    def checkRecursion(self, parseElementList):\n', '        subRecCheckList = parseElementList[:] + [self]\n', '        for e in self.exprs:\n', '            e.checkRecursion(subRecCheckList)\n', '            if not e.mayReturnEmpty:\n', '                break\n', '\n', '    def __str__(self):\n', '        if hasattr(self, "name"):\n', '            return self.name\n', '\n', '        if self.strRepr is None:\n', '            self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"\n', '\n', '        return self.strRepr\n', '\n', '\n', 'class Or(ParseExpression):\n', '    """Requires that at least one :class:`ParseExpression` is found. If\n', '    two expressions match, the expression that matches the longest\n', "    string will be used. May be constructed using the ``'^'``\n", '    operator.\n', '\n', '    Example::\n', '\n', "        # construct Or using '^' operator\n", '\n', "        number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))\n", '        print(number.searchString("123 3.1416 789"))\n', '\n', '    prints::\n', '\n', "        [['123'], ['3.1416'], ['789']]\n", '    """\n', '    def __init__(self, exprs, savelist=False):\n', '        super(Or, self).__init__(exprs, savelist)\n', '        if self.exprs:\n', '            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)\n', '        else:\n', '            self.mayReturnEmpty = True\n', '\n', '    def streamline(self):\n', '        super(Or, self).streamline()\n', '        if __compat__.collect_all_And_tokens:\n', '            self.saveAsList = any(e.saveAsList for e in self.exprs)\n', '        return self\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        maxExcLoc = -1\n', '        maxException = None\n', '        matches = []\n', '        for e in self.exprs:\n', '            try:\n', '                loc2 = e.tryParse(instring, loc)\n', '            except ParseException as err:\n', '                err.__traceback__ = None\n', '                if err.loc > maxExcLoc:\n', '                    maxException = err\n', '                    maxExcLoc = err.loc\n', '            except IndexError:\n', '                if len(instring) > maxExcLoc:\n', '                    maxException = ParseException(instring, len(instring), e.errmsg, self)\n', '                    maxExcLoc = len(instring)\n', '            else:\n', '                # save match among all matches, to retry longest to shortest\n', '                matches.append((loc2, e))\n', '\n', '        if matches:\n', '            # re-evaluate all matches in descending order of length of match, in case attached actions\n', '            # might change whether or how much they match of the input.\n', '            matches.sort(key=itemgetter(0), reverse=True)\n', '\n', '            if not doActions:\n', '                # no further conditions or parse actions to change the selection of\n', '                # alternative, so the first match will be the best match\n', '                best_expr = matches[0][1]\n', '                return best_expr._parse(instring, loc, doActions)\n', '\n', '            longest = -1, None\n', '            for loc1, expr1 in matches:\n', '                if loc1 <= longest[0]:\n', '                    # already have a longer match than this one will deliver, we are done\n', '                    return longest\n', '\n', '                try:\n', '                    loc2, toks = expr1._parse(instring, loc, doActions)\n', '                except ParseException as err:\n', '                    err.__traceback__ = None\n', '                    if err.loc > maxExcLoc:\n', '                        maxException = err\n', '                        maxExcLoc = err.loc\n', '                else:\n', '                    if loc2 >= loc1:\n', '                        return loc2, toks\n', "                    # didn't match as much as before\n", '                    elif loc2 > longest[0]:\n', '                        longest = loc2, toks\n', '\n', '            if longest != (-1, None):\n', '                return longest\n', '\n', '        if maxException is not None:\n', '            maxException.msg = self.errmsg\n', '            raise maxException\n', '        else:\n', '            raise ParseException(instring, loc, "no defined alternatives to match", self)\n', '\n', '\n', '    def __ixor__(self, other):\n', '        if isinstance(other, basestring):\n', '            other = self._literalStringClass(other)\n', '        return self.append(other)  # Or([self, other])\n', '\n', '    def __str__(self):\n', '        if hasattr(self, "name"):\n', '            return self.name\n', '\n', '        if self.strRepr is None:\n', '            self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"\n', '\n', '        return self.strRepr\n', '\n', '    def checkRecursion(self, parseElementList):\n', '        subRecCheckList = parseElementList[:] + [self]\n', '        for e in self.exprs:\n', '            e.checkRecursion(subRecCheckList)\n', '\n', '    def _setResultsName(self, name, listAllMatches=False):\n', '        if (not __compat__.collect_all_And_tokens\n', '                and __diag__.warn_multiple_tokens_in_named_alternation):\n', '            if any(isinstance(e, And) for e in self.exprs):\n', '                warnings.warn("{0}: setting results name {1!r} on {2} expression "\n', '                              "may only return a single token for an And alternative, "\n', '                              "in future will return the full list of tokens".format(\n', '                    "warn_multiple_tokens_in_named_alternation", name, type(self).__name__),\n', '                    stacklevel=3)\n', '\n', '        return super(Or, self)._setResultsName(name, listAllMatches)\n', '\n', '\n', 'class MatchFirst(ParseExpression):\n', '    """Requires that at least one :class:`ParseExpression` is found. If\n', '    two expressions match, the first one listed is the one that will\n', "    match. May be constructed using the ``'|'`` operator.\n", '\n', '    Example::\n', '\n', "        # construct MatchFirst using '|' operator\n", '\n', '        # watch the order of expressions to match\n', "        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))\n", '        print(number.searchString("123 3.1416 789")) #  Fail! -> [[\'123\'], [\'3\'], [\'1416\'], [\'789\']]\n', '\n', '        # put more selective expression first\n', "        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)\n", '        print(number.searchString("123 3.1416 789")) #  Better -> [[\'123\'], [\'3.1416\'], [\'789\']]\n', '    """\n', '    def __init__(self, exprs, savelist=False):\n', '        super(MatchFirst, self).__init__(exprs, savelist)\n', '        if self.exprs:\n', '            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)\n', '        else:\n', '            self.mayReturnEmpty = True\n', '\n', '    def streamline(self):\n', '        super(MatchFirst, self).streamline()\n', '        if __compat__.collect_all_And_tokens:\n', '            self.saveAsList = any(e.saveAsList for e in self.exprs)\n', '        return self\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        maxExcLoc = -1\n', '        maxException = None\n', '        for e in self.exprs:\n', '            try:\n', '                ret = e._parse(instring, loc, doActions)\n', '                return ret\n', '            except ParseException as err:\n', '                if err.loc > maxExcLoc:\n', '                    maxException = err\n', '                    maxExcLoc = err.loc\n', '            except IndexError:\n', '                if len(instring) > maxExcLoc:\n', '                    maxException = ParseException(instring, len(instring), e.errmsg, self)\n', '                    maxExcLoc = len(instring)\n', '\n', '        # only got here if no expression matched, raise exception for match that made it the furthest\n', '        else:\n', '            if maxException is not None:\n', '                maxException.msg = self.errmsg\n', '                raise maxException\n', '            else:\n', '                raise ParseException(instring, loc, "no defined alternatives to match", self)\n', '\n', '    def __ior__(self, other):\n', '        if isinstance(other, basestring):\n', '            other = self._literalStringClass(other)\n', '        return self.append(other)  # MatchFirst([self, other])\n', '\n', '    def __str__(self):\n', '        if hasattr(self, "name"):\n', '            return self.name\n', '\n', '        if self.strRepr is None:\n', '            self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"\n', '\n', '        return self.strRepr\n', '\n', '    def checkRecursion(self, parseElementList):\n', '        subRecCheckList = parseElementList[:] + [self]\n', '        for e in self.exprs:\n', '            e.checkRecursion(subRecCheckList)\n', '\n', '    def _setResultsName(self, name, listAllMatches=False):\n', '        if (not __compat__.collect_all_And_tokens\n', '                and __diag__.warn_multiple_tokens_in_named_alternation):\n', '            if any(isinstance(e, And) for e in self.exprs):\n', '                warnings.warn("{0}: setting results name {1!r} on {2} expression "\n', '                              "may only return a single token for an And alternative, "\n', '                              "in future will return the full list of tokens".format(\n', '                    "warn_multiple_tokens_in_named_alternation", name, type(self).__name__),\n', '                    stacklevel=3)\n', '\n', '        return super(MatchFirst, self)._setResultsName(name, listAllMatches)\n', '\n', '\n', 'class Each(ParseExpression):\n', '    """Requires all given :class:`ParseExpression` s to be found, but in\n', '    any order. Expressions may be separated by whitespace.\n', '\n', "    May be constructed using the ``'&'`` operator.\n", '\n', '    Example::\n', '\n', '        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")\n', '        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")\n', '        integer = Word(nums)\n', '        shape_attr = "shape:" + shape_type("shape")\n', '        posn_attr = "posn:" + Group(integer("x") + \',\' + integer("y"))("posn")\n', '        color_attr = "color:" + color("color")\n', '        size_attr = "size:" + integer("size")\n', '\n', "        # use Each (using operator '&') to accept attributes in any order\n", '        # (shape and posn are required, color and size are optional)\n', '        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)\n', '\n', "        shape_spec.runTests('''\n", '            shape: SQUARE color: BLACK posn: 100, 120\n', '            shape: CIRCLE size: 50 color: BLUE posn: 50,80\n', '            color:GREEN size:20 shape:TRIANGLE posn:20,40\n', "            '''\n", '            )\n', '\n', '    prints::\n', '\n', '        shape: SQUARE color: BLACK posn: 100, 120\n', "        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]\n", '        - color: BLACK\n', "        - posn: ['100', ',', '120']\n", '          - x: 100\n', '          - y: 120\n', '        - shape: SQUARE\n', '\n', '\n', '        shape: CIRCLE size: 50 color: BLUE posn: 50,80\n', "        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]\n", '        - color: BLUE\n', "        - posn: ['50', ',', '80']\n", '          - x: 50\n', '          - y: 80\n', '        - shape: CIRCLE\n', '        - size: 50\n', '\n', '\n', '        color: GREEN size: 20 shape: TRIANGLE posn: 20,40\n', "        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]\n", '        - color: GREEN\n', "        - posn: ['20', ',', '40']\n", '          - x: 20\n', '          - y: 40\n', '        - shape: TRIANGLE\n', '        - size: 20\n', '    """\n', '    def __init__(self, exprs, savelist=True):\n', '        super(Each, self).__init__(exprs, savelist)\n', '        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)\n', '        self.skipWhitespace = True\n', '        self.initExprGroups = True\n', '        self.saveAsList = True\n', '\n', '    def streamline(self):\n', '        super(Each, self).streamline()\n', '        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)\n', '        return self\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        if self.initExprGroups:\n', '            self.opt1map = dict((id(e.expr), e) for e in self.exprs if isinstance(e, Optional))\n', '            opt1 = [e.expr for e in self.exprs if isinstance(e, Optional)]\n', '            opt2 = [e for e in self.exprs if e.mayReturnEmpty and not isinstance(e, (Optional, Regex))]\n', '            self.optionals = opt1 + opt2\n', '            self.multioptionals = [e.expr for e in self.exprs if isinstance(e, ZeroOrMore)]\n', '            self.multirequired = [e.expr for e in self.exprs if isinstance(e, OneOrMore)]\n', '            self.required = [e for e in self.exprs if not isinstance(e, (Optional, ZeroOrMore, OneOrMore))]\n', '            self.required += self.multirequired\n', '            self.initExprGroups = False\n', '        tmpLoc = loc\n', '        tmpReqd = self.required[:]\n', '        tmpOpt  = self.optionals[:]\n', '        matchOrder = []\n', '\n', '        keepMatching = True\n', '        while keepMatching:\n', '            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired\n', '            failed = []\n', '            for e in tmpExprs:\n', '                try:\n', '                    tmpLoc = e.tryParse(instring, tmpLoc)\n', '                except ParseException:\n', '                    failed.append(e)\n', '                else:\n', '                    matchOrder.append(self.opt1map.get(id(e), e))\n', '                    if e in tmpReqd:\n', '                        tmpReqd.remove(e)\n', '                    elif e in tmpOpt:\n', '                        tmpOpt.remove(e)\n', '            if len(failed) == len(tmpExprs):\n', '                keepMatching = False\n', '\n', '        if tmpReqd:\n', '            missing = ", ".join(_ustr(e) for e in tmpReqd)\n', '            raise ParseException(instring, loc, "Missing one or more required elements (%s)" % missing)\n', '\n', '        # add any unmatched Optionals, in case they have default values defined\n', '        matchOrder += [e for e in self.exprs if isinstance(e, Optional) and e.expr in tmpOpt]\n', '\n', '        resultlist = []\n', '        for e in matchOrder:\n', '            loc, results = e._parse(instring, loc, doActions)\n', '            resultlist.append(results)\n', '\n', '        finalResults = sum(resultlist, ParseResults([]))\n', '        return loc, finalResults\n', '\n', '    def __str__(self):\n', '        if hasattr(self, "name"):\n', '            return self.name\n', '\n', '        if self.strRepr is None:\n', '            self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"\n', '\n', '        return self.strRepr\n', '\n', '    def checkRecursion(self, parseElementList):\n', '        subRecCheckList = parseElementList[:] + [self]\n', '        for e in self.exprs:\n', '            e.checkRecursion(subRecCheckList)\n', '\n', '\n', 'class ParseElementEnhance(ParserElement):\n', '    """Abstract subclass of :class:`ParserElement`, for combining and\n', '    post-processing parsed tokens.\n', '    """\n', '    def __init__(self, expr, savelist=False):\n', '        super(ParseElementEnhance, self).__init__(savelist)\n', '        if isinstance(expr, basestring):\n', '            if issubclass(self._literalStringClass, Token):\n', '                expr = self._literalStringClass(expr)\n', '            else:\n', '                expr = self._literalStringClass(Literal(expr))\n', '        self.expr = expr\n', '        self.strRepr = None\n', '        if expr is not None:\n', '            self.mayIndexError = expr.mayIndexError\n', '            self.mayReturnEmpty = expr.mayReturnEmpty\n', '            self.setWhitespaceChars(expr.whiteChars)\n', '            self.skipWhitespace = expr.skipWhitespace\n', '            self.saveAsList = expr.saveAsList\n', '            self.callPreparse = expr.callPreparse\n', '            self.ignoreExprs.extend(expr.ignoreExprs)\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        if self.expr is not None:\n', '            return self.expr._parse(instring, loc, doActions, callPreParse=False)\n', '        else:\n', '            raise ParseException("", loc, self.errmsg, self)\n', '\n', '    def leaveWhitespace(self):\n', '        self.skipWhitespace = False\n', '        self.expr = self.expr.copy()\n', '        if self.expr is not None:\n', '            self.expr.leaveWhitespace()\n', '        return self\n', '\n', '    def ignore(self, other):\n', '        if isinstance(other, Suppress):\n', '            if other not in self.ignoreExprs:\n', '                super(ParseElementEnhance, self).ignore(other)\n', '                if self.expr is not None:\n', '                    self.expr.ignore(self.ignoreExprs[-1])\n', '        else:\n', '            super(ParseElementEnhance, self).ignore(other)\n', '            if self.expr is not None:\n', '                self.expr.ignore(self.ignoreExprs[-1])\n', '        return self\n', '\n', '    def streamline(self):\n', '        super(ParseElementEnhance, self).streamline()\n', '        if self.expr is not None:\n', '            self.expr.streamline()\n', '        return self\n', '\n', '    def checkRecursion(self, parseElementList):\n', '        if self in parseElementList:\n', '            raise RecursiveGrammarException(parseElementList + [self])\n', '        subRecCheckList = parseElementList[:] + [self]\n', '        if self.expr is not None:\n', '            self.expr.checkRecursion(subRecCheckList)\n', '\n', '    def validate(self, validateTrace=None):\n', '        if validateTrace is None:\n', '            validateTrace = []\n', '        tmp = validateTrace[:] + [self]\n', '        if self.expr is not None:\n', '            self.expr.validate(tmp)\n', '        self.checkRecursion([])\n', '\n', '    def __str__(self):\n', '        try:\n', '            return super(ParseElementEnhance, self).__str__()\n', '        except Exception:\n', '            pass\n', '\n', '        if self.strRepr is None and self.expr is not None:\n', '            self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.expr))\n', '        return self.strRepr\n', '\n', '\n', 'class FollowedBy(ParseElementEnhance):\n', '    """Lookahead matching of the given parse expression.\n', '    ``FollowedBy`` does *not* advance the parsing position within\n', '    the input string, it only verifies that the specified parse\n', '    expression matches at the current position.  ``FollowedBy``\n', '    always returns a null token list. If any results names are defined\n', '    in the lookahead expression, those *will* be returned for access by\n', '    name.\n', '\n', '    Example::\n', '\n', "        # use FollowedBy to match a label only if it is followed by a ':'\n", '        data_word = Word(alphas)\n', "        label = data_word + FollowedBy(':')\n", "        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))\n", '\n', '        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()\n', '\n', '    prints::\n', '\n', "        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]\n", '    """\n', '    def __init__(self, expr):\n', '        super(FollowedBy, self).__init__(expr)\n', '        self.mayReturnEmpty = True\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        # by using self._expr.parse and deleting the contents of the returned ParseResults list\n', '        # we keep any named results that were defined in the FollowedBy expression\n', '        _, ret = self.expr._parse(instring, loc, doActions=doActions)\n', '        del ret[:]\n', '\n', '        return loc, ret\n', '\n', '\n', 'class PrecededBy(ParseElementEnhance):\n', '    """Lookbehind matching of the given parse expression.\n', '    ``PrecededBy`` does not advance the parsing position within the\n', '    input string, it only verifies that the specified parse expression\n', '    matches prior to the current position.  ``PrecededBy`` always\n', '    returns a null token list, but if a results name is defined on the\n', '    given expression, it is returned.\n', '\n', '    Parameters:\n', '\n', '     - expr - expression that must match prior to the current parse\n', '       location\n', '     - retreat - (default= ``None``) - (int) maximum number of characters\n', '       to lookbehind prior to the current parse location\n', '\n', '    If the lookbehind expression is a string, Literal, Keyword, or\n', '    a Word or CharsNotIn with a specified exact or maximum length, then\n', '    the retreat parameter is not required. Otherwise, retreat must be\n', '    specified to give a maximum number of characters to look back from\n', '    the current parse position for a lookbehind match.\n', '\n', '    Example::\n', '\n', '        # VB-style variable names with type prefixes\n', '        int_var = PrecededBy("#") + pyparsing_common.identifier\n', '        str_var = PrecededBy("$") + pyparsing_common.identifier\n', '\n', '    """\n', '    def __init__(self, expr, retreat=None):\n', '        super(PrecededBy, self).__init__(expr)\n', '        self.expr = self.expr().leaveWhitespace()\n', '        self.mayReturnEmpty = True\n', '        self.mayIndexError = False\n', '        self.exact = False\n', '        if isinstance(expr, str):\n', '            retreat = len(expr)\n', '            self.exact = True\n', '        elif isinstance(expr, (Literal, Keyword)):\n', '            retreat = expr.matchLen\n', '            self.exact = True\n', '        elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:\n', '            retreat = expr.maxLen\n', '            self.exact = True\n', '        elif isinstance(expr, _PositionToken):\n', '            retreat = 0\n', '            self.exact = True\n', '        self.retreat = retreat\n', '        self.errmsg = "not preceded by " + str(expr)\n', '        self.skipWhitespace = False\n', '        self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))\n', '\n', '    def parseImpl(self, instring, loc=0, doActions=True):\n', '        if self.exact:\n', '            if loc < self.retreat:\n', '                raise ParseException(instring, loc, self.errmsg)\n', '            start = loc - self.retreat\n', '            _, ret = self.expr._parse(instring, start)\n', '        else:\n', '            # retreat specified a maximum lookbehind window, iterate\n', '            test_expr = self.expr + StringEnd()\n', '            instring_slice = instring[max(0, loc - self.retreat):loc]\n', '            last_expr = ParseException(instring, loc, self.errmsg)\n', '            for offset in range(1, min(loc, self.retreat + 1)+1):\n', '                try:\n', "                    # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))\n", '                    _, ret = test_expr._parse(instring_slice, len(instring_slice) - offset)\n', '                except ParseBaseException as pbe:\n', '                    last_expr = pbe\n', '                else:\n', '                    break\n', '            else:\n', '                raise last_expr\n', '        return loc, ret\n', '\n', '\n', 'class NotAny(ParseElementEnhance):\n', '    """Lookahead to disallow matching with the given parse expression.\n', '    ``NotAny`` does *not* advance the parsing position within the\n', '    input string, it only verifies that the specified parse expression\n', '    does *not* match at the current position.  Also, ``NotAny`` does\n', '    *not* skip over leading whitespace. ``NotAny`` always returns\n', "    a null token list.  May be constructed using the '~' operator.\n", '\n', '    Example::\n', '\n', '        AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())\n', '\n', '        # take care not to mistake keywords for identifiers\n', '        ident = ~(AND | OR | NOT) + Word(alphas)\n', '        boolean_term = Optional(NOT) + ident\n', '\n', '        # very crude boolean expression - to support parenthesis groups and\n', '        # operation hierarchy, use infixNotation\n', '        boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term)\n', '\n', '        # integers that are followed by "." are actually floats\n', '        integer = Word(nums) + ~Char(".")\n', '    """\n', '    def __init__(self, expr):\n', '        super(NotAny, self).__init__(expr)\n', '        # ~ self.leaveWhitespace()\n', "        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs\n", '        self.mayReturnEmpty = True\n', '        self.errmsg = "Found unwanted token, " + _ustr(self.expr)\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        if self.expr.canParseNext(instring, loc):\n', '            raise ParseException(instring, loc, self.errmsg, self)\n', '        return loc, []\n', '\n', '    def __str__(self):\n', '        if hasattr(self, "name"):\n', '            return self.name\n', '\n', '        if self.strRepr is None:\n', '            self.strRepr = "~{" + _ustr(self.expr) + "}"\n', '\n', '        return self.strRepr\n', '\n', 'class _MultipleMatch(ParseElementEnhance):\n', '    def __init__(self, expr, stopOn=None):\n', '        super(_MultipleMatch, self).__init__(expr)\n', '        self.saveAsList = True\n', '        ender = stopOn\n', '        if isinstance(ender, basestring):\n', '            ender = self._literalStringClass(ender)\n', '        self.stopOn(ender)\n', '\n', '    def stopOn(self, ender):\n', '        if isinstance(ender, basestring):\n', '            ender = self._literalStringClass(ender)\n', '        self.not_ender = ~ender if ender is not None else None\n', '        return self\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        self_expr_parse = self.expr._parse\n', '        self_skip_ignorables = self._skipIgnorables\n', '        check_ender = self.not_ender is not None\n', '        if check_ender:\n', '            try_not_ender = self.not_ender.tryParse\n', '\n', '        # must be at least one (but first see if we are the stopOn sentinel;\n', '        # if so, fail)\n', '        if check_ender:\n', '            try_not_ender(instring, loc)\n', '        loc, tokens = self_expr_parse(instring, loc, doActions, callPreParse=False)\n', '        try:\n', '            hasIgnoreExprs = (not not self.ignoreExprs)\n', '            while 1:\n', '                if check_ender:\n', '                    try_not_ender(instring, loc)\n', '                if hasIgnoreExprs:\n', '                    preloc = self_skip_ignorables(instring, loc)\n', '                else:\n', '                    preloc = loc\n', '                loc, tmptokens = self_expr_parse(instring, preloc, doActions)\n', '                if tmptokens or tmptokens.haskeys():\n', '                    tokens += tmptokens\n', '        except (ParseException, IndexError):\n', '            pass\n', '\n', '        return loc, tokens\n', '\n', '    def _setResultsName(self, name, listAllMatches=False):\n', '        if __diag__.warn_ungrouped_named_tokens_in_collection:\n', "            for e in [self.expr] + getattr(self.expr, 'exprs', []):\n", '                if isinstance(e, ParserElement) and e.resultsName:\n', '                    warnings.warn("{0}: setting results name {1!r} on {2} expression "\n', '                                  "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection",\n', '                                                                                       name,\n', '                                                                                       type(self).__name__,\n', '                                                                                       e.resultsName),\n', '                                  stacklevel=3)\n', '\n', '        return super(_MultipleMatch, self)._setResultsName(name, listAllMatches)\n', '\n', '\n', 'class OneOrMore(_MultipleMatch):\n', '    """Repetition of one or more of the given expression.\n', '\n', '    Parameters:\n', '     - expr - expression that must match one or more times\n', '     - stopOn - (default= ``None``) - expression for a terminating sentinel\n', '          (only required if the sentinel would ordinarily match the repetition\n', '          expression)\n', '\n', '    Example::\n', '\n', '        data_word = Word(alphas)\n', "        label = data_word + FollowedBy(':')\n", "        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))\n", '\n', '        text = "shape: SQUARE posn: upper left color: BLACK"\n', "        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]\n", '\n', '        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data\n', "        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))\n", "        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]\n", '\n', '        # could also be written as\n', '        (attr_expr * (1,)).parseString(text).pprint()\n', '    """\n', '\n', '    def __str__(self):\n', '        if hasattr(self, "name"):\n', '            return self.name\n', '\n', '        if self.strRepr is None:\n', '            self.strRepr = "{" + _ustr(self.expr) + "}..."\n', '\n', '        return self.strRepr\n', '\n', 'class ZeroOrMore(_MultipleMatch):\n', '    """Optional repetition of zero or more of the given expression.\n', '\n', '    Parameters:\n', '     - expr - expression that must match zero or more times\n', '     - stopOn - (default= ``None``) - expression for a terminating sentinel\n', '          (only required if the sentinel would ordinarily match the repetition\n', '          expression)\n', '\n', '    Example: similar to :class:`OneOrMore`\n', '    """\n', '    def __init__(self, expr, stopOn=None):\n', '        super(ZeroOrMore, self).__init__(expr, stopOn=stopOn)\n', '        self.mayReturnEmpty = True\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        try:\n', '            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)\n', '        except (ParseException, IndexError):\n', '            return loc, []\n', '\n', '    def __str__(self):\n', '        if hasattr(self, "name"):\n', '            return self.name\n', '\n', '        if self.strRepr is None:\n', '            self.strRepr = "[" + _ustr(self.expr) + "]..."\n', '\n', '        return self.strRepr\n', '\n', '\n', 'class _NullToken(object):\n', '    def __bool__(self):\n', '        return False\n', '    __nonzero__ = __bool__\n', '    def __str__(self):\n', '        return ""\n', '\n', 'class Optional(ParseElementEnhance):\n', '    """Optional matching of the given expression.\n', '\n', '    Parameters:\n', '     - expr - expression that must match zero or more times\n', '     - default (optional) - value to be returned if the optional expression is not found.\n', '\n', '    Example::\n', '\n', '        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier\n', "        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))\n", "        zip.runTests('''\n", '            # traditional ZIP code\n', '            12345\n', '\n', '            # ZIP+4 form\n', '            12101-0001\n', '\n', '            # invalid ZIP\n', '            98765-\n', "            ''')\n", '\n', '    prints::\n', '\n', '        # traditional ZIP code\n', '        12345\n', "        ['12345']\n", '\n', '        # ZIP+4 form\n', '        12101-0001\n', "        ['12101-0001']\n", '\n', '        # invalid ZIP\n', '        98765-\n', '             ^\n', '        FAIL: Expected end of text (at char 5), (line:1, col:6)\n', '    """\n', '    __optionalNotMatched = _NullToken()\n', '\n', '    def __init__(self, expr, default=__optionalNotMatched):\n', '        super(Optional, self).__init__(expr, savelist=False)\n', '        self.saveAsList = self.expr.saveAsList\n', '        self.defaultValue = default\n', '        self.mayReturnEmpty = True\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        try:\n', '            loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False)\n', '        except (ParseException, IndexError):\n', '            if self.defaultValue is not self.__optionalNotMatched:\n', '                if self.expr.resultsName:\n', '                    tokens = ParseResults([self.defaultValue])\n', '                    tokens[self.expr.resultsName] = self.defaultValue\n', '                else:\n', '                    tokens = [self.defaultValue]\n', '            else:\n', '                tokens = []\n', '        return loc, tokens\n', '\n', '    def __str__(self):\n', '        if hasattr(self, "name"):\n', '            return self.name\n', '\n', '        if self.strRepr is None:\n', '            self.strRepr = "[" + _ustr(self.expr) + "]"\n', '\n', '        return self.strRepr\n', '\n', 'class SkipTo(ParseElementEnhance):\n', '    """Token for skipping over all undefined text until the matched\n', '    expression is found.\n', '\n', '    Parameters:\n', '     - expr - target expression marking the end of the data to be skipped\n', '     - include - (default= ``False``) if True, the target expression is also parsed\n', '          (the skipped text and target expression are returned as a 2-element list).\n', '     - ignore - (default= ``None``) used to define grammars (typically quoted strings and\n', '          comments) that might contain false matches to the target expression\n', '     - failOn - (default= ``None``) define expressions that are not allowed to be\n', '          included in the skipped test; if found before the target expression is found,\n', '          the SkipTo is not a match\n', '\n', '    Example::\n', '\n', "        report = '''\n", '            Outstanding Issues Report - 1 Jan 2000\n', '\n', '               # | Severity | Description                               |  Days Open\n', '            -----+----------+-------------------------------------------+-----------\n', '             101 | Critical | Intermittent system crash                 |          6\n', "              94 | Cosmetic | Spelling error on Login ('log|n')         |         14\n", '              79 | Minor    | System slow when running too many reports |         47\n', "            '''\n", '        integer = Word(nums)\n', "        SEP = Suppress('|')\n", '        # use SkipTo to simply match everything up until the next SEP\n', "        # - ignore quoted strings, so that a '|' character inside a quoted string does not match\n", '        # - parse action will call token.strip() for each matched token, i.e., the description body\n', '        string_data = SkipTo(SEP, ignore=quotedString)\n', '        string_data.setParseAction(tokenMap(str.strip))\n', '        ticket_expr = (integer("issue_num") + SEP\n', '                      + string_data("sev") + SEP\n', '                      + string_data("desc") + SEP\n', '                      + integer("days_open"))\n', '\n', '        for tkt in ticket_expr.searchString(report):\n', '            print tkt.dump()\n', '\n', '    prints::\n', '\n', "        ['101', 'Critical', 'Intermittent system crash', '6']\n", '        - days_open: 6\n', '        - desc: Intermittent system crash\n', '        - issue_num: 101\n', '        - sev: Critical\n', '        [\'94\', \'Cosmetic\', "Spelling error on Login (\'log|n\')", \'14\']\n', '        - days_open: 14\n', "        - desc: Spelling error on Login ('log|n')\n", '        - issue_num: 94\n', '        - sev: Cosmetic\n', "        ['79', 'Minor', 'System slow when running too many reports', '47']\n", '        - days_open: 47\n', '        - desc: System slow when running too many reports\n', '        - issue_num: 79\n', '        - sev: Minor\n', '    """\n', '    def __init__(self, other, include=False, ignore=None, failOn=None):\n', '        super(SkipTo, self).__init__(other)\n', '        self.ignoreExpr = ignore\n', '        self.mayReturnEmpty = True\n', '        self.mayIndexError = False\n', '        self.includeMatch = include\n', '        self.saveAsList = False\n', '        if isinstance(failOn, basestring):\n', '            self.failOn = self._literalStringClass(failOn)\n', '        else:\n', '            self.failOn = failOn\n', '        self.errmsg = "No match found for " + _ustr(self.expr)\n', '\n', '    def parseImpl(self, instring, loc, doActions=True):\n', '        startloc = loc\n', '        instrlen = len(instring)\n', '        expr = self.expr\n', '        expr_parse = self.expr._parse\n', '        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None\n', '        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None\n', '\n', '        tmploc = loc\n', '        while tmploc <= instrlen:\n', '            if self_failOn_canParseNext is not None:\n', '                # break if failOn expression matches\n', '                if self_failOn_canParseNext(instring, tmploc):\n', '                    break\n', '\n', '            if self_ignoreExpr_tryParse is not None:\n', '                # advance past ignore expressions\n', '                while 1:\n', '                    try:\n', '                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)\n', '                    except ParseBaseException:\n', '                        break\n', '\n', '            try:\n', '                expr_parse(instring, tmploc, doActions=False, callPreParse=False)\n', '            except (ParseException, IndexError):\n', '                # no match, advance loc in string\n', '                tmploc += 1\n', '            else:\n', '                # matched skipto expr, done\n', '                break\n', '\n', '        else:\n', '            # ran off the end of the input string without matching skipto expr, fail\n', '            raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '        # build up return values\n', '        loc = tmploc\n', '        skiptext = instring[startloc:loc]\n', '        skipresult = ParseResults(skiptext)\n', '\n', '        if self.includeMatch:\n', '            loc, mat = expr_parse(instring, loc, doActions, callPreParse=False)\n', '            skipresult += mat\n', '\n', '        return loc, skipresult\n', '\n', 'class Forward(ParseElementEnhance):\n', '    """Forward declaration of an expression to be defined later -\n', '    used for recursive grammars, such as algebraic infix notation.\n', '    When the expression is known, it is assigned to the ``Forward``\n', "    variable using the '<<' operator.\n", '\n', '    Note: take care when assigning to ``Forward`` not to overlook\n', '    precedence of operators.\n', '\n', "    Specifically, '|' has a lower precedence than '<<', so that::\n", '\n', '        fwdExpr << a | b | c\n', '\n', '    will actually be evaluated as::\n', '\n', '        (fwdExpr << a) | b | c\n', '\n', '    thereby leaving b and c out as parseable alternatives.  It is recommended that you\n', '    explicitly group the values inserted into the ``Forward``::\n', '\n', '        fwdExpr << (a | b | c)\n', '\n', "    Converting to use the '<<=' operator instead will avoid this problem.\n", '\n', '    See :class:`ParseResults.pprint` for an example of a recursive\n', '    parser created using ``Forward``.\n', '    """\n', '    def __init__(self, other=None):\n', '        super(Forward, self).__init__(other, savelist=False)\n', '\n', '    def __lshift__(self, other):\n', '        if isinstance(other, basestring):\n', '            other = self._literalStringClass(other)\n', '        self.expr = other\n', '        self.strRepr = None\n', '        self.mayIndexError = self.expr.mayIndexError\n', '        self.mayReturnEmpty = self.expr.mayReturnEmpty\n', '        self.setWhitespaceChars(self.expr.whiteChars)\n', '        self.skipWhitespace = self.expr.skipWhitespace\n', '        self.saveAsList = self.expr.saveAsList\n', '        self.ignoreExprs.extend(self.expr.ignoreExprs)\n', '        return self\n', '\n', '    def __ilshift__(self, other):\n', '        return self << other\n', '\n', '    def leaveWhitespace(self):\n', '        self.skipWhitespace = False\n', '        return self\n', '\n', '    def streamline(self):\n', '        if not self.streamlined:\n', '            self.streamlined = True\n', '            if self.expr is not None:\n', '                self.expr.streamline()\n', '        return self\n', '\n', '    def validate(self, validateTrace=None):\n', '        if validateTrace is None:\n', '            validateTrace = []\n', '\n', '        if self not in validateTrace:\n', '            tmp = validateTrace[:] + [self]\n', '            if self.expr is not None:\n', '                self.expr.validate(tmp)\n', '        self.checkRecursion([])\n', '\n', '    def __str__(self):\n', '        if hasattr(self, "name"):\n', '            return self.name\n', '        if self.strRepr is not None:\n', '            return self.strRepr\n', '\n', '        # Avoid infinite recursion by setting a temporary strRepr\n', '        self.strRepr = ": ..."\n', '\n', '        # Use the string representation of main expression.\n', "        retString = '...'\n", '        try:\n', '            if self.expr is not None:\n', '                retString = _ustr(self.expr)[:1000]\n', '            else:\n', '                retString = "None"\n', '        finally:\n', '            self.strRepr = self.__class__.__name__ + ": " + retString\n', '        return self.strRepr\n', '\n', '    def copy(self):\n', '        if self.expr is not None:\n', '            return super(Forward, self).copy()\n', '        else:\n', '            ret = Forward()\n', '            ret <<= self\n', '            return ret\n', '\n', '    def _setResultsName(self, name, listAllMatches=False):\n', '        if __diag__.warn_name_set_on_empty_Forward:\n', '            if self.expr is None:\n', '                warnings.warn("{0}: setting results name {0!r} on {1} expression "\n', '                              "that has no contained expression".format("warn_name_set_on_empty_Forward",\n', '                                                                        name,\n', '                                                                        type(self).__name__),\n', '                              stacklevel=3)\n', '\n', '        return super(Forward, self)._setResultsName(name, listAllMatches)\n', '\n', 'class TokenConverter(ParseElementEnhance):\n', '    """\n', '    Abstract subclass of :class:`ParseExpression`, for converting parsed results.\n', '    """\n', '    def __init__(self, expr, savelist=False):\n', '        super(TokenConverter, self).__init__(expr)  # , savelist)\n', '        self.saveAsList = False\n', '\n', 'class Combine(TokenConverter):\n', '    """Converter to concatenate all matching tokens to a single string.\n', '    By default, the matching patterns must also be contiguous in the\n', '    input string; this can be disabled by specifying\n', "    ``'adjacent=False'`` in the constructor.\n", '\n', '    Example::\n', '\n', "        real = Word(nums) + '.' + Word(nums)\n", "        print(real.parseString('3.1416')) # -> ['3', '.', '1416']\n", '        # will also erroneously match the following\n', "        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']\n", '\n', "        real = Combine(Word(nums) + '.' + Word(nums))\n", "        print(real.parseString('3.1416')) # -> ['3.1416']\n", '        # no match when there are internal spaces\n', "        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)\n", '    """\n', '    def __init__(self, expr, joinString="", adjacent=True):\n', '        super(Combine, self).__init__(expr)\n', '        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself\n', '        if adjacent:\n', '            self.leaveWhitespace()\n', '        self.adjacent = adjacent\n', '        self.skipWhitespace = True\n', '        self.joinString = joinString\n', '        self.callPreparse = True\n', '\n', '    def ignore(self, other):\n', '        if self.adjacent:\n', '            ParserElement.ignore(self, other)\n', '        else:\n', '            super(Combine, self).ignore(other)\n', '        return self\n', '\n', '    def postParse(self, instring, loc, tokenlist):\n', '        retToks = tokenlist.copy()\n', '        del retToks[:]\n', '        retToks += ParseResults(["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults)\n', '\n', '        if self.resultsName and retToks.haskeys():\n', '            return [retToks]\n', '        else:\n', '            return retToks\n', '\n', 'class Group(TokenConverter):\n', '    """Converter to return the matched tokens as a list - useful for\n', '    returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.\n', '\n', '    Example::\n', '\n', '        ident = Word(alphas)\n', '        num = Word(nums)\n', '        term = ident | num\n', '        func = ident + Optional(delimitedList(term))\n', '        print(func.parseString("fn a, b, 100"))  # -> [\'fn\', \'a\', \'b\', \'100\']\n', '\n', '        func = ident + Group(Optional(delimitedList(term)))\n', '        print(func.parseString("fn a, b, 100"))  # -> [\'fn\', [\'a\', \'b\', \'100\']]\n', '    """\n', '    def __init__(self, expr):\n', '        super(Group, self).__init__(expr)\n', '        self.saveAsList = True\n', '\n', '    def postParse(self, instring, loc, tokenlist):\n', '        return [tokenlist]\n', '\n', 'class Dict(TokenConverter):\n', '    """Converter to return a repetitive expression as a list, but also\n', '    as a dictionary. Each element can also be referenced using the first\n', '    token in the expression as its key. Useful for tabular report\n', '    scraping when the first column can be used as a item key.\n', '\n', '    Example::\n', '\n', '        data_word = Word(alphas)\n', "        label = data_word + FollowedBy(':')\n", "        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))\n", '\n', '        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"\n', "        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))\n", '\n', '        # print attributes as plain groups\n', '        print(OneOrMore(attr_expr).parseString(text).dump())\n', '\n', '        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names\n', '        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)\n', '        print(result.dump())\n', '\n', '        # access named fields as dict entries, or output as dict\n', "        print(result['shape'])\n", '        print(result.asDict())\n', '\n', '    prints::\n', '\n', "        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']\n", "        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]\n", '        - color: light blue\n', '        - posn: upper left\n', '        - shape: SQUARE\n', '        - texture: burlap\n', '        SQUARE\n', "        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}\n", '\n', '    See more examples at :class:`ParseResults` of accessing fields by results name.\n', '    """\n', '    def __init__(self, expr):\n', '        super(Dict, self).__init__(expr)\n', '        self.saveAsList = True\n', '\n', '    def postParse(self, instring, loc, tokenlist):\n', '        for i, tok in enumerate(tokenlist):\n', '            if len(tok) == 0:\n', '                continue\n', '            ikey = tok[0]\n', '            if isinstance(ikey, int):\n', '                ikey = _ustr(tok[0]).strip()\n', '            if len(tok) == 1:\n', '                tokenlist[ikey] = _ParseResultsWithOffset("", i)\n', '            elif len(tok) == 2 and not isinstance(tok[1], ParseResults):\n', '                tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)\n', '            else:\n', '                dictvalue = tok.copy()  # ParseResults(i)\n', '                del dictvalue[0]\n', '                if len(dictvalue) != 1 or (isinstance(dictvalue, ParseResults) and dictvalue.haskeys()):\n', '                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)\n', '                else:\n', '                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)\n', '\n', '        if self.resultsName:\n', '            return [tokenlist]\n', '        else:\n', '            return tokenlist\n', '\n', '\n', 'class Suppress(TokenConverter):\n', '    """Converter for ignoring the results of a parsed expression.\n', '\n', '    Example::\n', '\n', '        source = "a, b, c,d"\n', '        wd = Word(alphas)\n', "        wd_list1 = wd + ZeroOrMore(',' + wd)\n", '        print(wd_list1.parseString(source))\n', '\n', '        # often, delimiters that are useful during parsing are just in the\n', '        # way afterward - use Suppress to keep them out of the parsed output\n', "        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)\n", '        print(wd_list2.parseString(source))\n', '\n', '    prints::\n', '\n', "        ['a', ',', 'b', ',', 'c', ',', 'd']\n", "        ['a', 'b', 'c', 'd']\n", '\n', '    (See also :class:`delimitedList`.)\n', '    """\n', '    def postParse(self, instring, loc, tokenlist):\n', '        return []\n', '\n', '    def suppress(self):\n', '        return self\n', '\n', '\n', 'class OnlyOnce(object):\n', '    """Wrapper for parse actions, to ensure they are only called once.\n', '    """\n', '    def __init__(self, methodCall):\n', '        self.callable = _trim_arity(methodCall)\n', '        self.called = False\n', '    def __call__(self, s, l, t):\n', '        if not self.called:\n', '            results = self.callable(s, l, t)\n', '            self.called = True\n', '            return results\n', '        raise ParseException(s, l, "")\n', '    def reset(self):\n', '        self.called = False\n', '\n', 'def traceParseAction(f):\n', '    """Decorator for debugging parse actions.\n', '\n', '    When the parse action is called, this decorator will print\n', '    ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.\n', '    When the parse action completes, the decorator will print\n', '    ``"<<"`` followed by the returned value, or any exception that the parse action raised.\n', '\n', '    Example::\n', '\n', '        wd = Word(alphas)\n', '\n', '        @traceParseAction\n', '        def remove_duplicate_chars(tokens):\n', "            return ''.join(sorted(set(''.join(tokens))))\n", '\n', '        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)\n', '        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))\n', '\n', '    prints::\n', '\n', "        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))\n", "        <<leaving remove_duplicate_chars (ret: 'dfjkls')\n", "        ['dfjkls']\n", '    """\n', '    f = _trim_arity(f)\n', '    def z(*paArgs):\n', '        thisFunc = f.__name__\n', '        s, l, t = paArgs[-3:]\n', '        if len(paArgs) > 3:\n', "            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc\n", '        sys.stderr.write(">>entering %s(line: \'%s\', %d, %r)\\n" % (thisFunc, line(l, s), l, t))\n', '        try:\n', '            ret = f(*paArgs)\n', '        except Exception as exc:\n', '            sys.stderr.write("<<leaving %s (exception: %s)\\n" % (thisFunc, exc))\n', '            raise\n', '        sys.stderr.write("<<leaving %s (ret: %r)\\n" % (thisFunc, ret))\n', '        return ret\n', '    try:\n', '        z.__name__ = f.__name__\n', '    except AttributeError:\n', '        pass\n', '    return z\n', '\n', '#\n', '# global helpers\n', '#\n', 'def delimitedList(expr, delim=",", combine=False):\n', '    """Helper to define a delimited list of expressions - the delimiter\n', "    defaults to ','. By default, the list elements and delimiters can\n", '    have intervening whitespace, and comments, but this can be\n', '    overridden by passing ``combine=True`` in the constructor. If\n', '    ``combine`` is set to ``True``, the matching tokens are\n', '    returned as a single token string, with the delimiters included;\n', '    otherwise, the matching tokens are returned as a list of tokens,\n', '    with the delimiters suppressed.\n', '\n', '    Example::\n', '\n', '        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> [\'aa\', \'bb\', \'cc\']\n', '        delimitedList(Word(hexnums), delim=\':\', combine=True).parseString("AA:BB:CC:DD:EE") # -> [\'AA:BB:CC:DD:EE\']\n', '    """\n', '    dlName = _ustr(expr) + " [" + _ustr(delim) + " " + _ustr(expr) + "]..."\n', '    if combine:\n', '        return Combine(expr + ZeroOrMore(delim + expr)).setName(dlName)\n', '    else:\n', '        return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(dlName)\n', '\n', 'def countedArray(expr, intExpr=None):\n', '    """Helper to define a counted list of expressions.\n', '\n', '    This helper defines a pattern of the form::\n', '\n', '        integer expr expr expr...\n', '\n', '    where the leading integer tells how many expr expressions follow.\n', '    The matched tokens returns the array of expr tokens as a list - the\n', '    leading count token is suppressed.\n', '\n', '    If ``intExpr`` is specified, it should be a pyparsing expression\n', '    that produces an integer value.\n', '\n', '    Example::\n', '\n', "        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']\n", '\n', '        # in this parser, the leading integer value is given in binary,\n', "        # '10' indicating that 2 values are in the array\n", "        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))\n", "        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']\n", '    """\n', '    arrayExpr = Forward()\n', '    def countFieldParseAction(s, l, t):\n', '        n = t[0]\n', '        arrayExpr << (n and Group(And([expr] * n)) or Group(empty))\n', '        return []\n', '    if intExpr is None:\n', '        intExpr = Word(nums).setParseAction(lambda t: int(t[0]))\n', '    else:\n', '        intExpr = intExpr.copy()\n', '    intExpr.setName("arrayLen")\n', '    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)\n', "    return (intExpr + arrayExpr).setName('(len) ' + _ustr(expr) + '...')\n", '\n', 'def _flatten(L):\n', '    ret = []\n', '    for i in L:\n', '        if isinstance(i, list):\n', '            ret.extend(_flatten(i))\n', '        else:\n', '            ret.append(i)\n', '    return ret\n', '\n', 'def matchPreviousLiteral(expr):\n', '    """Helper to define an expression that is indirectly defined from\n', '    the tokens matched in a previous expression, that is, it looks for\n', "    a 'repeat' of a previous expression.  For example::\n", '\n', '        first = Word(nums)\n', '        second = matchPreviousLiteral(first)\n', '        matchExpr = first + ":" + second\n', '\n', '    will match ``"1:1"``, but not ``"1:2"``.  Because this\n', '    matches a previous literal, will also match the leading\n', '    ``"1:1"`` in ``"1:10"``. If this is not desired, use\n', '    :class:`matchPreviousExpr`. Do *not* use with packrat parsing\n', '    enabled.\n', '    """\n', '    rep = Forward()\n', '    def copyTokenToRepeater(s, l, t):\n', '        if t:\n', '            if len(t) == 1:\n', '                rep << t[0]\n', '            else:\n', '                # flatten t tokens\n', '                tflat = _flatten(t.asList())\n', '                rep << And(Literal(tt) for tt in tflat)\n', '        else:\n', '            rep << Empty()\n', '    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)\n', "    rep.setName('(prev) ' + _ustr(expr))\n", '    return rep\n', '\n', 'def matchPreviousExpr(expr):\n', '    """Helper to define an expression that is indirectly defined from\n', '    the tokens matched in a previous expression, that is, it looks for\n', "    a 'repeat' of a previous expression.  For example::\n", '\n', '        first = Word(nums)\n', '        second = matchPreviousExpr(first)\n', '        matchExpr = first + ":" + second\n', '\n', '    will match ``"1:1"``, but not ``"1:2"``.  Because this\n', '    matches by expressions, will *not* match the leading ``"1:1"``\n', '    in ``"1:10"``; the expressions are evaluated first, and then\n', '    compared, so ``"1"`` is compared with ``"10"``. Do *not* use\n', '    with packrat parsing enabled.\n', '    """\n', '    rep = Forward()\n', '    e2 = expr.copy()\n', '    rep <<= e2\n', '    def copyTokenToRepeater(s, l, t):\n', '        matchTokens = _flatten(t.asList())\n', '        def mustMatchTheseTokens(s, l, t):\n', '            theseTokens = _flatten(t.asList())\n', '            if theseTokens != matchTokens:\n', "                raise ParseException('', 0, '')\n", '        rep.setParseAction(mustMatchTheseTokens, callDuringTry=True)\n', '    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)\n', "    rep.setName('(prev) ' + _ustr(expr))\n", '    return rep\n', '\n', 'def _escapeRegexRangeChars(s):\n', '    # ~  escape these chars: ^-[]\n', '    for c in r"\\^-[]":\n', '        s = s.replace(c, _bslash + c)\n', '    s = s.replace("\\n", r"\\n")\n', '    s = s.replace("\\t", r"\\t")\n', '    return _ustr(s)\n', '\n', 'def oneOf(strs, caseless=False, useRegex=True, asKeyword=False):\n', '    """Helper to quickly define a set of alternative Literals, and makes\n', '    sure to do longest-first testing when there is a conflict,\n', '    regardless of the input order, but returns\n', '    a :class:`MatchFirst` for best performance.\n', '\n', '    Parameters:\n', '\n', '     - strs - a string of space-delimited literals, or a collection of\n', '       string literals\n', '     - caseless - (default= ``False``) - treat all literals as\n', '       caseless\n', '     - useRegex - (default= ``True``) - as an optimization, will\n', '       generate a Regex object; otherwise, will generate\n', '       a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if\n', '       creating a :class:`Regex` raises an exception)\n', '     - asKeyword - (default=``False``) - enforce Keyword-style matching on the\n', '       generated expressions\n', '\n', '    Example::\n', '\n', '        comp_oper = oneOf("< = > <= >= !=")\n', '        var = Word(alphas)\n', '        number = Word(nums)\n', '        term = var | number\n', '        comparison_expr = term + comp_oper + term\n', '        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))\n', '\n', '    prints::\n', '\n', "        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]\n", '    """\n', '    if isinstance(caseless, basestring):\n', '        warnings.warn("More than one string argument passed to oneOf, pass "\n', '                      "choices as a list or space-delimited string", stacklevel=2)\n', '\n', '    if caseless:\n', '        isequal = (lambda a, b: a.upper() == b.upper())\n', '        masks = (lambda a, b: b.upper().startswith(a.upper()))\n', '        parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral\n', '    else:\n', '        isequal = (lambda a, b: a == b)\n', '        masks = (lambda a, b: b.startswith(a))\n', '        parseElementClass = Keyword if asKeyword else Literal\n', '\n', '    symbols = []\n', '    if isinstance(strs, basestring):\n', '        symbols = strs.split()\n', '    elif isinstance(strs, Iterable):\n', '        symbols = list(strs)\n', '    else:\n', '        warnings.warn("Invalid argument to oneOf, expected string or iterable",\n', '                      SyntaxWarning, stacklevel=2)\n', '    if not symbols:\n', '        return NoMatch()\n', '\n', '    if not asKeyword:\n', '        # if not producing keywords, need to reorder to take care to avoid masking\n', '        # longer choices with shorter ones\n', '        i = 0\n', '        while i < len(symbols) - 1:\n', '            cur = symbols[i]\n', '            for j, other in enumerate(symbols[i + 1:]):\n', '                if isequal(other, cur):\n', '                    del symbols[i + j + 1]\n', '                    break\n', '                elif masks(cur, other):\n', '                    del symbols[i + j + 1]\n', '                    symbols.insert(i, other)\n', '                    break\n', '            else:\n', '                i += 1\n', '\n', '    if not (caseless or asKeyword) and useRegex:\n', '        # ~ print (strs, "->", "|".join([_escapeRegexChars(sym) for sym in symbols]))\n', '        try:\n', '            if len(symbols) == len("".join(symbols)):\n', '                return Regex("[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols)).setName(\' | \'.join(symbols))\n', '            else:\n', '                return Regex("|".join(re.escape(sym) for sym in symbols)).setName(\' | \'.join(symbols))\n', '        except Exception:\n', '            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",\n', '                    SyntaxWarning, stacklevel=2)\n', '\n', '    # last resort, just use MatchFirst\n', "    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))\n", '\n', 'def dictOf(key, value):\n', '    """Helper to easily and clearly define a dictionary by specifying\n', '    the respective patterns for the key and value.  Takes care of\n', '    defining the :class:`Dict`, :class:`ZeroOrMore`, and\n', '    :class:`Group` tokens in the proper order.  The key pattern\n', '    can include delimiting markers or punctuation, as long as they are\n', '    suppressed, thereby leaving the significant key text.  The value\n', '    pattern can include named results, so that the :class:`Dict` results\n', '    can include named token fields.\n', '\n', '    Example::\n', '\n', '        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"\n', "        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))\n", '        print(OneOrMore(attr_expr).parseString(text).dump())\n', '\n', '        attr_label = label\n', "        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)\n", '\n', '        # similar to Dict, but simpler call format\n', '        result = dictOf(attr_label, attr_value).parseString(text)\n', '        print(result.dump())\n', "        print(result['shape'])\n", '        print(result.shape)  # object attribute access works too\n', '        print(result.asDict())\n', '\n', '    prints::\n', '\n', "        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]\n", '        - color: light blue\n', '        - posn: upper left\n', '        - shape: SQUARE\n', '        - texture: burlap\n', '        SQUARE\n', '        SQUARE\n', "        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}\n", '    """\n', '    return Dict(OneOrMore(Group(key + value)))\n', '\n', 'def originalTextFor(expr, asString=True):\n', '    """Helper to return the original, untokenized text for a given\n', '    expression.  Useful to restore the parsed fields of an HTML start\n', '    tag into the raw tag text itself, or to revert separate tokens with\n', '    intervening whitespace back to the original matching input text. By\n', '    default, returns astring containing the original parsed text.\n', '\n', '    If the optional ``asString`` argument is passed as\n', '    ``False``, then the return value is\n', '    a :class:`ParseResults` containing any results names that\n', '    were originally matched, and a single token containing the original\n', '    matched text from the input string.  So if the expression passed to\n', '    :class:`originalTextFor` contains expressions with defined\n', '    results names, you must set ``asString`` to ``False`` if you\n', '    want to preserve those results name values.\n', '\n', '    Example::\n', '\n', '        src = "this is test <b> bold <i>text</i> </b> normal text "\n', '        for tag in ("b", "i"):\n', '            opener, closer = makeHTMLTags(tag)\n', '            patt = originalTextFor(opener + SkipTo(closer) + closer)\n', '            print(patt.searchString(src)[0])\n', '\n', '    prints::\n', '\n', "        ['<b> bold <i>text</i> </b>']\n", "        ['<i>text</i>']\n", '    """\n', '    locMarker = Empty().setParseAction(lambda s, loc, t: loc)\n', '    endlocMarker = locMarker.copy()\n', '    endlocMarker.callPreparse = False\n', '    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")\n', '    if asString:\n', '        extractText = lambda s, l, t: s[t._original_start: t._original_end]\n', '    else:\n', '        def extractText(s, l, t):\n', "            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]\n", '    matchExpr.setParseAction(extractText)\n', '    matchExpr.ignoreExprs = expr.ignoreExprs\n', '    return matchExpr\n', '\n', 'def ungroup(expr):\n', '    """Helper to undo pyparsing\'s default grouping of And expressions,\n', '    even if all but one are non-empty.\n', '    """\n', '    return TokenConverter(expr).addParseAction(lambda t: t[0])\n', '\n', 'def locatedExpr(expr):\n', '    """Helper to decorate a returned token with its starting and ending\n', '    locations in the input string.\n', '\n', '    This helper adds the following results names:\n', '\n', '     - locn_start = location where matched expression begins\n', '     - locn_end = location where matched expression ends\n', '     - value = the actual parsed results\n', '\n', '    Be careful if the input text contains ``<TAB>`` characters, you\n', '    may want to call :class:`ParserElement.parseWithTabs`\n', '\n', '    Example::\n', '\n', '        wd = Word(alphas)\n', '        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):\n', '            print(match)\n', '\n', '    prints::\n', '\n', "        [[0, 'ljsdf', 5]]\n", "        [[8, 'lksdjjf', 15]]\n", "        [[18, 'lkkjj', 23]]\n", '    """\n', '    locator = Empty().setParseAction(lambda s, l, t: l)\n', '    return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))\n', '\n', '\n', '# convenience constants for positional expressions\n', 'empty       = Empty().setName("empty")\n', 'lineStart   = LineStart().setName("lineStart")\n', 'lineEnd     = LineEnd().setName("lineEnd")\n', 'stringStart = StringStart().setName("stringStart")\n', 'stringEnd   = StringEnd().setName("stringEnd")\n', '\n', '_escapedPunc = Word(_bslash, r"\\[]-*.$+^?()~ ", exact=2).setParseAction(lambda s, l, t: t[0][1])\n', '_escapedHexChar = Regex(r"\\\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s, l, t: unichr(int(t[0].lstrip(r\'\\0x\'), 16)))\n', '_escapedOctChar = Regex(r"\\\\0[0-7]+").setParseAction(lambda s, l, t: unichr(int(t[0][1:], 8)))\n', "_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\\]', exact=1)\n", '_charRange = Group(_singleChar + Suppress("-") + _singleChar)\n', '_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group(OneOrMore(_charRange | _singleChar)).setResultsName("body") + "]"\n', '\n', 'def srange(s):\n', '    r"""Helper to easily define string ranges for use in Word\n', "    construction. Borrows syntax from regexp '[]' string range\n", '    definitions::\n', '\n', '        srange("[0-9]")   -> "0123456789"\n', '        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"\n', '        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"\n', '\n', "    The input string must be enclosed in []'s, and the returned string\n", '    is the expanded character set joined into a single string. The\n', "    values enclosed in the []'s may be:\n", '\n', '     - a single character\n', '     - an escaped character with a leading backslash (such as ``\\-``\n', '       or ``\\]``)\n', "     - an escaped hex character with a leading ``'\\x'``\n", "       (``\\x21``, which is a ``'!'`` character) (``\\0x##``\n", '       is also supported for backwards compatibility)\n', "     - an escaped octal character with a leading ``'\\0'``\n", "       (``\\041``, which is a ``'!'`` character)\n", "     - a range of any of the above, separated by a dash (``'a-z'``,\n", '       etc.)\n', "     - any combination of the above (``'aeiouy'``,\n", "       ``'a-zA-Z0-9_


``, etc.)\n", '    """\n', "    _expanded = lambda p: p if not isinstance(p, ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]), ord(p[1]) + 1))\n", '    try:\n', '        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)\n', '    except Exception:\n', '        return ""\n', '\n', 'def matchOnlyAtCol(n):\n', '    """Helper method for defining parse actions that require matching at\n', '    a specific column in the input text.\n', '    """\n', '    def verifyCol(strg, locn, toks):\n', '        if col(locn, strg) != n:\n', '            raise ParseException(strg, locn, "matched token not at column %d" % n)\n', '    return verifyCol\n', '\n', 'def replaceWith(replStr):\n', '    """Helper method for common parse actions that simply return\n', '    a literal value.  Especially useful when used with\n', '    :class:`transformString<ParserElement.transformString>` ().\n', '\n', '    Example::\n', '\n', '        num = Word(nums).setParseAction(lambda toks: int(toks[0]))\n', '        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))\n', '        term = na | num\n', '\n', '        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]\n', '    """\n', '    return lambda s, l, t: [replStr]\n', '\n', 'def removeQuotes(s, l, t):\n', '    """Helper parse action for removing quotation marks from parsed\n', '    quoted strings.\n', '\n', '    Example::\n', '\n', '        # by default, quotation marks are included in parsed results\n', '        quotedString.parseString("\'Now is the Winter of our Discontent\'") # -> ["\'Now is the Winter of our Discontent\'"]\n', '\n', '        # use removeQuotes to strip quotation marks from parsed results\n', '        quotedString.setParseAction(removeQuotes)\n', '        quotedString.parseString("\'Now is the Winter of our Discontent\'") # -> ["Now is the Winter of our Discontent"]\n', '    """\n', '    return t[0][1:-1]\n', '\n', 'def tokenMap(func, *args):\n', '    """Helper to define a parse action by mapping a function to all\n', '    elements of a ParseResults list. If any additional args are passed,\n', '    they are forwarded to the given function as additional arguments\n', '    after the token, as in\n', '    ``hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))``,\n', '    which will convert the parsed data to an integer using base 16.\n', '\n', '    Example (compare the last to example in :class:`ParserElement.transformString`::\n', '\n', '        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))\n', "        hex_ints.runTests('''\n", '            00 11 22 aa FF 0a 0d 1a\n', "            ''')\n", '\n', '        upperword = Word(alphas).setParseAction(tokenMap(str.upper))\n', "        OneOrMore(upperword).runTests('''\n", '            my kingdom for a horse\n', "            ''')\n", '\n', '        wd = Word(alphas).setParseAction(tokenMap(str.title))\n', "        OneOrMore(wd).setParseAction(' '.join).runTests('''\n", '            now is the winter of our discontent made glorious summer by this sun of york\n', "            ''')\n", '\n', '    prints::\n', '\n', '        00 11 22 aa FF 0a 0d 1a\n', '        [0, 17, 34, 170, 255, 10, 13, 26]\n', '\n', '        my kingdom for a horse\n', "        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']\n", '\n', '        now is the winter of our discontent made glorious summer by this sun of york\n', "        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']\n", '    """\n', '    def pa(s, l, t):\n', '        return [func(tokn, *args) for tokn in t]\n', '\n', '    try:\n', "        func_name = getattr(func, '__name__',\n", "                            getattr(func, '__class__').__name__)\n", '    except Exception:\n', '        func_name = str(func)\n', '    pa.__name__ = func_name\n', '\n', '    return pa\n', '\n', 'upcaseTokens = tokenMap(lambda t: _ustr(t).upper())\n', '"""(Deprecated) Helper parse action to convert tokens to upper case.\n', 'Deprecated in favor of :class:`pyparsing_common.upcaseTokens`"""\n', '\n', 'downcaseTokens = tokenMap(lambda t: _ustr(t).lower())\n', '"""(Deprecated) Helper parse action to convert tokens to lower case.\n', 'Deprecated in favor of :class:`pyparsing_common.downcaseTokens`"""\n', '\n', 'def _makeTags(tagStr, xml,\n', '              suppress_LT=Suppress("<"),\n', '              suppress_GT=Suppress(">")):\n', '    """Internal helper to construct opening and closing tag expressions, given a tag name"""\n', '    if isinstance(tagStr, basestring):\n', '        resname = tagStr\n', '        tagStr = Keyword(tagStr, caseless=not xml)\n', '    else:\n', '        resname = tagStr.name\n', '\n', '    tagAttrName = Word(alphas, alphanums + "_-:")\n', '    if xml:\n', '        tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes)\n', '        openTag = (suppress_LT\n', '                   + tagStr("tag")\n', '                   + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))\n', '                   + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == \'/\')\n', '                   + suppress_GT)\n', '    else:\n', '        tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(printables, excludeChars=">")\n', '        openTag = (suppress_LT\n', '                   + tagStr("tag")\n', '                   + Dict(ZeroOrMore(Group(tagAttrName.setParseAction(downcaseTokens)\n', '                                           + Optional(Suppress("=") + tagAttrValue))))\n', '                   + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == \'/\')\n', '                   + suppress_GT)\n', '    closeTag = Combine(_L("</") + tagStr + ">", adjacent=False)\n', '\n', '    openTag.setName("<%s>" % resname)\n', '    # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels\n', '    openTag.addParseAction(lambda t: t.__setitem__("start" + "".join(resname.replace(":", " ").title().split()), t.copy()))\n', '    closeTag = closeTag("end" + "".join(resname.replace(":", " ").title().split())).setName("</%s>" % resname)\n', '    openTag.tag = resname\n', '    closeTag.tag = resname\n', '    openTag.tag_body = SkipTo(closeTag())\n', '    return openTag, closeTag\n', '\n', 'def makeHTMLTags(tagStr):\n', '    """Helper to construct opening and closing tag expressions for HTML,\n', '    given a tag name. Matches tags in either upper or lower case,\n', '    attributes with namespaces and with quoted or unquoted values.\n', '\n', '    Example::\n', '\n', '        text = \'<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>\'\n', '        # makeHTMLTags returns pyparsing expressions for the opening and\n', '        # closing tags as a 2-tuple\n', '        a, a_end = makeHTMLTags("A")\n', '        link_expr = a + SkipTo(a_end)("link_text") + a_end\n', '\n', '        for link in link_expr.searchString(text):\n', '            # attributes in the <A> tag (like "href" shown here) are\n', '            # also accessible as named results\n', "            print(link.link_text, '->', link.href)\n", '\n', '    prints::\n', '\n', '        pyparsing -> https://github.com/pyparsing/pyparsing/wiki\n', '    """\n', '    return _makeTags(tagStr, False)\n', '\n', 'def makeXMLTags(tagStr):\n', '    """Helper to construct opening and closing tag expressions for XML,\n', '    given a tag name. Matches tags only in the given upper/lower case.\n', '\n', '    Example: similar to :class:`makeHTMLTags`\n', '    """\n', '    return _makeTags(tagStr, True)\n', '\n', 'def withAttribute(*args, **attrDict):\n', '    """Helper to create a validating parse action to be used with start\n', '    tags created with :class:`makeXMLTags` or\n', '    :class:`makeHTMLTags`. Use ``withAttribute`` to qualify\n', '    a starting tag with a required attribute value, to avoid false\n', '    matches on common tags such as ``<TD>`` or ``<DIV>``.\n', '\n', '    Call ``withAttribute`` with a series of attribute names and\n', '    values. Specify the list of filter attributes names and values as:\n', '\n', '     - keyword arguments, as in ``(align="right")``, or\n', '     - as an explicit dict with ``**`` operator, when an attribute\n', '       name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``\n', '     - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``\n', '\n', '    For attribute names with a namespace prefix, you must use the second\n', '    form.  Attribute names are matched insensitive to upper/lower case.\n', '\n', '    If just testing for ``class`` (with or without a namespace), use\n', '    :class:`withClass`.\n', '\n', '    To verify that the attribute exists, but without specifying a value,\n', '    pass ``withAttribute.ANY_VALUE`` as the value.\n', '\n', '    Example::\n', '\n', "        html = '''\n", '            <div>\n', '            Some text\n', '            <div type="grid">1 4 0 1 0</div>\n', '            <div type="graph">1,3 2,3 1,1</div>\n', '            <div>this has no type</div>\n', '            </div>\n', '\n', "        '''\n", '        div,div_end = makeHTMLTags("div")\n', '\n', '        # only match div tag having a type attribute with value "grid"\n', '        div_grid = div().setParseAction(withAttribute(type="grid"))\n', '        grid_expr = div_grid + SkipTo(div | div_end)("body")\n', '        for grid_header in grid_expr.searchString(html):\n', '            print(grid_header.body)\n', '\n', '        # construct a match with any div tag having a type attribute, regardless of the value\n', '        div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))\n', '        div_expr = div_any_type + SkipTo(div | div_end)("body")\n', '        for div_header in div_expr.searchString(html):\n', '            print(div_header.body)\n', '\n', '    prints::\n', '\n', '        1 4 0 1 0\n', '\n', '        1 4 0 1 0\n', '        1,3 2,3 1,1\n', '    """\n', '    if args:\n', '        attrs = args[:]\n', '    else:\n', '        attrs = attrDict.items()\n', '    attrs = [(k, v) for k, v in attrs]\n', '    def pa(s, l, tokens):\n', '        for attrName, attrValue in attrs:\n', '            if attrName not in tokens:\n', '                raise ParseException(s, l, "no matching attribute " + attrName)\n', '            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:\n', '                raise ParseException(s, l, "attribute \'%s\' has value \'%s\', must be \'%s\'" %\n', '                                            (attrName, tokens[attrName], attrValue))\n', '    return pa\n', 'withAttribute.ANY_VALUE = object()\n', '\n', "def withClass(classname, namespace=''):\n", '    """Simplified version of :class:`withAttribute` when\n', '    matching on a div class - made difficult because ``class`` is\n', '    a reserved word in Python.\n', '\n', '    Example::\n', '\n', "        html = '''\n", '            <div>\n', '            Some text\n', '            <div class="grid">1 4 0 1 0</div>\n', '            <div class="graph">1,3 2,3 1,1</div>\n', '            <div>this &lt;div&gt; has no class</div>\n', '            </div>\n', '\n', "        '''\n", '        div,div_end = makeHTMLTags("div")\n', '        div_grid = div().setParseAction(withClass("grid"))\n', '\n', '        grid_expr = div_grid + SkipTo(div | div_end)("body")\n', '        for grid_header in grid_expr.searchString(html):\n', '            print(grid_header.body)\n', '\n', '        div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))\n', '        div_expr = div_any_type + SkipTo(div | div_end)("body")\n', '        for div_header in div_expr.searchString(html):\n', '            print(div_header.body)\n', '\n', '    prints::\n', '\n', '        1 4 0 1 0\n', '\n', '        1 4 0 1 0\n', '        1,3 2,3 1,1\n', '    """\n', '    classattr = "%s:class" % namespace if namespace else "class"\n', '    return withAttribute(**{classattr: classname})\n', '\n', 'opAssoc = SimpleNamespace()\n', 'opAssoc.LEFT = object()\n', 'opAssoc.RIGHT = object()\n', '\n', "def infixNotation(baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')')):\n", '    """Helper method for constructing grammars of expressions made up of\n', '    operators working in a precedence hierarchy.  Operators may be unary\n', '    or binary, left- or right-associative.  Parse actions can also be\n', '    attached to operator expressions. The generated parser will also\n', '    recognize the use of parentheses to override operator precedences\n', '    (see example below).\n', '\n', '    Note: if you define a deep operator list, you may see performance\n', '    issues when using infixNotation. See\n', '    :class:`ParserElement.enablePackrat` for a mechanism to potentially\n', '    improve your parser performance.\n', '\n', '    Parameters:\n', '     - baseExpr - expression representing the most basic element for the\n', '       nested\n', '     - opList - list of tuples, one for each operator precedence level\n', '       in the expression grammar; each tuple is of the form ``(opExpr,\n', '       numTerms, rightLeftAssoc, parseAction)``, where:\n', '\n', '       - opExpr is the pyparsing expression for the operator; may also\n', '         be a string, which will be converted to a Literal; if numTerms\n', '         is 3, opExpr is a tuple of two expressions, for the two\n', '         operators separating the 3 terms\n', '       - numTerms is the number of terms for this operator (must be 1,\n', '         2, or 3)\n', '       - rightLeftAssoc is the indicator whether the operator is right\n', '         or left associative, using the pyparsing-defined constants\n', '         ``opAssoc.RIGHT`` and ``opAssoc.LEFT``.\n', '       - parseAction is the parse action to be associated with\n', '         expressions matching this operator expression (the parse action\n', '         tuple member may be omitted); if the parse action is passed\n', '         a tuple or list of functions, this is equivalent to calling\n', '         ``setParseAction(*fn)``\n', '         (:class:`ParserElement.setParseAction`)\n', '     - lpar - expression for matching left-parentheses\n', "       (default= ``Suppress('(')``)\n", '     - rpar - expression for matching right-parentheses\n', "       (default= ``Suppress(')')``)\n", '\n', '    Example::\n', '\n', '        # simple example of four-function arithmetic with ints and\n', '        # variable names\n', '        integer = pyparsing_common.signed_integer\n', '        varname = pyparsing_common.identifier\n', '\n', '        arith_expr = infixNotation(integer | varname,\n', '            [\n', "            ('-', 1, opAssoc.RIGHT),\n", "            (oneOf('* /'), 2, opAssoc.LEFT),\n", "            (oneOf('+ -'), 2, opAssoc.LEFT),\n", '            ])\n', '\n', "        arith_expr.runTests('''\n", '            5+3*6\n', '            (5+3)*6\n', '            -2--11\n', "            ''', fullDump=False)\n", '\n', '    prints::\n', '\n', '        5+3*6\n', "        [[5, '+', [3, '*', 6]]]\n", '\n', '        (5+3)*6\n', "        [[[5, '+', 3], '*', 6]]\n", '\n', '        -2--11\n', "        [[['-', 2], '-', ['-', 11]]]\n", '    """\n', '    # captive version of FollowedBy that does not do parse actions or capture results names\n', '    class _FB(FollowedBy):\n', '        def parseImpl(self, instring, loc, doActions=True):\n', '            self.expr.tryParse(instring, loc)\n', '            return loc, []\n', '\n', '    ret = Forward()\n', '    lastExpr = baseExpr | (lpar + ret + rpar)\n', '    for i, operDef in enumerate(opList):\n', '        opExpr, arity, rightLeftAssoc, pa = (operDef + (None, ))[:4]\n', '        termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr\n', '        if arity == 3:\n', '            if opExpr is None or len(opExpr) != 2:\n', '                raise ValueError(\n', '                    "if numterms=3, opExpr must be a tuple or list of two expressions")\n', '            opExpr1, opExpr2 = opExpr\n', '        thisExpr = Forward().setName(termName)\n', '        if rightLeftAssoc == opAssoc.LEFT:\n', '            if arity == 1:\n', '                matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + OneOrMore(opExpr))\n', '            elif arity == 2:\n', '                if opExpr is not None:\n', '                    matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(lastExpr + OneOrMore(opExpr + lastExpr))\n', '                else:\n', '                    matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr + OneOrMore(lastExpr))\n', '            elif arity == 3:\n', '                matchExpr = (_FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr)\n', '                             + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr)))\n', '            else:\n', '                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")\n', '        elif rightLeftAssoc == opAssoc.RIGHT:\n', '            if arity == 1:\n', '                # try to avoid LR with this extra test\n', '                if not isinstance(opExpr, Optional):\n', '                    opExpr = Optional(opExpr)\n', '                matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)\n', '            elif arity == 2:\n', '                if opExpr is not None:\n', '                    matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(lastExpr + OneOrMore(opExpr + thisExpr))\n', '                else:\n', '                    matchExpr = _FB(lastExpr + thisExpr) + Group(lastExpr + OneOrMore(thisExpr))\n', '            elif arity == 3:\n', '                matchExpr = (_FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)\n', '                             + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr))\n', '            else:\n', '                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")\n', '        else:\n', '            raise ValueError("operator must indicate right or left associativity")\n', '        if pa:\n', '            if isinstance(pa, (tuple, list)):\n', '                matchExpr.setParseAction(*pa)\n', '            else:\n', '                matchExpr.setParseAction(pa)\n', '        thisExpr <<= (matchExpr.setName(termName) | lastExpr)\n', '        lastExpr = thisExpr\n', '    ret <<= lastExpr\n', '    return ret\n', '\n', 'operatorPrecedence = infixNotation\n', '"""(Deprecated) Former name of :class:`infixNotation`, will be\n', 'dropped in a future release."""\n', '\n', 'dblQuotedString = Combine(Regex(r\'"(?:[^"\\n\\r\\\\]|(?:"")|(?:\\\\(?:[^x]|x[0-9a-fA-F]+)))*\') + \'"\').setName("string enclosed in double quotes")\n', 'sglQuotedString = Combine(Regex(r"\'(?:[^\'\\n\\r\\\\]|(?:\'\')|(?:\\\\(?:[^x]|x[0-9a-fA-F]+)))*") + "\'").setName("string enclosed in single quotes")\n', 'quotedString = Combine(Regex(r\'"(?:[^"\\n\\r\\\\]|(?:"")|(?:\\\\(?:[^x]|x[0-9a-fA-F]+)))*\') + \'"\'\n', '                       | Regex(r"\'(?:[^\'\\n\\r\\\\]|(?:\'\')|(?:\\\\(?:[^x]|x[0-9a-fA-F]+)))*") + "\'").setName("quotedString using single or double quotes")\n', 'unicodeString = Combine(_L(\'u\') + quotedString.copy()).setName("unicode string literal")\n', '\n', 'def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):\n', '    """Helper method for defining nested lists enclosed in opening and\n', '    closing delimiters ("(" and ")" are the default).\n', '\n', '    Parameters:\n', '     - opener - opening character for a nested list\n', '       (default= ``"("``); can also be a pyparsing expression\n', '     - closer - closing character for a nested list\n', '       (default= ``")"``); can also be a pyparsing expression\n', '     - content - expression for items within the nested lists\n', '       (default= ``None``)\n', '     - ignoreExpr - expression for ignoring opening and closing\n', '       delimiters (default= :class:`quotedString`)\n', '\n', '    If an expression is not provided for the content argument, the\n', '    nested expression will capture all whitespace-delimited content\n', '    between delimiters as a list of separate values.\n', '\n', '    Use the ``ignoreExpr`` argument to define expressions that may\n', '    contain opening or closing characters that should not be treated as\n', '    opening or closing characters for nesting, such as quotedString or\n', '    a comment expression.  Specify multiple expressions using an\n', '    :class:`Or` or :class:`MatchFirst`. The default is\n', '    :class:`quotedString`, but if no expressions are to be ignored, then\n', '    pass ``None`` for this argument.\n', '\n', '    Example::\n', '\n', '        data_type = oneOf("void int short long char float double")\n', "        decl_data_type = Combine(data_type + Optional(Word('*')))\n", "        ident = Word(alphas+'_', alphanums+'_')\n", '        number = pyparsing_common.number\n', '        arg = Group(decl_data_type + ident)\n', '        LPAR, RPAR = map(Suppress, "()")\n', '\n', "        code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))\n", '\n', '        c_function = (decl_data_type("type")\n', '                      + ident("name")\n', '                      + LPAR + Optional(delimitedList(arg), [])("args") + RPAR\n', '                      + code_body("body"))\n', '        c_function.ignore(cStyleComment)\n', '\n', "        source_code = '''\n", '            int is_odd(int x) {\n', '                return (x%2);\n', '            }\n', '\n', '            int dec_to_hex(char hchar) {\n', "                if (hchar >= '0' && hchar <= '9') {\n", "                    return (ord(hchar)-ord('0'));\n", '                } else {\n', "                    return (10+ord(hchar)-ord('A'));\n", '                }\n', '            }\n', "        '''\n", '        for func in c_function.searchString(source_code):\n', '            print("%(name)s (%(type)s) args: %(args)s" % func)\n', '\n', '\n', '    prints::\n', '\n', "        is_odd (int) args: [['int', 'x']]\n", "        dec_to_hex (int) args: [['char', 'hchar']]\n", '    """\n', '    if opener == closer:\n', '        raise ValueError("opening and closing strings cannot be the same")\n', '    if content is None:\n', '        if isinstance(opener, basestring) and isinstance(closer, basestring):\n', '            if len(opener) == 1 and len(closer) == 1:\n', '                if ignoreExpr is not None:\n', '                    content = (Combine(OneOrMore(~ignoreExpr\n', '                                                 + CharsNotIn(opener\n', '                                                              + closer\n', '                                                              + ParserElement.DEFAULT_WHITE_CHARS, exact=1)\n', '                                                 )\n', '                                       ).setParseAction(lambda t: t[0].strip()))\n', '                else:\n', '                    content = (empty.copy() + CharsNotIn(opener\n', '                                                         + closer\n', '                                                         + ParserElement.DEFAULT_WHITE_CHARS\n', '                                                         ).setParseAction(lambda t: t[0].strip()))\n', '            else:\n', '                if ignoreExpr is not None:\n', '                    content = (Combine(OneOrMore(~ignoreExpr\n', '                                                 + ~Literal(opener)\n', '                                                 + ~Literal(closer)\n', '                                                 + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1))\n', '                                       ).setParseAction(lambda t: t[0].strip()))\n', '                else:\n', '                    content = (Combine(OneOrMore(~Literal(opener)\n', '                                                 + ~Literal(closer)\n', '                                                 + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1))\n', '                                       ).setParseAction(lambda t: t[0].strip()))\n', '        else:\n', '            raise ValueError("opening and closing arguments must be strings if no content expression is given")\n', '    ret = Forward()\n', '    if ignoreExpr is not None:\n', '        ret <<= Group(Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer))\n', '    else:\n', '        ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content)  + Suppress(closer))\n', "    ret.setName('nested %s%s expression' % (opener, closer))\n", '    return ret\n', '\n', 'def indentedBlock(blockStatementExpr, indentStack, indent=True):\n', '    """Helper method for defining space-delimited indentation blocks,\n', '    such as those used to define block statements in Python source code.\n', '\n', '    Parameters:\n', '\n', '     - blockStatementExpr - expression defining syntax of statement that\n', '       is repeated within the indented block\n', '     - indentStack - list created by caller to manage indentation stack\n', '       (multiple statementWithIndentedBlock expressions within a single\n', '       grammar should share a common indentStack)\n', '     - indent - boolean indicating whether block must be indented beyond\n', '       the current level; set to False for block of left-most\n', '       statements (default= ``True``)\n', '\n', '    A valid block must contain at least one ``blockStatement``.\n', '\n', '    Example::\n', '\n', "        data = '''\n", '        def A(z):\n', '          A1\n', '          B = 100\n', '          G = A2\n', '          A2\n', '          A3\n', '        B\n', '        def BB(a,b,c):\n', '          BB1\n', '          def BBA():\n', '            bba1\n', '            bba2\n', '            bba3\n', '        C\n', '        D\n', '        def spam(x,y):\n', '             def eggs(z):\n', '                 pass\n', "        '''\n", '\n', '\n', '        indentStack = [1]\n', '        stmt = Forward()\n', '\n', '        identifier = Word(alphas, alphanums)\n', '        funcDecl = ("def" + identifier + Group("(" + Optional(delimitedList(identifier)) + ")") + ":")\n', '        func_body = indentedBlock(stmt, indentStack)\n', '        funcDef = Group(funcDecl + func_body)\n', '\n', '        rvalue = Forward()\n', '        funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")\n', '        rvalue << (funcCall | identifier | Word(nums))\n', '        assignment = Group(identifier + "=" + rvalue)\n', '        stmt << (funcDef | assignment | identifier)\n', '\n', '        module_body = OneOrMore(stmt)\n', '\n', '        parseTree = module_body.parseString(data)\n', '        parseTree.pprint()\n', '\n', '    prints::\n', '\n', "        [['def',\n", "          'A',\n", "          ['(', 'z', ')'],\n", "          ':',\n", "          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],\n", "         'B',\n", "         ['def',\n", "          'BB',\n", "          ['(', 'a', 'b', 'c', ')'],\n", "          ':',\n", "          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],\n", "         'C',\n", "         'D',\n", "         ['def',\n", "          'spam',\n", "          ['(', 'x', 'y', ')'],\n", "          ':',\n", "          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]\n", '    """\n', '    backup_stack = indentStack[:]\n', '\n', '    def reset_stack():\n', '        indentStack[:] = backup_stack\n', '\n', '    def checkPeerIndent(s, l, t):\n', '        if l >= len(s): return\n', '        curCol = col(l, s)\n', '        if curCol != indentStack[-1]:\n', '            if curCol > indentStack[-1]:\n', '                raise ParseException(s, l, "illegal nesting")\n', '            raise ParseException(s, l, "not a peer entry")\n', '\n', '    def checkSubIndent(s, l, t):\n', '        curCol = col(l, s)\n', '        if curCol > indentStack[-1]:\n', '            indentStack.append(curCol)\n', '        else:\n', '            raise ParseException(s, l, "not a subentry")\n', '\n', '    def checkUnindent(s, l, t):\n', '        if l >= len(s): return\n', '        curCol = col(l, s)\n', '        if not(indentStack and curCol in indentStack):\n', '            raise ParseException(s, l, "not an unindent")\n', '        if curCol < indentStack[-1]:\n', '            indentStack.pop()\n', '\n', '    NL = OneOrMore(LineEnd().setWhitespaceChars("\\t ").suppress(), stopOn=StringEnd())\n', "    INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')\n", "    PEER   = Empty().setParseAction(checkPeerIndent).setName('')\n", "    UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')\n", '    if indent:\n', '        smExpr = Group(Optional(NL)\n', '                       + INDENT\n', '                       + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd())\n', '                       + UNDENT)\n', '    else:\n', '        smExpr = Group(Optional(NL)\n', '                       + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd())\n', '                       + UNDENT)\n', '    smExpr.setFailAction(lambda a, b, c, d: reset_stack())\n', '    blockStatementExpr.ignore(_bslash + LineEnd())\n', "    return smExpr.setName('indented block')\n", '\n', 'alphas8bit = srange(r"[\\0xc0-\\0xd6\\0xd8-\\0xf6\\0xf8-\\0xff]")\n', 'punc8bit = srange(r"[\\0xa1-\\0xbf\\0xd7\\0xf7]")\n', '\n', 'anyOpenTag, anyCloseTag = makeHTMLTags(Word(alphas, alphanums + "_:").setName(\'any tag\'))\n', '_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(), \'><& "\\\'\'))\n', 'commonHTMLEntity = Regex(\'&(?P<entity>\' + \'|\'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")\n', 'def replaceHTMLEntity(t):\n', '    """Helper parser action to replace common HTML entities with their special characters"""\n', '    return _htmlEntityMap.get(t.entity)\n', '\n', "# it's easy to get these comment structures wrong - they're very common, so may as well make them available\n", 'cStyleComment = Combine(Regex(r"/\\*(?:[^*]|\\*(?!/))*") + \'*/\').setName("C style comment")\n', '"Comment of the form ``/* ... */``"\n', '\n', 'htmlComment = Regex(r"<!--[\\s\\S]*?-->").setName("HTML comment")\n', '"Comment of the form ``<!-- ... -->``"\n', '\n', 'restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")\n', 'dblSlashComment = Regex(r"//(?:\\\\\\n|[^\\n])*").setName("// comment")\n', '"Comment of the form ``// ... (to end of line)``"\n', '\n', 'cppStyleComment = Combine(Regex(r"/\\*(?:[^*]|\\*(?!/))*") + \'*/\' | dblSlashComment).setName("C++ style comment")\n', '"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`"\n', '\n', 'javaStyleComment = cppStyleComment\n', '"Same as :class:`cppStyleComment`"\n', '\n', 'pythonStyleComment = Regex(r"#.*").setName("Python style comment")\n', '"Comment of the form ``# ... (to end of line)``"\n', '\n', "_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',')\n", '                                  + Optional(Word(" \\t")\n', '                                             + ~Literal(",") + ~LineEnd()))).streamline().setName("commaItem")\n', 'commaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default="")).setName("commaSeparatedList")\n', '"""(Deprecated) Predefined expression of 1 or more printable words or\n', 'quoted strings, separated by commas.\n', '\n', 'This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`.\n', '"""\n', '\n', '# some other useful expressions - using lower-case class name since we are really using this as a namespace\n', 'class pyparsing_common:\n', '    """Here are some common low-level expressions that may be useful in\n', '    jump-starting parser development:\n', '\n', '     - numeric forms (:class:`integers<integer>`, :class:`reals<real>`,\n', '       :class:`scientific notation<sci_real>`)\n', '     - common :class:`programming identifiers<identifier>`\n', '     - network addresses (:class:`MAC<mac_address>`,\n', '       :class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)\n', '     - ISO8601 :class:`dates<iso8601_date>` and\n', '       :class:`datetime<iso8601_datetime>`\n', '     - :class:`UUID<uuid>`\n', '     - :class:`comma-separated list<comma_separated_list>`\n', '\n', '    Parse actions:\n', '\n', '     - :class:`convertToInteger`\n', '     - :class:`convertToFloat`\n', '     - :class:`convertToDate`\n', '     - :class:`convertToDatetime`\n', '     - :class:`stripHTMLTags`\n', '     - :class:`upcaseTokens`\n', '     - :class:`downcaseTokens`\n', '\n', '    Example::\n', '\n', "        pyparsing_common.number.runTests('''\n", '            # any int or real number, returned as the appropriate type\n', '            100\n', '            -100\n', '            +100\n', '            3.14159\n', '            6.02e23\n', '            1e-12\n', "            ''')\n", '\n', "        pyparsing_common.fnumber.runTests('''\n", '            # any int or real number, returned as float\n', '            100\n', '            -100\n', '            +100\n', '            3.14159\n', '            6.02e23\n', '            1e-12\n', "            ''')\n", '\n', "        pyparsing_common.hex_integer.runTests('''\n", '            # hex numbers\n', '            100\n', '            FF\n', "            ''')\n", '\n', "        pyparsing_common.fraction.runTests('''\n", '            # fractions\n', '            1/2\n', '            -3/4\n', "            ''')\n", '\n', "        pyparsing_common.mixed_integer.runTests('''\n", '            # mixed fractions\n', '            1\n', '            1/2\n', '            -3/4\n', '            1-3/4\n', "            ''')\n", '\n', '        import uuid\n', '        pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))\n', "        pyparsing_common.uuid.runTests('''\n", '            # uuid\n', '            12345678-1234-5678-1234-567812345678\n', "            ''')\n", '\n', '    prints::\n', '\n', '        # any int or real number, returned as the appropriate type\n', '        100\n', '        [100]\n', '\n', '        -100\n', '        [-100]\n', '\n', '        +100\n', '        [100]\n', '\n', '        3.14159\n', '        [3.14159]\n', '\n', '        6.02e23\n', '        [6.02e+23]\n', '\n', '        1e-12\n', '        [1e-12]\n', '\n', '        # any int or real number, returned as float\n', '        100\n', '        [100.0]\n', '\n', '        -100\n', '        [-100.0]\n', '\n', '        +100\n', '        [100.0]\n', '\n', '        3.14159\n', '        [3.14159]\n', '\n', '        6.02e23\n', '        [6.02e+23]\n', '\n', '        1e-12\n', '        [1e-12]\n', '\n', '        # hex numbers\n', '        100\n', '        [256]\n', '\n', '        FF\n', '        [255]\n', '\n', '        # fractions\n', '        1/2\n', '        [0.5]\n', '\n', '        -3/4\n', '        [-0.75]\n', '\n', '        # mixed fractions\n', '        1\n', '        [1]\n', '\n', '        1/2\n', '        [0.5]\n', '\n', '        -3/4\n', '        [-0.75]\n', '\n', '        1-3/4\n', '        [1.75]\n', '\n', '        # uuid\n', '        12345678-1234-5678-1234-567812345678\n', "        [UUID('12345678-1234-5678-1234-567812345678')]\n", '    """\n', '\n', '    convertToInteger = tokenMap(int)\n', '    """\n', '    Parse action for converting parsed integers to Python int\n', '    """\n', '\n', '    convertToFloat = tokenMap(float)\n', '    """\n', '    Parse action for converting parsed numbers to Python float\n', '    """\n', '\n', '    integer = Word(nums).setName("integer").setParseAction(convertToInteger)\n', '    """expression that parses an unsigned integer, returns an int"""\n', '\n', '    hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int, 16))\n', '    """expression that parses a hexadecimal integer, returns an int"""\n', '\n', '    signed_integer = Regex(r\'[+-]?\\d+\').setName("signed integer").setParseAction(convertToInteger)\n', '    """expression that parses an integer with optional leading sign, returns an int"""\n', '\n', '    fraction = (signed_integer().setParseAction(convertToFloat) + \'/\' + signed_integer().setParseAction(convertToFloat)).setName("fraction")\n', '    """fractional expression of an integer divided by an integer, returns a float"""\n', '    fraction.addParseAction(lambda t: t[0]/t[-1])\n', '\n', '    mixed_integer = (fraction | signed_integer + Optional(Optional(\'-\').suppress() + fraction)).setName("fraction or mixed integer-fraction")\n', '    """mixed integer of the form \'integer - fraction\', with optional leading integer, returns float"""\n', '    mixed_integer.addParseAction(sum)\n', '\n', '    real = Regex(r\'[+-]?(?:\\d+\\.\\d*|\\.\\d+)\').setName("real number").setParseAction(convertToFloat)\n', '    """expression that parses a floating point number and returns a float"""\n', '\n', '    sci_real = Regex(r\'[+-]?(?:\\d+(?:[eE][+-]?\\d+)|(?:\\d+\\.\\d*|\\.\\d+)(?:[eE][+-]?\\d+)?)\').setName("real number with scientific notation").setParseAction(convertToFloat)\n', '    """expression that parses a floating point number with optional\n', '    scientific notation and returns a float"""\n', '\n', '    # streamlining this expression makes the docs nicer-looking\n', '    number = (sci_real | real | signed_integer).streamline()\n', '    """any numeric expression, returns the corresponding Python type"""\n', '\n', '    fnumber = Regex(r\'[+-]?\\d+\\.?\\d*([eE][+-]?\\d+)?\').setName("fnumber").setParseAction(convertToFloat)\n', '    """any int or real number, returned as float"""\n', '\n', '    identifier = Word(alphas + \'_\', alphanums + \'_\').setName("identifier")\n', '    """typical code identifier (leading alpha or \'_\', followed by 0 or more alphas, nums, or \'_\')"""\n', '\n', '    ipv4_address = Regex(r\'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}\').setName("IPv4 address")\n', '    "IPv4 address (``0.0.0.0 - 255.255.255.255``)"\n', '\n', '    _ipv6_part = Regex(r\'[0-9a-fA-F]{1,4}\').setName("hex_integer")\n', '    _full_ipv6_address = (_ipv6_part + (\':\' + _ipv6_part) * 7).setName("full IPv6 address")\n', "    _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6))\n", '                           + "::"\n', "                           + Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6))\n", '                           ).setName("short IPv6 address")\n', '    _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)\n', '    _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")\n', '    ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")\n', '    "IPv6 address (long, short, or mixed form)"\n', '\n', '    mac_address = Regex(r\'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\\1[0-9a-fA-F]{2}){4}\').setName("MAC address")\n', '    "MAC address xx:xx:xx:xx:xx (may also have \'-\' or \'.\' delimiters)"\n', '\n', '    @staticmethod\n', '    def convertToDate(fmt="%Y-%m-%d"):\n', '        """\n', '        Helper to create a parse action for converting parsed date string to Python datetime.date\n', '\n', '        Params -\n', '         - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)\n', '\n', '        Example::\n', '\n', '            date_expr = pyparsing_common.iso8601_date.copy()\n', '            date_expr.setParseAction(pyparsing_common.convertToDate())\n', '            print(date_expr.parseString("1999-12-31"))\n', '\n', '        prints::\n', '\n', '            [datetime.date(1999, 12, 31)]\n', '        """\n', '        def cvt_fn(s, l, t):\n', '            try:\n', '                return datetime.strptime(t[0], fmt).date()\n', '            except ValueError as ve:\n', '                raise ParseException(s, l, str(ve))\n', '        return cvt_fn\n', '\n', '    @staticmethod\n', '    def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):\n', '        """Helper to create a parse action for converting parsed\n', '        datetime string to Python datetime.datetime\n', '\n', '        Params -\n', '         - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)\n', '\n', '        Example::\n', '\n', '            dt_expr = pyparsing_common.iso8601_datetime.copy()\n', '            dt_expr.setParseAction(pyparsing_common.convertToDatetime())\n', '            print(dt_expr.parseString("1999-12-31T23:59:59.999"))\n', '\n', '        prints::\n', '\n', '            [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]\n', '        """\n', '        def cvt_fn(s, l, t):\n', '            try:\n', '                return datetime.strptime(t[0], fmt)\n', '            except ValueError as ve:\n', '                raise ParseException(s, l, str(ve))\n', '        return cvt_fn\n', '\n', '    iso8601_date = Regex(r\'(?P<year>\\d{4})(?:-(?P<month>\\d\\d)(?:-(?P<day>\\d\\d))?)?\').setName("ISO8601 date")\n', '    "ISO8601 date (``yyyy-mm-dd``)"\n', '\n', '    iso8601_datetime = Regex(r\'(?P<year>\\d{4})-(?P<month>\\d\\d)-(?P<day>\\d\\d)[T ](?P<hour>\\d\\d):(?P<minute>\\d\\d)(:(?P<second>\\d\\d(\\.\\d*)?)?)?(?P<tz>Z|[+-]\\d\\d:?\\d\\d)?\').setName("ISO8601 datetime")\n', '    "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``\'T\'`` or ``\' \'``"\n', '\n', '    uuid = Regex(r\'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}\').setName("UUID")\n', '    "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"\n', '\n', '    _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()\n', '    @staticmethod\n', '    def stripHTMLTags(s, l, tokens):\n', '        """Parse action to remove HTML tags from web page HTML source\n', '\n', '        Example::\n', '\n', '            # strip HTML links from normal text\n', '            text = \'<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>\'\n', '            td, td_end = makeHTMLTags("TD")\n', '            table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end\n', '            print(table_text.parseString(text).body)\n', '\n', '        Prints::\n', '\n', '            More info at the pyparsing wiki page\n', '        """\n', '        return pyparsing_common._html_stripper.transformString(tokens[0])\n', '\n', '    _commasepitem = Combine(OneOrMore(~Literal(",")\n', '                                      + ~LineEnd()\n', "                                      + Word(printables, excludeChars=',')\n", '                                      + Optional(White(" \\t")))).streamline().setName("commaItem")\n', '    comma_separated_list = delimitedList(Optional(quotedString.copy()\n', "                                                  | _commasepitem, default='')\n", '                                         ).setName("comma separated list")\n', '    """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""\n', '\n', '    upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))\n', '    """Parse action to convert tokens to upper case."""\n', '\n', '    downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))\n', '    """Parse action to convert tokens to lower case."""\n', '\n', '\n', 'class _lazyclassproperty(object):\n', '    def __init__(self, fn):\n', '        self.fn = fn\n', '        self.__doc__ = fn.__doc__\n', '        self.__name__ = fn.__name__\n', '\n', '    def __get__(self, obj, cls):\n', '        if cls is None:\n', '            cls = type(obj)\n', "        if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, '_intern', [])\n", '                                              for superclass in cls.__mro__[1:]):\n', '            cls._intern = {}\n', '        attrname = self.fn.__name__\n', '        if attrname not in cls._intern:\n', '            cls._intern[attrname] = self.fn(cls)\n', '        return cls._intern[attrname]\n', '\n', '\n', 'class unicode_set(object):\n', '    """\n', '    A set of Unicode characters, for language-specific strings for\n', '    ``alphas``, ``nums``, ``alphanums``, and ``printables``.\n', '    A unicode_set is defined by a list of ranges in the Unicode character\n', '    set, in a class attribute ``_ranges``, such as::\n', '\n', '        _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),]\n', '\n', '    A unicode set can also be defined using multiple inheritance of other unicode sets::\n', '\n', '        class CJK(Chinese, Japanese, Korean):\n', '            pass\n', '    """\n', '    _ranges = []\n', '\n', '    @classmethod\n', '    def _get_chars_for_ranges(cls):\n', '        ret = []\n', '        for cc in cls.__mro__:\n', '            if cc is unicode_set:\n', '                break\n', '            for rr in cc._ranges:\n', '                ret.extend(range(rr[0], rr[-1] + 1))\n', '        return [unichr(c) for c in sorted(set(ret))]\n', '\n', '    @_lazyclassproperty\n', '    def printables(cls):\n', '        "all non-whitespace characters in this range"\n', "        return u''.join(filterfalse(unicode.isspace, cls._get_chars_for_ranges()))\n", '\n', '    @_lazyclassproperty\n', '    def alphas(cls):\n', '        "all alphabetic characters in this range"\n', "        return u''.join(filter(unicode.isalpha, cls._get_chars_for_ranges()))\n", '\n', '    @_lazyclassproperty\n', '    def nums(cls):\n', '        "all numeric digit characters in this range"\n', "        return u''.join(filter(unicode.isdigit, cls._get_chars_for_ranges()))\n", '\n', '    @_lazyclassproperty\n', '    def alphanums(cls):\n', '        "all alphanumeric characters in this range"\n', '        return cls.alphas + cls.nums\n', '\n', '\n', 'class pyparsing_unicode(unicode_set):\n', '    """\n', '    A namespace class for defining common language unicode_sets.\n', '    """\n', '    _ranges = [(32, sys.maxunicode)]\n', '\n', '    class Latin1(unicode_set):\n', '        "Unicode set for Latin-1 Unicode Character Range"\n', '        _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),]\n', '\n', '    class LatinA(unicode_set):\n', '        "Unicode set for Latin-A Unicode Character Range"\n', '        _ranges = [(0x0100, 0x017f),]\n', '\n', '    class LatinB(unicode_set):\n', '        "Unicode set for Latin-B Unicode Character Range"\n', '        _ranges = [(0x0180, 0x024f),]\n', '\n', '    class Greek(unicode_set):\n', '        "Unicode set for Greek Unicode Character Ranges"\n', '        _ranges = [\n', '            (0x0370, 0x03ff), (0x1f00, 0x1f15), (0x1f18, 0x1f1d), (0x1f20, 0x1f45), (0x1f48, 0x1f4d),\n', '            (0x1f50, 0x1f57), (0x1f59,), (0x1f5b,), (0x1f5d,), (0x1f5f, 0x1f7d), (0x1f80, 0x1fb4), (0x1fb6, 0x1fc4),\n', '            (0x1fc6, 0x1fd3), (0x1fd6, 0x1fdb), (0x1fdd, 0x1fef), (0x1ff2, 0x1ff4), (0x1ff6, 0x1ffe),\n', '        ]\n', '\n', '    class Cyrillic(unicode_set):\n', '        "Unicode set for Cyrillic Unicode Character Range"\n', '        _ranges = [(0x0400, 0x04ff)]\n', '\n', '    class Chinese(unicode_set):\n', '        "Unicode set for Chinese Unicode Character Range"\n', '        _ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f),]\n', '\n', '    class Japanese(unicode_set):\n', '        "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"\n', '        _ranges = []\n', '\n', '        class Kanji(unicode_set):\n', '            "Unicode set for Kanji Unicode Character Range"\n', '            _ranges = [(0x4E00, 0x9Fbf), (0x3000, 0x303f),]\n', '\n', '        class Hiragana(unicode_set):\n', '            "Unicode set for Hiragana Unicode Character Range"\n', '            _ranges = [(0x3040, 0x309f),]\n', '\n', '        class Katakana(unicode_set):\n', '            "Unicode set for Katakana  Unicode Character Range"\n', '            _ranges = [(0x30a0, 0x30ff),]\n', '\n', '    class Korean(unicode_set):\n', '        "Unicode set for Korean Unicode Character Range"\n', '        _ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f),]\n', '\n', '    class CJK(Chinese, Japanese, Korean):\n', '        "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"\n', '        pass\n', '\n', '    class Thai(unicode_set):\n', '        "Unicode set for Thai Unicode Character Range"\n', '        _ranges = [(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b),]\n', '\n', '    class Arabic(unicode_set):\n', '        "Unicode set for Arabic Unicode Character Range"\n', '        _ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f),]\n', '\n', '    class Hebrew(unicode_set):\n', '        "Unicode set for Hebrew Unicode Character Range"\n', '        _ranges = [(0x0590, 0x05ff),]\n', '\n', '    class Devanagari(unicode_set):\n', '        "Unicode set for Devanagari Unicode Character Range"\n', '        _ranges = [(0x0900, 0x097f), (0xa8e0, 0xa8ff)]\n', '\n', 'pyparsing_unicode.Japanese._ranges = (pyparsing_unicode.Japanese.Kanji._ranges\n', '                                      + pyparsing_unicode.Japanese.Hiragana._ranges\n', '                                      + pyparsing_unicode.Japanese.Katakana._ranges)\n', '\n', '# define ranges in language character sets\n', 'if PY_3:\n', '    setattr(pyparsing_unicode, u"العربية", pyparsing_unicode.Arabic)\n', '    setattr(pyparsing_unicode, u"中文", pyparsing_unicode.Chinese)\n', '    setattr(pyparsing_unicode, u"кириллица", pyparsing_unicode.Cyrillic)\n', '    setattr(pyparsing_unicode, u"Ελληνικά", pyparsing_unicode.Greek)\n', '    setattr(pyparsing_unicode, u"עִברִית", pyparsing_unicode.Hebrew)\n', '    setattr(pyparsing_unicode, u"日本語", pyparsing_unicode.Japanese)\n', '    setattr(pyparsing_unicode.Japanese, u"漢字", pyparsing_unicode.Japanese.Kanji)\n', '    setattr(pyparsing_unicode.Japanese, u"カタカナ", pyparsing_unicode.Japanese.Katakana)\n', '    setattr(pyparsing_unicode.Japanese, u"ひらがな", pyparsing_unicode.Japanese.Hiragana)\n', '    setattr(pyparsing_unicode, u"한국어", pyparsing_unicode.Korean)\n', '    setattr(pyparsing_unicode, u"ไทย", pyparsing_unicode.Thai)\n', '    setattr(pyparsing_unicode, u"देवनागरी", pyparsing_unicode.Devanagari)\n', '\n', '\n', 'class pyparsing_test:\n', '    """\n', '    namespace class for classes useful in writing unit tests\n', '    """\n', '\n', '    class reset_pyparsing_context:\n', '        """\n', '        Context manager to be used when writing unit tests that modify pyparsing config values:\n', '         - packrat parsing\n', '         - default whitespace characters.\n', '         - default keyword characters\n', '         - literal string auto-conversion class\n', '         - __diag__ settings\n', '\n', '        Example:\n', '            with reset_pyparsing_context():\n', '                # test that literals used to construct a grammar are automatically suppressed\n', '                ParserElement.inlineLiteralsUsing(Suppress)\n', '\n', '                term = Word(alphas) | Word(nums)\n', "                group = Group('(' + term[...] + ')')\n", '\n', "                # assert that the '()' characters are not included in the parsed tokens\n", '                self.assertParseAndCheckLisst(group, "(abc 123 def)", [\'abc\', \'123\', \'def\'])\n', '\n', '            # after exiting context manager, literals are converted to Literal expressions again\n', '        """\n', '\n', '        def __init__(self):\n', '            self._save_context = {}\n', '\n', '        def save(self):\n', '            self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS\n', '            self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS\n', '            self._save_context[\n', '                "literal_string_class"\n', '            ] = ParserElement._literalStringClass\n', '            self._save_context["packrat_enabled"] = ParserElement._packratEnabled\n', '            self._save_context["packrat_parse"] = ParserElement._parse\n', '            self._save_context["__diag__"] = {\n', '                name: getattr(__diag__, name) for name in __diag__._all_names\n', '            }\n', '            self._save_context["__compat__"] = {\n', '                "collect_all_And_tokens": __compat__.collect_all_And_tokens\n', '            }\n', '            return self\n', '\n', '        def restore(self):\n', '            # reset pyparsing global state\n', '            if (\n', '                ParserElement.DEFAULT_WHITE_CHARS\n', '                != self._save_context["default_whitespace"]\n', '            ):\n', '                ParserElement.setDefaultWhitespaceChars(\n', '                    self._save_context["default_whitespace"]\n', '                )\n', '            Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]\n', '            ParserElement.inlineLiteralsUsing(\n', '                self._save_context["literal_string_class"]\n', '            )\n', '            for name, value in self._save_context["__diag__"].items():\n', '                setattr(__diag__, name, value)\n', '            ParserElement._packratEnabled = self._save_context["packrat_enabled"]\n', '            ParserElement._parse = self._save_context["packrat_parse"]\n', '            __compat__.collect_all_And_tokens = self._save_context["__compat__"]\n', '\n', '        def __enter__(self):\n', '            return self.save()\n', '\n', '        def __exit__(self, *args):\n', '            return self.restore()\n', '\n', '    class TestParseResultsAsserts:\n', '        """\n', '        A mixin class to add parse results assertion methods to normal unittest.TestCase classes.\n', '        """\n', '        def assertParseResultsEquals(\n', '            self, result, expected_list=None, expected_dict=None, msg=None\n', '        ):\n', '            """\n', '            Unit test assertion to compare a ParseResults object with an optional expected_list,\n', '            and compare any defined results names with an optional expected_dict.\n', '            """\n', '            if expected_list is not None:\n', '                self.assertEqual(expected_list, result.asList(), msg=msg)\n', '            if expected_dict is not None:\n', '                self.assertEqual(expected_dict, result.asDict(), msg=msg)\n', '\n', '        def assertParseAndCheckList(\n', '            self, expr, test_string, expected_list, msg=None, verbose=True\n', '        ):\n', '            """\n', '            Convenience wrapper assert to test a parser element and input string, and assert that\n', '            the resulting ParseResults.asList() is equal to the expected_list.\n', '            """\n', '            result = expr.parseString(test_string, parseAll=True)\n', '            if verbose:\n', '                print(result.dump())\n', '            self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)\n', '\n', '        def assertParseAndCheckDict(\n', '            self, expr, test_string, expected_dict, msg=None, verbose=True\n', '        ):\n', '            """\n', '            Convenience wrapper assert to test a parser element and input string, and assert that\n', '            the resulting ParseResults.asDict() is equal to the expected_dict.\n', '            """\n', '            result = expr.parseString(test_string, parseAll=True)\n', '            if verbose:\n', '                print(result.dump())\n', '            self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)\n', '\n', '        def assertRunTestResults(\n', '            self, run_tests_report, expected_parse_results=None, msg=None\n', '        ):\n', '            """\n', '            Unit test assertion to evaluate output of ParserElement.runTests(). If a list of\n', '            list-dict tuples is given as the expected_parse_results argument, then these are zipped\n', '            with the report tuples returned by runTests and evaluated using assertParseResultsEquals.\n', '            Finally, asserts that the overall runTests() success value is True.\n', '\n', '            :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests\n', '            :param expected_parse_results (optional): [tuple(str, list, dict, Exception)]\n', '            """\n', '            run_test_success, run_test_results = run_tests_report\n', '\n', '            if expected_parse_results is not None:\n', '                merged = [\n', '                    (rpt[0], rpt[1], expected)\n', '                    for rpt, expected in zip(run_test_results, expected_parse_results)\n', '                ]\n', '                for test_string, result, expected in merged:\n', '                    # expected should be a tuple containing a list and/or a dict or an exception,\n', '                    # and optional failure message string\n', '                    # an empty tuple will skip any result validation\n', '                    fail_msg = next(\n', '                        (exp for exp in expected if isinstance(exp, str)), None\n', '                    )\n', '                    expected_exception = next(\n', '                        (\n', '                            exp\n', '                            for exp in expected\n', '                            if isinstance(exp, type) and issubclass(exp, Exception)\n', '                        ),\n', '                        None,\n', '                    )\n', '                    if expected_exception is not None:\n', '                        with self.assertRaises(\n', '                            expected_exception=expected_exception, msg=fail_msg or msg\n', '                        ):\n', '                            if isinstance(result, Exception):\n', '                                raise result\n', '                    else:\n', '                        expected_list = next(\n', '                            (exp for exp in expected if isinstance(exp, list)), None\n', '                        )\n', '                        expected_dict = next(\n', '                            (exp for exp in expected if isinstance(exp, dict)), None\n', '                        )\n', '                        if (expected_list, expected_dict) != (None, None):\n', '                            self.assertParseResultsEquals(\n', '                                result,\n', '                                expected_list=expected_list,\n', '                                expected_dict=expected_dict,\n', '                                msg=fail_msg or msg,\n', '                            )\n', '                        else:\n', '                            # warning here maybe?\n', '                            print("no validation for {!r}".format(test_string))\n', '\n', '            # do this last, in case some specific test results can be reported instead\n', '            self.assertTrue(\n', '                run_test_success, msg=msg if msg is not None else "failed runTests"\n', '            )\n', '\n', '        @contextmanager\n', '        def assertRaisesParseException(self, exc_type=ParseException, msg=None):\n', '            with self.assertRaises(exc_type, msg=msg):\n', '                yield\n', '\n', '\n', 'if __name__ == "__main__":\n', '\n', '    selectToken    = CaselessLiteral("select")\n', '    fromToken      = CaselessLiteral("from")\n', '\n', '    ident          = Word(alphas, alphanums + "_$")\n', '\n', '    columnName     = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)\n', '    columnNameList = Group(delimitedList(columnName)).setName("columns")\n', "    columnSpec     = ('*' | columnNameList)\n", '\n', '    tableName      = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)\n', '    tableNameList  = Group(delimitedList(tableName)).setName("tables")\n', '\n', '    simpleSQL      = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")\n', '\n', '    # demo runTests method, including embedded comments in test string\n', '    simpleSQL.runTests("""\n', "        # '*' as column list and dotted table name\n", '        select * from SYS.XYZZY\n', '\n', '        # caseless match on "SELECT", and casts back to "select"\n', '        SELECT * from XYZZY, ABC\n', '\n', '        # list of column names, and mixed case SELECT keyword\n', '        Select AA,BB,CC from Sys.dual\n', '\n', '        # multiple tables\n', '        Select A, B, C from Sys.dual, Table2\n', '\n', '        # invalid SELECT keyword - should fail\n', '        Xelect A, B, C from Sys.dual\n', '\n', '        # incomplete command - should fail\n', '        Select\n', '\n', '        # invalid column name - should fail\n', '        Select ^^^ frox Sys.dual\n', '\n', '        """)\n', '\n', '    pyparsing_common.number.runTests("""\n', '        100\n', '        -100\n', '        +100\n', '        3.14159\n', '        6.02e23\n', '        1e-12\n', '        """)\n', '\n', '    # any int or real number, returned as float\n', '    pyparsing_common.fnumber.runTests("""\n', '        100\n', '        -100\n', '        +100\n', '        3.14159\n', '        6.02e23\n', '        1e-12\n', '        """)\n', '\n', '    pyparsing_common.hex_integer.runTests("""\n', '        100\n', '        FF\n', '        """)\n', '\n', '    import uuid\n', '    pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))\n', '    pyparsing_common.uuid.runTests("""\n', '        12345678-1234-5678-1234-567812345678\n', '        """)\n'], '/nix/store/l466cxk1dkpn3jbx3wirraf8abfl183l-python3.9-pyparsing-2.4.7/lib/python3.9/site-packages/pyparsing.py'), '/nix/store/cgxc3jz7idrb1wnb2lard9rvcx6aw2si-python3-3.9.6/lib/python3.9/textwrap.py': (19407, 1.0, ['"""Text wrapping and filling.\n', '"""\n', '\n', '# Copyright (C) 1999-2001 Gregory P. Ward.\n', '# Copyright (C) 2002, 2003 Python Software Foundation.\n', '# Written by Greg Ward <gward@python.net>\n', '\n', 'import re\n', '\n', "__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent', 'indent', 'shorten']\n", '\n', '# Hardcode the recognized whitespace characters to the US-ASCII\n', '# whitespace characters.  The main reason for doing this is that\n', '# some Unicode spaces (like \\u00a0) are non-breaking whitespaces.\n', "_whitespace = '\\t\\n\\x0b\\x0c\\r '\n", '\n', 'class TextWrapper:\n', '    """\n', '    Object for wrapping/filling text.  The public interface consists of\n', '    the wrap() and fill() methods; the other methods are just there for\n', '    subclasses to override in order to tweak the default behaviour.\n', '    If you want to completely replace the main wrapping algorithm,\n', "    you'll probably have to override _wrap_chunks().\n", '\n', '    Several instance attributes control various aspects of wrapping:\n', '      width (default: 70)\n', '        the maximum width of wrapped lines (unless break_long_words\n', '        is false)\n', '      initial_indent (default: "")\n', '        string that will be prepended to the first line of wrapped\n', "        output.  Counts towards the line's width.\n", '      subsequent_indent (default: "")\n', '        string that will be prepended to all lines save the first\n', "        of wrapped output; also counts towards each line's width.\n", '      expand_tabs (default: true)\n', '        Expand tabs in input text to spaces before further processing.\n', "        Each tab will become 0 .. 'tabsize' spaces, depending on its position\n", '        in its line.  If false, each tab is treated as a single character.\n', '      tabsize (default: 8)\n', "        Expand tabs in input text to 0 .. 'tabsize' spaces, unless\n", "        'expand_tabs' is false.\n", '      replace_whitespace (default: true)\n', '        Replace all whitespace characters in the input text by spaces\n', '        after tab expansion.  Note that if expand_tabs is false and\n', '        replace_whitespace is true, every tab will be converted to a\n', '        single space!\n', '      fix_sentence_endings (default: false)\n', '        Ensure that sentence-ending punctuation is always followed\n', '        by two spaces.  Off by default because the algorithm is\n', '        (unavoidably) imperfect.\n', '      break_long_words (default: true)\n', "        Break words longer than 'width'.  If false, those words will not\n", "        be broken, and some lines might be longer than 'width'.\n", '      break_on_hyphens (default: true)\n', '        Allow breaking hyphenated words. If true, wrapping will occur\n', '        preferably on whitespaces and right after hyphens part of\n', '        compound words.\n', '      drop_whitespace (default: true)\n', '        Drop leading and trailing whitespace from lines.\n', '      max_lines (default: None)\n', '        Truncate wrapped lines.\n', "      placeholder (default: ' [...]')\n", '        Append to the last line of truncated text.\n', '    """\n', '\n', '    unicode_whitespace_trans = {}\n', "    uspace = ord(' ')\n", '    for x in _whitespace:\n', '        unicode_whitespace_trans[ord(x)] = uspace\n', '\n', '    # This funky little regex is just the trick for splitting\n', '    # text up into word-wrappable chunks.  E.g.\n', '    #   "Hello there -- you goof-ball, use the -b option!"\n', '    # splits into\n', '    #   Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!\n', '    # (after stripping out empty strings).\n', '    word_punct = r\'[\\w!"\\\'&.,?]\'\n', "    letter = r'[^\\d\\W]'\n", "    whitespace = r'[%s]' % re.escape(_whitespace)\n", "    nowhitespace = '[^' + whitespace[1:]\n", "    wordsep_re = re.compile(r'''\n", '        ( # any whitespace\n', '          %(ws)s+\n', '        | # em-dash between words\n', '          (?<=%(wp)s) -{2,} (?=\\w)\n', '        | # word, possibly hyphenated\n', '          %(nws)s+? (?:\n', '            # hyphenated word\n', '              -(?: (?<=%(lt)s{2}-) | (?<=%(lt)s-%(lt)s-))\n', '              (?= %(lt)s -? %(lt)s)\n', '            | # end of word\n', '              (?=%(ws)s|\\Z)\n', '            | # em-dash\n', '              (?<=%(wp)s) (?=-{2,}\\w)\n', '            )\n', "        )''' % {'wp': word_punct, 'lt': letter,\n", "                'ws': whitespace, 'nws': nowhitespace},\n", '        re.VERBOSE)\n', '    del word_punct, letter, nowhitespace\n', '\n', '    # This less funky little regex just split on recognized spaces. E.g.\n', '    #   "Hello there -- you goof-ball, use the -b option!"\n', '    # splits into\n', '    #   Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/\n', "    wordsep_simple_re = re.compile(r'(%s+)' % whitespace)\n", '    del whitespace\n', '\n', '    # XXX this is not locale- or charset-aware -- string.lowercase\n', '    # is US-ASCII only (and therefore English-only)\n', "    sentence_end_re = re.compile(r'[a-z]'             # lowercase letter\n", "                                 r'[\\.\\!\\?]'          # sentence-ending punct.\n", '                                 r\'[\\"\\\']?\'           # optional end-of-quote\n', "                                 r'\\Z')               # end of chunk\n", '\n', '    def __init__(self,\n', '                 width=70,\n', '                 initial_indent="",\n', '                 subsequent_indent="",\n', '                 expand_tabs=True,\n', '                 replace_whitespace=True,\n', '                 fix_sentence_endings=False,\n', '                 break_long_words=True,\n', '                 drop_whitespace=True,\n', '                 break_on_hyphens=True,\n', '                 tabsize=8,\n', '                 *,\n', '                 max_lines=None,\n', "                 placeholder=' [...]'):\n", '        self.width = width\n', '        self.initial_indent = initial_indent\n', '        self.subsequent_indent = subsequent_indent\n', '        self.expand_tabs = expand_tabs\n', '        self.replace_whitespace = replace_whitespace\n', '        self.fix_sentence_endings = fix_sentence_endings\n', '        self.break_long_words = break_long_words\n', '        self.drop_whitespace = drop_whitespace\n', '        self.break_on_hyphens = break_on_hyphens\n', '        self.tabsize = tabsize\n', '        self.max_lines = max_lines\n', '        self.placeholder = placeholder\n', '\n', '\n', '    # -- Private methods -----------------------------------------------\n', '    # (possibly useful for subclasses to override)\n', '\n', '    def _munge_whitespace(self, text):\n', '        """_munge_whitespace(text : string) -> string\n', '\n', '        Munge whitespace in text: expand tabs and convert all other\n', '        whitespace characters to spaces.  Eg. " foo\\\\tbar\\\\n\\\\nbaz"\n', '        becomes " foo    bar  baz".\n', '        """\n', '        if self.expand_tabs:\n', '            text = text.expandtabs(self.tabsize)\n', '        if self.replace_whitespace:\n', '            text = text.translate(self.unicode_whitespace_trans)\n', '        return text\n', '\n', '\n', '    def _split(self, text):\n', '        """_split(text : string) -> [string]\n', '\n', '        Split the text to wrap into indivisible chunks.  Chunks are\n', '        not quite the same as words; see _wrap_chunks() for full\n', '        details.  As an example, the text\n', '          Look, goof-ball -- use the -b option!\n', '        breaks into the following chunks:\n', "          'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',\n", "          'use', ' ', 'the', ' ', '-b', ' ', 'option!'\n", '        if break_on_hyphens is True, or in:\n', "          'Look,', ' ', 'goof-ball', ' ', '--', ' ',\n", "          'use', ' ', 'the', ' ', '-b', ' ', option!'\n", '        otherwise.\n', '        """\n', '        if self.break_on_hyphens is True:\n', '            chunks = self.wordsep_re.split(text)\n', '        else:\n', '            chunks = self.wordsep_simple_re.split(text)\n', '        chunks = [c for c in chunks if c]\n', '        return chunks\n', '\n', '    def _fix_sentence_endings(self, chunks):\n', '        """_fix_sentence_endings(chunks : [string])\n', '\n', "        Correct for sentence endings buried in 'chunks'.  Eg. when the\n", '        original text contains "... foo.\\\\nBar ...", munge_whitespace()\n', '        and split() will convert that to [..., "foo.", " ", "Bar", ...]\n', '        which has one too few spaces; this method simply changes the one\n', '        space to two.\n', '        """\n', '        i = 0\n', '        patsearch = self.sentence_end_re.search\n', '        while i < len(chunks)-1:\n', '            if chunks[i+1] == " " and patsearch(chunks[i]):\n', '                chunks[i+1] = "  "\n', '                i += 2\n', '            else:\n', '                i += 1\n', '\n', '    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):\n', '        """_handle_long_word(chunks : [string],\n', '                             cur_line : [string],\n', '                             cur_len : int, width : int)\n', '\n', '        Handle a chunk of text (most likely a word, not whitespace) that\n', '        is too long to fit in any line.\n', '        """\n', '        # Figure out when indent is larger than the specified width, and make\n', '        # sure at least one character is stripped off on every pass\n', '        if width < 1:\n', '            space_left = 1\n', '        else:\n', '            space_left = width - cur_len\n', '\n', "        # If we're allowed to break long words, then do so: put as much\n", '        # of the next chunk onto the current line as will fit.\n', '        if self.break_long_words:\n', '            cur_line.append(reversed_chunks[-1][:space_left])\n', '            reversed_chunks[-1] = reversed_chunks[-1][space_left:]\n', '\n', '        # Otherwise, we have to preserve the long word intact.  Only add\n', "        # it to the current line if there's nothing already there --\n", '        # that minimizes how much we violate the width constraint.\n', '        elif not cur_line:\n', '            cur_line.append(reversed_chunks.pop())\n', '\n', "        # If we're not allowed to break long words, and there's already\n", '        # text on the current line, do nothing.  Next time through the\n', "        # main loop of _wrap_chunks(), we'll wind up here again, but\n", '        # cur_len will be zero, so the next line will be entirely\n', "        # devoted to the long word that we can't handle right now.\n", '\n', '    def _wrap_chunks(self, chunks):\n', '        """_wrap_chunks(chunks : [string]) -> [string]\n', '\n', '        Wrap a sequence of text chunks and return a list of lines of\n', "        length 'self.width' or less.  (If 'break_long_words' is false,\n", '        some lines may be longer than this.)  Chunks correspond roughly\n', '        to words and the whitespace between them: each chunk is\n', "        indivisible (modulo 'break_long_words'), but a line break can\n", '        come between any two chunks.  Chunks should not have internal\n', '        whitespace; ie. a chunk is either all whitespace or a "word".\n', '        Whitespace chunks will be removed from the beginning and end of\n', '        lines, but apart from that whitespace is preserved.\n', '        """\n', '        lines = []\n', '        if self.width <= 0:\n', '            raise ValueError("invalid width %r (must be > 0)" % self.width)\n', '        if self.max_lines is not None:\n', '            if self.max_lines > 1:\n', '                indent = self.subsequent_indent\n', '            else:\n', '                indent = self.initial_indent\n', '            if len(indent) + len(self.placeholder.lstrip()) > self.width:\n', '                raise ValueError("placeholder too large for max width")\n', '\n', '        # Arrange in reverse order so items can be efficiently popped\n', '        # from a stack of chucks.\n', '        chunks.reverse()\n', '\n', '        while chunks:\n', '\n', '            # Start the list of chunks that will make up the current line.\n', '            # cur_len is just the length of all the chunks in cur_line.\n', '            cur_line = []\n', '            cur_len = 0\n', '\n', '            # Figure out which static string will prefix this line.\n', '            if lines:\n', '                indent = self.subsequent_indent\n', '            else:\n', '                indent = self.initial_indent\n', '\n', '            # Maximum width for this line.\n', '            width = self.width - len(indent)\n', '\n', '            # First chunk on line is whitespace -- drop it, unless this\n', '            # is the very beginning of the text (ie. no lines started yet).\n', "            if self.drop_whitespace and chunks[-1].strip() == '' and lines:\n", '                del chunks[-1]\n', '\n', '            while chunks:\n', '                l = len(chunks[-1])\n', '\n', '                # Can at least squeeze this chunk onto the current line.\n', '                if cur_len + l <= width:\n', '                    cur_line.append(chunks.pop())\n', '                    cur_len += l\n', '\n', '                # Nope, this line is full.\n', '                else:\n', '                    break\n', '\n', '            # The current line is full, and the next chunk is too big to\n', '            # fit on *any* line (not just this one).\n', '            if chunks and len(chunks[-1]) > width:\n', '                self._handle_long_word(chunks, cur_line, cur_len, width)\n', '                cur_len = sum(map(len, cur_line))\n', '\n', '            # If the last chunk on this line is all whitespace, drop it.\n', "            if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':\n", '                cur_len -= len(cur_line[-1])\n', '                del cur_line[-1]\n', '\n', '            if cur_line:\n', '                if (self.max_lines is None or\n', '                    len(lines) + 1 < self.max_lines or\n', '                    (not chunks or\n', '                     self.drop_whitespace and\n', '                     len(chunks) == 1 and\n', '                     not chunks[0].strip()) and cur_len <= width):\n', '                    # Convert current line back to a string and store it in\n', '                    # list of all lines (return value).\n', "                    lines.append(indent + ''.join(cur_line))\n", '                else:\n', '                    while cur_line:\n', '                        if (cur_line[-1].strip() and\n', '                            cur_len + len(self.placeholder) <= width):\n', '                            cur_line.append(self.placeholder)\n', "                            lines.append(indent + ''.join(cur_line))\n", '                            break\n', '                        cur_len -= len(cur_line[-1])\n', '                        del cur_line[-1]\n', '                    else:\n', '                        if lines:\n', '                            prev_line = lines[-1].rstrip()\n', '                            if (len(prev_line) + len(self.placeholder) <=\n', '                                    self.width):\n', '                                lines[-1] = prev_line + self.placeholder\n', '                                break\n', '                        lines.append(indent + self.placeholder.lstrip())\n', '                    break\n', '\n', '        return lines\n', '\n', '    def _split_chunks(self, text):\n', '        text = self._munge_whitespace(text)\n', '        return self._split(text)\n', '\n', '    # -- Public interface ----------------------------------------------\n', '\n', '    def wrap(self, text):\n', '        """wrap(text : string) -> [string]\n', '\n', "        Reformat the single paragraph in 'text' so it fits in lines of\n", "        no more than 'self.width' columns, and return a list of wrapped\n", "        lines.  Tabs in 'text' are expanded with string.expandtabs(),\n", '        and all other whitespace characters (including newline) are\n', '        converted to space.\n', '        """\n', '        chunks = self._split_chunks(text)\n', '        if self.fix_sentence_endings:\n', '            self._fix_sentence_endings(chunks)\n', '        return self._wrap_chunks(chunks)\n', '\n', '    def fill(self, text):\n', '        """fill(text : string) -> string\n', '\n', "        Reformat the single paragraph in 'text' to fit in lines of no\n", "        more than 'self.width' columns, and return a new string\n", '        containing the entire wrapped paragraph.\n', '        """\n', '        return "\\n".join(self.wrap(text))\n', '\n', '\n', '# -- Convenience interface ---------------------------------------------\n', '\n', 'def wrap(text, width=70, **kwargs):\n', '    """Wrap a single paragraph of text, returning a list of wrapped lines.\n', '\n', "    Reformat the single paragraph in 'text' so it fits in lines of no\n", "    more than 'width' columns, and return a list of wrapped lines.  By\n", "    default, tabs in 'text' are expanded with string.expandtabs(), and\n", '    all other whitespace characters (including newline) are converted to\n', '    space.  See TextWrapper class for available keyword args to customize\n', '    wrapping behaviour.\n', '    """\n', '    w = TextWrapper(width=width, **kwargs)\n', '    return w.wrap(text)\n', '\n', 'def fill(text, width=70, **kwargs):\n', '    """Fill a single paragraph of text, returning a new string.\n', '\n', "    Reformat the single paragraph in 'text' to fit in lines of no more\n", "    than 'width' columns, and return a new string containing the entire\n", '    wrapped paragraph.  As with wrap(), tabs are expanded and other\n', '    whitespace characters converted to space.  See TextWrapper class for\n', '    available keyword args to customize wrapping behaviour.\n', '    """\n', '    w = TextWrapper(width=width, **kwargs)\n', '    return w.fill(text)\n', '\n', 'def shorten(text, width, **kwargs):\n', '    """Collapse and truncate the given text to fit in the given width.\n', '\n', '    The text first has its whitespace collapsed.  If it then fits in\n', '    the *width*, it is returned as is.  Otherwise, as many words\n', '    as possible are joined and then the placeholder is appended::\n', '\n', '        >>> textwrap.shorten("Hello  world!", width=12)\n', "        'Hello world!'\n", '        >>> textwrap.shorten("Hello  world!", width=11)\n', "        'Hello [...]'\n", '    """\n', '    w = TextWrapper(width=width, max_lines=1, **kwargs)\n', "    return w.fill(' '.join(text.strip().split()))\n", '\n', '\n', '# -- Loosely related functionality -------------------------------------\n', '\n', "_whitespace_only_re = re.compile('^[ \\t]+


, re.MULTILINE)\n", "_leading_whitespace_re = re.compile('(^[ \\t]*)(?:[^ \\t\\n])', re.MULTILINE)\n", '\n', 'def dedent(text):\n', '    """Remove any common leading whitespace from every line in `text`.\n', '\n', '    This can be used to make triple-quoted strings line up with the left\n', '    edge of the display, while still presenting them in the source code\n', '    in indented form.\n', '\n', '    Note that tabs and spaces are both treated as whitespace, but they\n', '    are not equal: the lines "  hello" and "\\\\thello" are\n', '    considered to have no common leading whitespace.\n', '\n', '    Entirely blank lines are normalized to a newline character.\n', '    """\n', '    # Look for the longest leading string of spaces and tabs common to\n', '    # all lines.\n', '    margin = None\n', "    text = _whitespace_only_re.sub('', text)\n", '    indents = _leading_whitespace_re.findall(text)\n', '    for indent in indents:\n', '        if margin is None:\n', '            margin = indent\n', '\n', '        # Current line more deeply indented than previous winner:\n', '        # no change (previous winner is still on top).\n', '        elif indent.startswith(margin):\n', '            pass\n', '\n', '        # Current line consistent with and no deeper than previous winner:\n', "        # it's the new winner.\n", '        elif margin.startswith(indent):\n', '            margin = indent\n', '\n', '        # Find the largest common whitespace between current line and previous\n', '        # winner.\n', '        else:\n', '            for i, (x, y) in enumerate(zip(margin, indent)):\n', '                if x != y:\n', '                    margin = margin[:i]\n', '                    break\n', '\n', '    # sanity check (testing/debugging only)\n', '    if 0 and margin:\n', '        for line in text.split("\\n"):\n', '            assert not line or line.startswith(margin), \\\n', '                   "line = %r, margin = %r" % (line, margin)\n', '\n', '    if margin:\n', "        text = re.sub(r'(?m)^' + margin, '', text)\n", '    return text\n', '\n', '\n', 'def indent(text, prefix, predicate=None):\n', '    """Adds \'prefix\' to the beginning of selected lines in \'text\'.\n', '\n', "    If 'predicate' is provided, 'prefix' will only be added to the lines\n", "    where 'predicate(line)' is True. If 'predicate' is not provided,\n", "    it will default to adding 'prefix' to all non-empty lines that do not\n", '    consist solely of whitespace characters.\n', '    """\n', '    if predicate is None:\n', '        def predicate(line):\n', '            return line.strip()\n', '\n', '    def prefixed_lines():\n', '        for line in text.splitlines(True):\n', '            yield (prefix + line if predicate(line) else line)\n', "    return ''.join(prefixed_lines())\n", '\n', '\n', 'if __name__ == "__main__":\n', '    #print dedent("\\tfoo\\n\\tbar")\n', '    #print dedent("  \\thello there\\n  \\t  how are you?")\n', '    print(dedent("Hello there.\\n  This is indented."))\n'], '/nix/store/cgxc3jz7idrb1wnb2lard9rvcx6aw2si-python3-3.9.6/lib/python3.9/textwrap.py'), '/nix/store/cgxc3jz7idrb1wnb2lard9rvcx6aw2si-python3-3.9.6/lib/python3.9/inspect.py': (118883, 1.0, ['"""Get useful information from live Python objects.\n', '\n', 'This module encapsulates the interface provided by the internal special\n', 'attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion.\n', 'It also provides some help for examining source code and class layout.\n', '\n', 'Here are some of the useful functions provided by this module:\n', '\n', '    ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),\n', '        isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),\n', '        isroutine() - check object types\n', '    getmembers() - get members of an object that satisfy a given condition\n', '\n', "    getfile(), getsourcefile(), getsource() - find an object's source code\n", '    getdoc(), getcomments() - get documentation on an object\n', '    getmodule() - determine the module that an object came from\n', '    getclasstree() - arrange classes so as to represent their hierarchy\n', '\n', '    getargvalues(), getcallargs() - get info about function arguments\n', '    getfullargspec() - same, with support for Python 3 features\n', '    formatargvalues() - format an argument spec\n', '    getouterframes(), getinnerframes() - get info about frames\n', '    currentframe() - get the current stack frame\n', '    stack(), trace() - get info about frames on the stack or in a traceback\n', '\n', '    signature() - get a Signature object for the callable\n', '"""\n', '\n', '# This module is in the public domain.  No warranties.\n', '\n', "__author__ = ('Ka-Ping Yee <ping@lfw.org>',\n", "              'Yury Selivanov <yselivanov@sprymix.com>')\n", '\n', 'import abc\n', 'import ast\n', 'import dis\n', 'import collections.abc\n', 'import enum\n', 'import importlib.machinery\n', 'import itertools\n', 'import linecache\n', 'import os\n', 'import re\n', 'import sys\n', 'import tokenize\n', 'import token\n', 'import types\n', 'import warnings\n', 'import functools\n', 'import builtins\n', 'from operator import attrgetter\n', 'from collections import namedtuple, OrderedDict\n', '\n', '# Create constants for the compiler flags in Include/code.h\n', '# We try to get them from dis to avoid duplication\n', 'mod_dict = globals()\n', 'for k, v in dis.COMPILER_FLAG_NAMES.items():\n', '    mod_dict["CO_" + v] = k\n', '\n', '# See Include/object.h\n', 'TPFLAGS_IS_ABSTRACT = 1 << 20\n', '\n', '# ----------------------------------------------------------- type-checking\n', 'def ismodule(object):\n', '    """Return true if the object is a module.\n', '\n', '    Module objects provide these attributes:\n', '        __cached__      pathname to byte compiled file\n', '        __doc__         documentation string\n', '        __file__        filename (missing for built-in modules)"""\n', '    return isinstance(object, types.ModuleType)\n', '\n', 'def isclass(object):\n', '    """Return true if the object is a class.\n', '\n', '    Class objects provide these attributes:\n', '        __doc__         documentation string\n', '        __module__      name of module in which this class was defined"""\n', '    return isinstance(object, type)\n', '\n', 'def ismethod(object):\n', '    """Return true if the object is an instance method.\n', '\n', '    Instance method objects provide these attributes:\n', '        __doc__         documentation string\n', '        __name__        name with which this method was defined\n', '        __func__        function object containing implementation of method\n', '        __self__        instance to which this method is bound"""\n', '    return isinstance(object, types.MethodType)\n', '\n', 'def ismethoddescriptor(object):\n', '    """Return true if the object is a method descriptor.\n', '\n', '    But not if ismethod() or isclass() or isfunction() are true.\n', '\n', '    This is new in Python 2.2, and, for example, is true of int.__add__.\n', '    An object passing this test has a __get__ attribute but not a __set__\n', '    attribute, but beyond that the set of attributes varies.  __name__ is\n', '    usually sensible, and __doc__ often is.\n', '\n', '    Methods implemented via descriptors that also pass one of the other\n', '    tests return false from the ismethoddescriptor() test, simply because\n', '    the other tests promise more -- you can, e.g., count on having the\n', '    __func__ attribute (etc) when an object passes ismethod()."""\n', '    if isclass(object) or ismethod(object) or isfunction(object):\n', '        # mutual exclusion\n', '        return False\n', '    tp = type(object)\n', '    return hasattr(tp, "__get__") and not hasattr(tp, "__set__")\n', '\n', 'def isdatadescriptor(object):\n', '    """Return true if the object is a data descriptor.\n', '\n', '    Data descriptors have a __set__ or a __delete__ attribute.  Examples are\n', '    properties (defined in Python) and getsets and members (defined in C).\n', '    Typically, data descriptors will also have __name__ and __doc__ attributes\n', '    (properties, getsets, and members have both of these attributes), but this\n', '    is not guaranteed."""\n', '    if isclass(object) or ismethod(object) or isfunction(object):\n', '        # mutual exclusion\n', '        return False\n', '    tp = type(object)\n', '    return hasattr(tp, "__set__") or hasattr(tp, "__delete__")\n', '\n', "if hasattr(types, 'MemberDescriptorType'):\n", '    # CPython and equivalent\n', '    def ismemberdescriptor(object):\n', '        """Return true if the object is a member descriptor.\n', '\n', '        Member descriptors are specialized descriptors defined in extension\n', '        modules."""\n', '        return isinstance(object, types.MemberDescriptorType)\n', 'else:\n', '    # Other implementations\n', '    def ismemberdescriptor(object):\n', '        """Return true if the object is a member descriptor.\n', '\n', '        Member descriptors are specialized descriptors defined in extension\n', '        modules."""\n', '        return False\n', '\n', "if hasattr(types, 'GetSetDescriptorType'):\n", '    # CPython and equivalent\n', '    def isgetsetdescriptor(object):\n', '        """Return true if the object is a getset descriptor.\n', '\n', '        getset descriptors are specialized descriptors defined in extension\n', '        modules."""\n', '        return isinstance(object, types.GetSetDescriptorType)\n', 'else:\n', '    # Other implementations\n', '    def isgetsetdescriptor(object):\n', '        """Return true if the object is a getset descriptor.\n', '\n', '        getset descriptors are specialized descriptors defined in extension\n', '        modules."""\n', '        return False\n', '\n', 'def isfunction(object):\n', '    """Return true if the object is a user-defined function.\n', '\n', '    Function objects provide these attributes:\n', '        __doc__         documentation string\n', '        __name__        name with which this function was defined\n', '        __code__        code object containing compiled function bytecode\n', '        __defaults__    tuple of any default values for arguments\n', '        __globals__     global namespace in which this function was defined\n', '        __annotations__ dict of parameter annotations\n', '        __kwdefaults__  dict of keyword only parameters with defaults"""\n', '    return isinstance(object, types.FunctionType)\n', '\n', 'def _has_code_flag(f, flag):\n', '    """Return true if ``f`` is a function (or a method or functools.partial\n', '    wrapper wrapping a function) whose code object has the given ``flag``\n', '    set in its flags."""\n', '    while ismethod(f):\n', '        f = f.__func__\n', '    f = functools._unwrap_partial(f)\n', '    if not isfunction(f):\n', '        return False\n', '    return bool(f.__code__.co_flags & flag)\n', '\n', 'def isgeneratorfunction(obj):\n', '    """Return true if the object is a user-defined generator function.\n', '\n', '    Generator function objects provide the same attributes as functions.\n', '    See help(isfunction) for a list of attributes."""\n', '    return _has_code_flag(obj, CO_GENERATOR)\n', '\n', 'def iscoroutinefunction(obj):\n', '    """Return true if the object is a coroutine function.\n', '\n', '    Coroutine functions are defined with "async def" syntax.\n', '    """\n', '    return _has_code_flag(obj, CO_COROUTINE)\n', '\n', 'def isasyncgenfunction(obj):\n', '    """Return true if the object is an asynchronous generator function.\n', '\n', '    Asynchronous generator functions are defined with "async def"\n', '    syntax and have "yield" expressions in their body.\n', '    """\n', '    return _has_code_flag(obj, CO_ASYNC_GENERATOR)\n', '\n', 'def isasyncgen(object):\n', '    """Return true if the object is an asynchronous generator."""\n', '    return isinstance(object, types.AsyncGeneratorType)\n', '\n', 'def isgenerator(object):\n', '    """Return true if the object is a generator.\n', '\n', '    Generator objects provide these attributes:\n', '        __iter__        defined to support iteration over container\n', '        close           raises a new GeneratorExit exception inside the\n', '                        generator to terminate the iteration\n', '        gi_code         code object\n', '        gi_frame        frame object or possibly None once the generator has\n', '                        been exhausted\n', '        gi_running      set to 1 when generator is executing, 0 otherwise\n', '        next            return the next item from the container\n', '        send            resumes the generator and "sends" a value that becomes\n', '                        the result of the current yield-expression\n', '        throw           used to raise an exception inside the generator"""\n', '    return isinstance(object, types.GeneratorType)\n', '\n', 'def iscoroutine(object):\n', '    """Return true if the object is a coroutine."""\n', '    return isinstance(object, types.CoroutineType)\n', '\n', 'def isawaitable(object):\n', '    """Return true if object can be passed to an ``await`` expression."""\n', '    return (isinstance(object, types.CoroutineType) or\n', '            isinstance(object, types.GeneratorType) and\n', '                bool(object.gi_code.co_flags & CO_ITERABLE_COROUTINE) or\n', '            isinstance(object, collections.abc.Awaitable))\n', '\n', 'def istraceback(object):\n', '    """Return true if the object is a traceback.\n', '\n', '    Traceback objects provide these attributes:\n', '        tb_frame        frame object at this level\n', '        tb_lasti        index of last attempted instruction in bytecode\n', '        tb_lineno       current line number in Python source code\n', '        tb_next         next inner traceback object (called by this level)"""\n', '    return isinstance(object, types.TracebackType)\n', '\n', 'def isframe(object):\n', '    """Return true if the object is a frame object.\n', '\n', '    Frame objects provide these attributes:\n', "        f_back          next outer frame object (this frame's caller)\n", '        f_builtins      built-in namespace seen by this frame\n', '        f_code          code object being executed in this frame\n', '        f_globals       global namespace seen by this frame\n', '        f_lasti         index of last attempted instruction in bytecode\n', '        f_lineno        current line number in Python source code\n', '        f_locals        local namespace seen by this frame\n', '        f_trace         tracing function for this frame, or None"""\n', '    return isinstance(object, types.FrameType)\n', '\n', 'def iscode(object):\n', '    """Return true if the object is a code object.\n', '\n', '    Code objects provide these attributes:\n', '        co_argcount         number of arguments (not including *, ** args\n', '                            or keyword only arguments)\n', '        co_code             string of raw compiled bytecode\n', '        co_cellvars         tuple of names of cell variables\n', '        co_consts           tuple of constants used in the bytecode\n', '        co_filename         name of file in which this code object was created\n', '        co_firstlineno      number of first line in Python source code\n', '        co_flags            bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg\n', '                            | 16=nested | 32=generator | 64=nofree | 128=coroutine\n', '                            | 256=iterable_coroutine | 512=async_generator\n', '        co_freevars         tuple of names of free variables\n', '        co_posonlyargcount  number of positional only arguments\n', '        co_kwonlyargcount   number of keyword only arguments (not including ** arg)\n', '        co_lnotab           encoded mapping of line numbers to bytecode indices\n', '        co_name             name with which this code object was defined\n', '        co_names            tuple of names of local variables\n', '        co_nlocals          number of local variables\n', '        co_stacksize        virtual machine stack space required\n', '        co_varnames         tuple of names of arguments and local variables"""\n', '    return isinstance(object, types.CodeType)\n', '\n', 'def isbuiltin(object):\n', '    """Return true if the object is a built-in function or method.\n', '\n', '    Built-in functions and methods provide these attributes:\n', '        __doc__         documentation string\n', '        __name__        original name of this function or method\n', '        __self__        instance to which a method is bound, or None"""\n', '    return isinstance(object, types.BuiltinFunctionType)\n', '\n', 'def isroutine(object):\n', '    """Return true if the object is any kind of function or method."""\n', '    return (isbuiltin(object)\n', '            or isfunction(object)\n', '            or ismethod(object)\n', '            or ismethoddescriptor(object))\n', '\n', 'def isabstract(object):\n', '    """Return true if the object is an abstract base class (ABC)."""\n', '    if not isinstance(object, type):\n', '        return False\n', '    if object.__flags__ & TPFLAGS_IS_ABSTRACT:\n', '        return True\n', '    if not issubclass(type(object), abc.ABCMeta):\n', '        return False\n', "    if hasattr(object, '__abstractmethods__'):\n", '        # It looks like ABCMeta.__new__ has finished running;\n', '        # TPFLAGS_IS_ABSTRACT should have been accurate.\n', '        return False\n', "    # It looks like ABCMeta.__new__ has not finished running yet; we're\n", "    # probably in __init_subclass__. We'll look for abstractmethods manually.\n", '    for name, value in object.__dict__.items():\n', '        if getattr(value, "__isabstractmethod__", False):\n', '            return True\n', '    for base in object.__bases__:\n', '        for name in getattr(base, "__abstractmethods__", ()):\n', '            value = getattr(object, name, None)\n', '            if getattr(value, "__isabstractmethod__", False):\n', '                return True\n', '    return False\n', '\n', 'def getmembers(object, predicate=None):\n', '    """Return all members of an object as (name, value) pairs sorted by name.\n', '    Optionally, only return members that satisfy a given predicate."""\n', '    if isclass(object):\n', '        mro = (object,) + getmro(object)\n', '    else:\n', '        mro = ()\n', '    results = []\n', '    processed = set()\n', '    names = dir(object)\n', '    # :dd any DynamicClassAttributes to the list of names if object is a class;\n', '    # this may result in duplicate entries if, for example, a virtual\n', '    # attribute with the same name as a DynamicClassAttribute exists\n', '    try:\n', '        for base in object.__bases__:\n', '            for k, v in base.__dict__.items():\n', '                if isinstance(v, types.DynamicClassAttribute):\n', '                    names.append(k)\n', '    except AttributeError:\n', '        pass\n', '    for key in names:\n', "        # First try to get the value via getattr.  Some descriptors don't\n", '        # like calling their __get__ (see bug #1785), so fall back to\n', '        # looking in the __dict__.\n', '        try:\n', '            value = getattr(object, key)\n', '            # handle the duplicate key\n', '            if key in processed:\n', '                raise AttributeError\n', '        except AttributeError:\n', '            for base in mro:\n', '                if key in base.__dict__:\n', '                    value = base.__dict__[key]\n', '                    break\n', '            else:\n', '                # could be a (currently) missing slot member, or a buggy\n', '                # __dir__; discard and move on\n', '                continue\n', '        if not predicate or predicate(value):\n', '            results.append((key, value))\n', '        processed.add(key)\n', '    results.sort(key=lambda pair: pair[0])\n', '    return results\n', '\n', "Attribute = namedtuple('Attribute', 'name kind defining_class object')\n", '\n', 'def classify_class_attrs(cls):\n', '    """Return list of attribute-descriptor tuples.\n', '\n', '    For each name in dir(cls), the return list contains a 4-tuple\n', '    with these elements:\n', '\n', '        0. The name (a string).\n', '\n', '        1. The kind of attribute this is, one of these strings:\n', "               'class method'    created via classmethod()\n", "               'static method'   created via staticmethod()\n", "               'property'        created via property()\n", "               'method'          any other flavor of method or descriptor\n", "               'data'            not a method\n", '\n', '        2. The class which defined this attribute (a class).\n', '\n', '        3. The object as obtained by calling getattr; if this fails, or if the\n', "           resulting object does not live anywhere in the class' mro (including\n", "           metaclasses) then the object is looked up in the defining class's\n", '           dict (found by walking the mro).\n', '\n', '    If one of the items in dir(cls) is stored in the metaclass it will now\n', '    be discovered and not have None be listed as the class in which it was\n', '    defined.  Any items whose home class cannot be discovered are skipped.\n', '    """\n', '\n', '    mro = getmro(cls)\n', '    metamro = getmro(type(cls)) # for attributes stored in the metaclass\n', '    metamro = tuple(cls for cls in metamro if cls not in (type, object))\n', '    class_bases = (cls,) + mro\n', '    all_bases = class_bases + metamro\n', '    names = dir(cls)\n', '    # :dd any DynamicClassAttributes to the list of names;\n', '    # this may result in duplicate entries if, for example, a virtual\n', '    # attribute with the same name as a DynamicClassAttribute exists.\n', '    for base in mro:\n', '        for k, v in base.__dict__.items():\n', '            if isinstance(v, types.DynamicClassAttribute):\n', '                names.append(k)\n', '    result = []\n', '    processed = set()\n', '\n', '    for name in names:\n', '        # Get the object associated with the name, and where it was defined.\n', '        # Normal objects will be looked up with both getattr and directly in\n', "        # its class' dict (in case getattr fails [bug #1785], and also to look\n", '        # for a docstring).\n', '        # For DynamicClassAttributes on the second pass we only look in the\n', "        # class's dict.\n", '        #\n', '        # Getting an obj from the __dict__ sometimes reveals more than\n', '        # using getattr.  Static and class methods are dramatic examples.\n', '        homecls = None\n', '        get_obj = None\n', '        dict_obj = None\n', '        if name not in processed:\n', '            try:\n', "                if name == '__dict__':\n", '                    raise Exception("__dict__ is special, don\'t want the proxy")\n', '                get_obj = getattr(cls, name)\n', '            except Exception as exc:\n', '                pass\n', '            else:\n', '                homecls = getattr(get_obj, "__objclass__", homecls)\n', '                if homecls not in class_bases:\n', '                    # if the resulting object does not live somewhere in the\n', '                    # mro, drop it and search the mro manually\n', '                    homecls = None\n', '                    last_cls = None\n', '                    # first look in the classes\n', '                    for srch_cls in class_bases:\n', '                        srch_obj = getattr(srch_cls, name, None)\n', '                        if srch_obj is get_obj:\n', '                            last_cls = srch_cls\n', '                    # then check the metaclasses\n', '                    for srch_cls in metamro:\n', '                        try:\n', '                            srch_obj = srch_cls.__getattr__(cls, name)\n', '                        except AttributeError:\n', '                            continue\n', '                        if srch_obj is get_obj:\n', '                            last_cls = srch_cls\n', '                    if last_cls is not None:\n', '                        homecls = last_cls\n', '        for base in all_bases:\n', '            if name in base.__dict__:\n', '                dict_obj = base.__dict__[name]\n', '                if homecls not in metamro:\n', '                    homecls = base\n', '                break\n', '        if homecls is None:\n', '            # unable to locate the attribute anywhere, most likely due to\n', '            # buggy custom __dir__; discard and move on\n', '            continue\n', '        obj = get_obj if get_obj is not None else dict_obj\n', '        # Classify the object or its descriptor.\n', '        if isinstance(dict_obj, (staticmethod, types.BuiltinMethodType)):\n', '            kind = "static method"\n', '            obj = dict_obj\n', '        elif isinstance(dict_obj, (classmethod, types.ClassMethodDescriptorType)):\n', '            kind = "class method"\n', '            obj = dict_obj\n', '        elif isinstance(dict_obj, property):\n', '            kind = "property"\n', '            obj = dict_obj\n', '        elif isroutine(obj):\n', '            kind = "method"\n', '        else:\n', '            kind = "data"\n', '        result.append(Attribute(name, kind, homecls, obj))\n', '        processed.add(name)\n', '    return result\n', '\n', '# ----------------------------------------------------------- class helpers\n', '\n', 'def getmro(cls):\n', '    "Return tuple of base classes (including cls) in method resolution order."\n', '    return cls.__mro__\n', '\n', '# -------------------------------------------------------- function helpers\n', '\n', 'def unwrap(func, *, stop=None):\n', '    """Get the object wrapped by *func*.\n', '\n', '   Follows the chain of :attr:`__wrapped__` attributes returning the last\n', '   object in the chain.\n', '\n', '   *stop* is an optional callback accepting an object in the wrapper chain\n', '   as its sole argument that allows the unwrapping to be terminated early if\n', '   the callback returns a true value. If the callback never returns a true\n', '   value, the last object in the chain is returned as usual. For example,\n', '   :func:`signature` uses this to stop unwrapping if any object in the\n', '   chain has a ``__signature__`` attribute defined.\n', '\n', '   :exc:`ValueError` is raised if a cycle is encountered.\n', '\n', '    """\n', '    if stop is None:\n', '        def _is_wrapper(f):\n', "            return hasattr(f, '__wrapped__')\n", '    else:\n', '        def _is_wrapper(f):\n', "            return hasattr(f, '__wrapped__') and not stop(f)\n", '    f = func  # remember the original func for error reporting\n', '    # Memoise by id to tolerate non-hashable objects, but store objects to\n', "    # ensure they aren't destroyed, which would allow their IDs to be reused.\n", '    memo = {id(f): f}\n', '    recursion_limit = sys.getrecursionlimit()\n', '    while _is_wrapper(func):\n', '        func = func.__wrapped__\n', '        id_func = id(func)\n', '        if (id_func in memo) or (len(memo) >= recursion_limit):\n', "            raise ValueError('wrapper loop when unwrapping {!r}'.format(f))\n", '        memo[id_func] = func\n', '    return func\n', '\n', '# -------------------------------------------------- source code extraction\n', 'def indentsize(line):\n', '    """Return the indent size, in spaces, at the start of a line of text."""\n', '    expline = line.expandtabs()\n', '    return len(expline) - len(expline.lstrip())\n', '\n', 'def _findclass(func):\n', '    cls = sys.modules.get(func.__module__)\n', '    if cls is None:\n', '        return None\n', "    for name in func.__qualname__.split('.')[:-1]:\n", '        cls = getattr(cls, name)\n', '    if not isclass(cls):\n', '        return None\n', '    return cls\n', '\n', 'def _finddoc(obj):\n', '    if isclass(obj):\n', '        for base in obj.__mro__:\n', '            if base is not object:\n', '                try:\n', '                    doc = base.__doc__\n', '                except AttributeError:\n', '                    continue\n', '                if doc is not None:\n', '                    return doc\n', '        return None\n', '\n', '    if ismethod(obj):\n', '        name = obj.__func__.__name__\n', '        self = obj.__self__\n', '        if (isclass(self) and\n', "            getattr(getattr(self, name, None), '__func__') is obj.__func__):\n", '            # classmethod\n', '            cls = self\n', '        else:\n', '            cls = self.__class__\n', '    elif isfunction(obj):\n', '        name = obj.__name__\n', '        cls = _findclass(obj)\n', '        if cls is None or getattr(cls, name) is not obj:\n', '            return None\n', '    elif isbuiltin(obj):\n', '        name = obj.__name__\n', '        self = obj.__self__\n', '        if (isclass(self) and\n', "            self.__qualname__ + '.' + name == obj.__qualname__):\n", '            # classmethod\n', '            cls = self\n', '        else:\n', '            cls = self.__class__\n', '    # Should be tested before isdatadescriptor().\n', '    elif isinstance(obj, property):\n', '        func = obj.fget\n', '        name = func.__name__\n', '        cls = _findclass(func)\n', '        if cls is None or getattr(cls, name) is not obj:\n', '            return None\n', '    elif ismethoddescriptor(obj) or isdatadescriptor(obj):\n', '        name = obj.__name__\n', '        cls = obj.__objclass__\n', '        if getattr(cls, name) is not obj:\n', '            return None\n', '        if ismemberdescriptor(obj):\n', "            slots = getattr(cls, '__slots__', None)\n", '            if isinstance(slots, dict) and name in slots:\n', '                return slots[name]\n', '    else:\n', '        return None\n', '    for base in cls.__mro__:\n', '        try:\n', '            doc = getattr(base, name).__doc__\n', '        except AttributeError:\n', '            continue\n', '        if doc is not None:\n', '            return doc\n', '    return None\n', '\n', 'def getdoc(object):\n', '    """Get the documentation string for an object.\n', '\n', '    All tabs are expanded to spaces.  To clean up docstrings that are\n', '    indented to line up with blocks of code, any whitespace than can be\n', '    uniformly removed from the second line onwards is removed."""\n', '    try:\n', '        doc = object.__doc__\n', '    except AttributeError:\n', '        return None\n', '    if doc is None:\n', '        try:\n', '            doc = _finddoc(object)\n', '        except (AttributeError, TypeError):\n', '            return None\n', '    if not isinstance(doc, str):\n', '        return None\n', '    return cleandoc(doc)\n', '\n', 'def cleandoc(doc):\n', '    """Clean up indentation from docstrings.\n', '\n', '    Any whitespace that can be uniformly removed from the second line\n', '    onwards is removed."""\n', '    try:\n', "        lines = doc.expandtabs().split('\\n')\n", '    except UnicodeError:\n', '        return None\n', '    else:\n', '        # Find minimum indentation of any non-blank lines after first line.\n', '        margin = sys.maxsize\n', '        for line in lines[1:]:\n', '            content = len(line.lstrip())\n', '            if content:\n', '                indent = len(line) - content\n', '                margin = min(margin, indent)\n', '        # Remove indentation.\n', '        if lines:\n', '            lines[0] = lines[0].lstrip()\n', '        if margin < sys.maxsize:\n', '            for i in range(1, len(lines)): lines[i] = lines[i][margin:]\n', '        # Remove any trailing or leading blank lines.\n', '        while lines and not lines[-1]:\n', '            lines.pop()\n', '        while lines and not lines[0]:\n', '            lines.pop(0)\n', "        return '\\n'.join(lines)\n", '\n', 'def getfile(object):\n', '    """Work out which source or compiled file an object was defined in."""\n', '    if ismodule(object):\n', "        if getattr(object, '__file__', None):\n", '            return object.__file__\n', "        raise TypeError('{!r} is a built-in module'.format(object))\n", '    if isclass(object):\n', "        if hasattr(object, '__module__'):\n", '            module = sys.modules.get(object.__module__)\n', "            if getattr(module, '__file__', None):\n", '                return module.__file__\n', "        raise TypeError('{!r} is a built-in class'.format(object))\n", '    if ismethod(object):\n', '        object = object.__func__\n', '    if isfunction(object):\n', '        object = object.__code__\n', '    if istraceback(object):\n', '        object = object.tb_frame\n', '    if isframe(object):\n', '        object = object.f_code\n', '    if iscode(object):\n', '        return object.co_filename\n', "    raise TypeError('module, class, method, function, traceback, frame, or '\n", "                    'code object was expected, got {}'.format(\n", '                    type(object).__name__))\n', '\n', 'def getmodulename(path):\n', '    """Return the module name for a given file, or None."""\n', '    fname = os.path.basename(path)\n', '    # Check for paths that look like an actual module file\n', '    suffixes = [(-len(suffix), suffix)\n', '                    for suffix in importlib.machinery.all_suffixes()]\n', '    suffixes.sort() # try longest suffixes first, in case they overlap\n', '    for neglen, suffix in suffixes:\n', '        if fname.endswith(suffix):\n', '            return fname[:neglen]\n', '    return None\n', '\n', 'def getsourcefile(object):\n', '    """Return the filename that can be used to locate an object\'s source.\n', '    Return None if no way can be identified to get the source.\n', '    """\n', '    filename = getfile(object)\n', '    all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:]\n', '    all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:]\n', '    if any(filename.endswith(s) for s in all_bytecode_suffixes):\n', '        filename = (os.path.splitext(filename)[0] +\n', '                    importlib.machinery.SOURCE_SUFFIXES[0])\n', '    elif any(filename.endswith(s) for s in\n', '                 importlib.machinery.EXTENSION_SUFFIXES):\n', '        return None\n', '    if os.path.exists(filename):\n', '        return filename\n', '    # only return a non-existent filename if the module has a PEP 302 loader\n', "    if getattr(getmodule(object, filename), '__loader__', None) is not None:\n", '        return filename\n', '    # or it is in the linecache\n', '    if filename in linecache.cache:\n', '        return filename\n', '\n', 'def getabsfile(object, _filename=None):\n', '    """Return an absolute path to the source or compiled file for an object.\n', '\n', '    The idea is for each object to have a unique origin, so this routine\n', '    normalizes the result as much as possible."""\n', '    if _filename is None:\n', '        _filename = getsourcefile(object) or getfile(object)\n', '    return os.path.normcase(os.path.abspath(_filename))\n', '\n', 'modulesbyfile = {}\n', '_filesbymodname = {}\n', '\n', 'def getmodule(object, _filename=None):\n', '    """Return the module an object was defined in, or None if not found."""\n', '    if ismodule(object):\n', '        return object\n', "    if hasattr(object, '__module__'):\n", '        return sys.modules.get(object.__module__)\n', '    # Try the filename to modulename cache\n', '    if _filename is not None and _filename in modulesbyfile:\n', '        return sys.modules.get(modulesbyfile[_filename])\n', '    # Try the cache again with the absolute file name\n', '    try:\n', '        file = getabsfile(object, _filename)\n', '    except TypeError:\n', '        return None\n', '    if file in modulesbyfile:\n', '        return sys.modules.get(modulesbyfile[file])\n', '    # Update the filename to module name cache and check yet again\n', '    # Copy sys.modules in order to cope with changes while iterating\n', '    for modname, module in sys.modules.copy().items():\n', "        if ismodule(module) and hasattr(module, '__file__'):\n", '            f = module.__file__\n', '            if f == _filesbymodname.get(modname, None):\n', '                # Have already mapped this module, so skip it\n', '                continue\n', '            _filesbymodname[modname] = f\n', '            f = getabsfile(module)\n', '            # Always map to the name the module knows itself by\n', '            modulesbyfile[f] = modulesbyfile[\n', '                os.path.realpath(f)] = module.__name__\n', '    if file in modulesbyfile:\n', '        return sys.modules.get(modulesbyfile[file])\n', '    # Check the main module\n', "    main = sys.modules['__main__']\n", "    if not hasattr(object, '__name__'):\n", '        return None\n', '    if hasattr(main, object.__name__):\n', '        mainobject = getattr(main, object.__name__)\n', '        if mainobject is object:\n', '            return main\n', '    # Check builtins\n', "    builtin = sys.modules['builtins']\n", '    if hasattr(builtin, object.__name__):\n', '        builtinobject = getattr(builtin, object.__name__)\n', '        if builtinobject is object:\n', '            return builtin\n', '\n', '\n', 'class ClassFoundException(Exception):\n', '    pass\n', '\n', '\n', 'class _ClassFinder(ast.NodeVisitor):\n', '\n', '    def __init__(self, qualname):\n', '        self.stack = []\n', '        self.qualname = qualname\n', '\n', '    def visit_FunctionDef(self, node):\n', '        self.stack.append(node.name)\n', "        self.stack.append('<locals>')\n", '        self.generic_visit(node)\n', '        self.stack.pop()\n', '        self.stack.pop()\n', '\n', '    visit_AsyncFunctionDef = visit_FunctionDef\n', '\n', '    def visit_ClassDef(self, node):\n', '        self.stack.append(node.name)\n', "        if self.qualname == '.'.join(self.stack):\n", '            # Return the decorator for the class if present\n', '            if node.decorator_list:\n', '                line_number = node.decorator_list[0].lineno\n', '            else:\n', '                line_number = node.lineno\n', '\n', '            # decrement by one since lines starts with indexing by zero\n', '            line_number -= 1\n', '            raise ClassFoundException(line_number)\n', '        self.generic_visit(node)\n', '        self.stack.pop()\n', '\n', '\n', 'def findsource(object):\n', '    """Return the entire source file and starting line number for an object.\n', '\n', '    The argument may be a module, class, method, function, traceback, frame,\n', '    or code object.  The source code is returned as a list of all the lines\n', '    in the file and the line number indexes a line in that list.  An OSError\n', '    is raised if the source code cannot be retrieved."""\n', '\n', '    file = getsourcefile(object)\n', '    if file:\n', '        # Invalidate cache if needed.\n', '        linecache.checkcache(file)\n', '    else:\n', '        file = getfile(object)\n', '        # Allow filenames in form of "<something>" to pass through.\n', '        # `doctest` monkeypatches `linecache` module to enable\n', '        # inspection, so let `linecache.getlines` to be called.\n', "        if not (file.startswith('<') and file.endswith('>')):\n", "            raise OSError('source code not available')\n", '\n', '    module = getmodule(object, file)\n', '    if module:\n', '        lines = linecache.getlines(file, module.__dict__)\n', '    else:\n', '        lines = linecache.getlines(file)\n', '    if not lines:\n', "        raise OSError('could not get source code')\n", '\n', '    if ismodule(object):\n', '        return lines, 0\n', '\n', '    if isclass(object):\n', '        qualname = object.__qualname__\n', "        source = ''.join(lines)\n", '        tree = ast.parse(source)\n', '        class_finder = _ClassFinder(qualname)\n', '        try:\n', '            class_finder.visit(tree)\n', '        except ClassFoundException as e:\n', '            line_number = e.args[0]\n', '            return lines, line_number\n', '        else:\n', "            raise OSError('could not find class definition')\n", '\n', '    if ismethod(object):\n', '        object = object.__func__\n', '    if isfunction(object):\n', '        object = object.__code__\n', '    if istraceback(object):\n', '        object = object.tb_frame\n', '    if isframe(object):\n', '        object = object.f_code\n', '    if iscode(object):\n', "        if not hasattr(object, 'co_firstlineno'):\n", "            raise OSError('could not find function definition')\n", '        lnum = object.co_firstlineno - 1\n', "        pat = re.compile(r'^(\\s*def\\s)|(\\s*async\\s+def\\s)|(.*(?<!\\w)lambda(:|\\s))|^(\\s*@)')\n", '        while lnum > 0:\n', '            try:\n', '                line = lines[lnum]\n', '            except IndexError:\n', "                raise OSError('lineno is out of bounds')\n", '            if pat.match(line):\n', '                break\n', '            lnum = lnum - 1\n', '        return lines, lnum\n', "    raise OSError('could not find code object')\n", '\n', 'def getcomments(object):\n', '    """Get lines of comments immediately preceding an object\'s source code.\n', '\n', "    Returns None when source can't be found.\n", '    """\n', '    try:\n', '        lines, lnum = findsource(object)\n', '    except (OSError, TypeError):\n', '        return None\n', '\n', '    if ismodule(object):\n', '        # Look for a comment block at the top of the file.\n', '        start = 0\n', "        if lines and lines[0][:2] == '#!': start = 1\n", "        while start < len(lines) and lines[start].strip() in ('', '#'):\n", '            start = start + 1\n', "        if start < len(lines) and lines[start][:1] == '#':\n", '            comments = []\n', '            end = start\n', "            while end < len(lines) and lines[end][:1] == '#':\n", '                comments.append(lines[end].expandtabs())\n', '                end = end + 1\n', "            return ''.join(comments)\n", '\n', '    # Look for a preceding block of comments at the same indentation.\n', '    elif lnum > 0:\n', '        indent = indentsize(lines[lnum])\n', '        end = lnum - 1\n', "        if end >= 0 and lines[end].lstrip()[:1] == '#' and \\\n", '            indentsize(lines[end]) == indent:\n', '            comments = [lines[end].expandtabs().lstrip()]\n', '            if end > 0:\n', '                end = end - 1\n', '                comment = lines[end].expandtabs().lstrip()\n', "                while comment[:1] == '#' and indentsize(lines[end]) == indent:\n", '                    comments[:0] = [comment]\n', '                    end = end - 1\n', '                    if end < 0: break\n', '                    comment = lines[end].expandtabs().lstrip()\n', "            while comments and comments[0].strip() == '#':\n", '                comments[:1] = []\n', "            while comments and comments[-1].strip() == '#':\n", '                comments[-1:] = []\n', "            return ''.join(comments)\n", '\n', 'class EndOfBlock(Exception): pass\n', '\n', 'class BlockFinder:\n', '    """Provide a tokeneater() method to detect the end of a code block."""\n', '    def __init__(self):\n', '        self.indent = 0\n', '        self.islambda = False\n', '        self.started = False\n', '        self.passline = False\n', '        self.indecorator = False\n', '        self.decoratorhasargs = False\n', '        self.last = 1\n', '        self.body_col0 = None\n', '\n', '    def tokeneater(self, type, token, srowcol, erowcol, line):\n', '        if not self.started and not self.indecorator:\n', '            # skip any decorators\n', '            if token == "@":\n', '                self.indecorator = True\n', '            # look for the first "def", "class" or "lambda"\n', '            elif token in ("def", "class", "lambda"):\n', '                if token == "lambda":\n', '                    self.islambda = True\n', '                self.started = True\n', '            self.passline = True    # skip to the end of the line\n', '        elif token == "(":\n', '            if self.indecorator:\n', '                self.decoratorhasargs = True\n', '        elif token == ")":\n', '            if self.indecorator:\n', '                self.indecorator = False\n', '                self.decoratorhasargs = False\n', '        elif type == tokenize.NEWLINE:\n', '            self.passline = False   # stop skipping when a NEWLINE is seen\n', '            self.last = srowcol[0]\n', '            if self.islambda:       # lambdas always end at the first NEWLINE\n', '                raise EndOfBlock\n', '            # hitting a NEWLINE when in a decorator without args\n', '            # ends the decorator\n', '            if self.indecorator and not self.decoratorhasargs:\n', '                self.indecorator = False\n', '        elif self.passline:\n', '            pass\n', '        elif type == tokenize.INDENT:\n', '            if self.body_col0 is None and self.started:\n', '                self.body_col0 = erowcol[1]\n', '            self.indent = self.indent + 1\n', '            self.passline = True\n', '        elif type == tokenize.DEDENT:\n', '            self.indent = self.indent - 1\n', '            # the end of matching indent/dedent pairs end a block\n', '            # (note that this only works for "def"/"class" blocks,\n', '            #  not e.g. for "if: else:" or "try: finally:" blocks)\n', '            if self.indent <= 0:\n', '                raise EndOfBlock\n', '        elif type == tokenize.COMMENT:\n', '            if self.body_col0 is not None and srowcol[1] >= self.body_col0:\n', '                # Include comments if indented at least as much as the block\n', '                self.last = srowcol[0]\n', '        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):\n', '            # any other token on the same indentation level end the previous\n', '            # block as well, except the pseudo-tokens COMMENT and NL.\n', '            raise EndOfBlock\n', '\n', 'def getblock(lines):\n', '    """Extract the block of code at the top of the given list of lines."""\n', '    blockfinder = BlockFinder()\n', '    try:\n', '        tokens = tokenize.generate_tokens(iter(lines).__next__)\n', '        for _token in tokens:\n', '            blockfinder.tokeneater(*_token)\n', '    except (EndOfBlock, IndentationError):\n', '        pass\n', '    return lines[:blockfinder.last]\n', '\n', 'def getsourcelines(object):\n', '    """Return a list of source lines and starting line number for an object.\n', '\n', '    The argument may be a module, class, method, function, traceback, frame,\n', '    or code object.  The source code is returned as a list of the lines\n', '    corresponding to the object and the line number indicates where in the\n', '    original source file the first line of code was found.  An OSError is\n', '    raised if the source code cannot be retrieved."""\n', '    object = unwrap(object)\n', '    lines, lnum = findsource(object)\n', '\n', '    if istraceback(object):\n', '        object = object.tb_frame\n', '\n', '    # for module or frame that corresponds to module, return all source lines\n', '    if (ismodule(object) or\n', '        (isframe(object) and object.f_code.co_name == "<module>")):\n', '        return lines, 0\n', '    else:\n', '        return getblock(lines[lnum:]), lnum + 1\n', '\n', 'def getsource(object):\n', '    """Return the text of the source code for an object.\n', '\n', '    The argument may be a module, class, method, function, traceback, frame,\n', '    or code object.  The source code is returned as a single string.  An\n', '    OSError is raised if the source code cannot be retrieved."""\n', '    lines, lnum = getsourcelines(object)\n', "    return ''.join(lines)\n", '\n', '# --------------------------------------------------- class tree extraction\n', 'def walktree(classes, children, parent):\n', '    """Recursive helper function for getclasstree()."""\n', '    results = []\n', "    classes.sort(key=attrgetter('__module__', '__name__'))\n", '    for c in classes:\n', '        results.append((c, c.__bases__))\n', '        if c in children:\n', '            results.append(walktree(children[c], children, c))\n', '    return results\n', '\n', 'def getclasstree(classes, unique=False):\n', '    """Arrange the given list of classes into a hierarchy of nested lists.\n', '\n', '    Where a nested list appears, it contains classes derived from the class\n', '    whose entry immediately precedes the list.  Each entry is a 2-tuple\n', "    containing a class and a tuple of its base classes.  If the 'unique'\n", '    argument is true, exactly one entry appears in the returned structure\n', '    for each class in the given list.  Otherwise, classes using multiple\n', '    inheritance and their descendants will appear multiple times."""\n', '    children = {}\n', '    roots = []\n', '    for c in classes:\n', '        if c.__bases__:\n', '            for parent in c.__bases__:\n', '                if parent not in children:\n', '                    children[parent] = []\n', '                if c not in children[parent]:\n', '                    children[parent].append(c)\n', '                if unique and parent in classes: break\n', '        elif c not in roots:\n', '            roots.append(c)\n', '    for parent in children:\n', '        if parent not in classes:\n', '            roots.append(parent)\n', '    return walktree(roots, children, None)\n', '\n', '# ------------------------------------------------ argument list extraction\n', "Arguments = namedtuple('Arguments', 'args, varargs, varkw')\n", '\n', 'def getargs(co):\n', '    """Get information about the arguments accepted by a code object.\n', '\n', '    Three things are returned: (args, varargs, varkw), where\n', "    'args' is the list of argument names. Keyword-only arguments are\n", "    appended. 'varargs' and 'varkw' are the names of the * and **\n", '    arguments or None."""\n', '    if not iscode(co):\n', "        raise TypeError('{!r} is not a code object'.format(co))\n", '\n', '    names = co.co_varnames\n', '    nargs = co.co_argcount\n', '    nkwargs = co.co_kwonlyargcount\n', '    args = list(names[:nargs])\n', '    kwonlyargs = list(names[nargs:nargs+nkwargs])\n', '    step = 0\n', '\n', '    nargs += nkwargs\n', '    varargs = None\n', '    if co.co_flags & CO_VARARGS:\n', '        varargs = co.co_varnames[nargs]\n', '        nargs = nargs + 1\n', '    varkw = None\n', '    if co.co_flags & CO_VARKEYWORDS:\n', '        varkw = co.co_varnames[nargs]\n', '    return Arguments(args + kwonlyargs, varargs, varkw)\n', '\n', "ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')\n", '\n', 'def getargspec(func):\n', '    """Get the names and default values of a function\'s parameters.\n', '\n', '    A tuple of four things is returned: (args, varargs, keywords, defaults).\n', "    'args' is a list of the argument names, including keyword-only argument names.\n", "    'varargs' and 'keywords' are the names of the * and ** parameters or None.\n", "    'defaults' is an n-tuple of the default values of the last n parameters.\n", '\n', '    This function is deprecated, as it does not support annotations or\n', '    keyword-only parameters and will raise ValueError if either is present\n', '    on the supplied callable.\n', '\n', '    For a more structured introspection API, use inspect.signature() instead.\n', '\n', '    Alternatively, use getfullargspec() for an API with a similar namedtuple\n', '    based interface, but full support for annotations and keyword-only\n', '    parameters.\n', '\n', '    Deprecated since Python 3.5, use `inspect.getfullargspec()`.\n', '    """\n', '    warnings.warn("inspect.getargspec() is deprecated since Python 3.0, "\n', '                  "use inspect.signature() or inspect.getfullargspec()",\n', '                  DeprecationWarning, stacklevel=2)\n', '    args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = \\\n', '        getfullargspec(func)\n', '    if kwonlyargs or ann:\n', '        raise ValueError("Function has keyword-only parameters or annotations"\n', '                         ", use inspect.signature() API which can support them")\n', '    return ArgSpec(args, varargs, varkw, defaults)\n', '\n', "FullArgSpec = namedtuple('FullArgSpec',\n", "    'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')\n", '\n', 'def getfullargspec(func):\n', '    """Get the names and default values of a callable object\'s parameters.\n', '\n', '    A tuple of seven things is returned:\n', '    (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).\n', "    'args' is a list of the parameter names.\n", "    'varargs' and 'varkw' are the names of the * and ** parameters or None.\n", "    'defaults' is an n-tuple of the default values of the last n parameters.\n", "    'kwonlyargs' is a list of keyword-only parameter names.\n", "    'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.\n", "    'annotations' is a dictionary mapping parameter names to annotations.\n", '\n', '    Notable differences from inspect.signature():\n', '      - the "self" parameter is always reported, even for bound methods\n', '      - wrapper chains defined by __wrapped__ *not* unwrapped automatically\n', '    """\n', '    try:\n', '        # Re: `skip_bound_arg=False`\n', '        #\n', '        # There is a notable difference in behaviour between getfullargspec\n', "        # and Signature: the former always returns 'self' parameter for bound\n", '        # methods, whereas the Signature always shows the actual calling\n', '        # signature of the passed object.\n', '        #\n', '        # To simulate this behaviour, we "unbind" bound methods, to trick\n', '        # inspect.signature to always return their first parameter ("self",\n', '        # usually)\n', '\n', '        # Re: `follow_wrapper_chains=False`\n', '        #\n', '        # getfullargspec() historically ignored __wrapped__ attributes,\n', '        # so we ensure that remains the case in 3.3+\n', '\n', '        sig = _signature_from_callable(func,\n', '                                       follow_wrapper_chains=False,\n', '                                       skip_bound_arg=False,\n', '                                       sigcls=Signature)\n', '    except Exception as ex:\n', "        # Most of the times 'signature' will raise ValueError.\n", '        # But, it can also raise AttributeError, and, maybe something\n', '        # else. So to be fully backwards compatible, we catch all\n', '        # possible exceptions here, and reraise a TypeError.\n', "        raise TypeError('unsupported callable') from ex\n", '\n', '    args = []\n', '    varargs = None\n', '    varkw = None\n', '    posonlyargs = []\n', '    kwonlyargs = []\n', '    annotations = {}\n', '    defaults = ()\n', '    kwdefaults = {}\n', '\n', '    if sig.return_annotation is not sig.empty:\n', "        annotations['return'] = sig.return_annotation\n", '\n', '    for param in sig.parameters.values():\n', '        kind = param.kind\n', '        name = param.name\n', '\n', '        if kind is _POSITIONAL_ONLY:\n', '            posonlyargs.append(name)\n', '            if param.default is not param.empty:\n', '                defaults += (param.default,)\n', '        elif kind is _POSITIONAL_OR_KEYWORD:\n', '            args.append(name)\n', '            if param.default is not param.empty:\n', '                defaults += (param.default,)\n', '        elif kind is _VAR_POSITIONAL:\n', '            varargs = name\n', '        elif kind is _KEYWORD_ONLY:\n', '            kwonlyargs.append(name)\n', '            if param.default is not param.empty:\n', '                kwdefaults[name] = param.default\n', '        elif kind is _VAR_KEYWORD:\n', '            varkw = name\n', '\n', '        if param.annotation is not param.empty:\n', '            annotations[name] = param.annotation\n', '\n', '    if not kwdefaults:\n', "        # compatibility with 'func.__kwdefaults__'\n", '        kwdefaults = None\n', '\n', '    if not defaults:\n', "        # compatibility with 'func.__defaults__'\n", '        defaults = None\n', '\n', '    return FullArgSpec(posonlyargs + args, varargs, varkw, defaults,\n', '                       kwonlyargs, kwdefaults, annotations)\n', '\n', '\n', "ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')\n", '\n', 'def getargvalues(frame):\n', '    """Get information about arguments passed into a particular frame.\n', '\n', '    A tuple of four things is returned: (args, varargs, varkw, locals).\n', "    'args' is a list of the argument names.\n", "    'varargs' and 'varkw' are the names of the * and ** arguments or None.\n", '    \'locals\' is the locals dictionary of the given frame."""\n', '    args, varargs, varkw = getargs(frame.f_code)\n', '    return ArgInfo(args, varargs, varkw, frame.f_locals)\n', '\n', 'def formatannotation(annotation, base_module=None):\n', "    if getattr(annotation, '__module__', None) == 'typing':\n", "        return repr(annotation).replace('typing.', '')\n", '    if isinstance(annotation, type):\n', "        if annotation.__module__ in ('builtins', base_module):\n", '            return annotation.__qualname__\n', "        return annotation.__module__+'.'+annotation.__qualname__\n", '    return repr(annotation)\n', '\n', 'def formatannotationrelativeto(object):\n', "    module = getattr(object, '__module__', None)\n", '    def _formatannotation(annotation):\n', '        return formatannotation(annotation, module)\n', '    return _formatannotation\n', '\n', 'def formatargspec(args, varargs=None, varkw=None, defaults=None,\n', '                  kwonlyargs=(), kwonlydefaults={}, annotations={},\n', '                  formatarg=str,\n', "                  formatvarargs=lambda name: '*' + name,\n", "                  formatvarkw=lambda name: '**' + name,\n", "                  formatvalue=lambda value: '=' + repr(value),\n", "                  formatreturns=lambda text: ' -> ' + text,\n", '                  formatannotation=formatannotation):\n', '    """Format an argument spec from the values returned by getfullargspec.\n', '\n', '    The first seven arguments are (args, varargs, varkw, defaults,\n', '    kwonlyargs, kwonlydefaults, annotations).  The other five arguments\n', '    are the corresponding optional formatting functions that are called to\n', '    turn names and values into strings.  The last argument is an optional\n', '    function to format the sequence of arguments.\n', '\n', '    Deprecated since Python 3.5: use the `signature` function and `Signature`\n', '    objects.\n', '    """\n', '\n', '    from warnings import warn\n', '\n', '    warn("`formatargspec` is deprecated since Python 3.5. Use `signature` and "\n', '         "the `Signature` object directly",\n', '         DeprecationWarning,\n', '         stacklevel=2)\n', '\n', '    def formatargandannotation(arg):\n', '        result = formatarg(arg)\n', '        if arg in annotations:\n', "            result += ': ' + formatannotation(annotations[arg])\n", '        return result\n', '    specs = []\n', '    if defaults:\n', '        firstdefault = len(args) - len(defaults)\n', '    for i, arg in enumerate(args):\n', '        spec = formatargandannotation(arg)\n', '        if defaults and i >= firstdefault:\n', '            spec = spec + formatvalue(defaults[i - firstdefault])\n', '        specs.append(spec)\n', '    if varargs is not None:\n', '        specs.append(formatvarargs(formatargandannotation(varargs)))\n', '    else:\n', '        if kwonlyargs:\n', "            specs.append('*')\n", '    if kwonlyargs:\n', '        for kwonlyarg in kwonlyargs:\n', '            spec = formatargandannotation(kwonlyarg)\n', '            if kwonlydefaults and kwonlyarg in kwonlydefaults:\n', '                spec += formatvalue(kwonlydefaults[kwonlyarg])\n', '            specs.append(spec)\n', '    if varkw is not None:\n', '        specs.append(formatvarkw(formatargandannotation(varkw)))\n', "    result = '(' + ', '.join(specs) + ')'\n", "    if 'return' in annotations:\n", "        result += formatreturns(formatannotation(annotations['return']))\n", '    return result\n', '\n', 'def formatargvalues(args, varargs, varkw, locals,\n', '                    formatarg=str,\n', "                    formatvarargs=lambda name: '*' + name,\n", "                    formatvarkw=lambda name: '**' + name,\n", "                    formatvalue=lambda value: '=' + repr(value)):\n", '    """Format an argument spec from the 4 values returned by getargvalues.\n', '\n', '    The first four arguments are (args, varargs, varkw, locals).  The\n', '    next four arguments are the corresponding optional formatting functions\n', '    that are called to turn names and values into strings.  The ninth\n', '    argument is an optional function to format the sequence of arguments."""\n', '    def convert(name, locals=locals,\n', '                formatarg=formatarg, formatvalue=formatvalue):\n', '        return formatarg(name) + formatvalue(locals[name])\n', '    specs = []\n', '    for i in range(len(args)):\n', '        specs.append(convert(args[i]))\n', '    if varargs:\n', '        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))\n', '    if varkw:\n', '        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))\n', "    return '(' + ', '.join(specs) + ')'\n", '\n', 'def _missing_arguments(f_name, argnames, pos, values):\n', '    names = [repr(name) for name in argnames if name not in values]\n', '    missing = len(names)\n', '    if missing == 1:\n', '        s = names[0]\n', '    elif missing == 2:\n', '        s = "{} and {}".format(*names)\n', '    else:\n', '        tail = ", {} and {}".format(*names[-2:])\n', '        del names[-2:]\n', '        s = ", ".join(names) + tail\n', '    raise TypeError("%s() missing %i required %s argument%s: %s" %\n', '                    (f_name, missing,\n', '                      "positional" if pos else "keyword-only",\n', '                      "" if missing == 1 else "s", s))\n', '\n', 'def _too_many(f_name, args, kwonly, varargs, defcount, given, values):\n', '    atleast = len(args) - defcount\n', '    kwonly_given = len([arg for arg in kwonly if arg in values])\n', '    if varargs:\n', '        plural = atleast != 1\n', '        sig = "at least %d" % (atleast,)\n', '    elif defcount:\n', '        plural = True\n', '        sig = "from %d to %d" % (atleast, len(args))\n', '    else:\n', '        plural = len(args) != 1\n', '        sig = str(len(args))\n', '    kwonly_sig = ""\n', '    if kwonly_given:\n', '        msg = " positional argument%s (and %d keyword-only argument%s)"\n', '        kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given,\n', '                             "s" if kwonly_given != 1 else ""))\n', '    raise TypeError("%s() takes %s positional argument%s but %d%s %s given" %\n', '            (f_name, sig, "s" if plural else "", given, kwonly_sig,\n', '             "was" if given == 1 and not kwonly_given else "were"))\n', '\n', 'def getcallargs(func, /, *positional, **named):\n', '    """Get the mapping of arguments to values.\n', '\n', '    A dict is returned, with keys the function argument names (including the\n', '    names of the * and ** arguments, if any), and values the respective bound\n', '    values from \'positional\' and \'named\'."""\n', '    spec = getfullargspec(func)\n', '    args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec\n', '    f_name = func.__name__\n', '    arg2value = {}\n', '\n', '\n', '    if ismethod(func) and func.__self__ is not None:\n', "        # implicit 'self' (or 'cls' for classmethods) argument\n", '        positional = (func.__self__,) + positional\n', '    num_pos = len(positional)\n', '    num_args = len(args)\n', '    num_defaults = len(defaults) if defaults else 0\n', '\n', '    n = min(num_pos, num_args)\n', '    for i in range(n):\n', '        arg2value[args[i]] = positional[i]\n', '    if varargs:\n', '        arg2value[varargs] = tuple(positional[n:])\n', '    possible_kwargs = set(args + kwonlyargs)\n', '    if varkw:\n', '        arg2value[varkw] = {}\n', '    for kw, value in named.items():\n', '        if kw not in possible_kwargs:\n', '            if not varkw:\n', '                raise TypeError("%s() got an unexpected keyword argument %r" %\n', '                                (f_name, kw))\n', '            arg2value[varkw][kw] = value\n', '            continue\n', '        if kw in arg2value:\n', '            raise TypeError("%s() got multiple values for argument %r" %\n', '                            (f_name, kw))\n', '        arg2value[kw] = value\n', '    if num_pos > num_args and not varargs:\n', '        _too_many(f_name, args, kwonlyargs, varargs, num_defaults,\n', '                   num_pos, arg2value)\n', '    if num_pos < num_args:\n', '        req = args[:num_args - num_defaults]\n', '        for arg in req:\n', '            if arg not in arg2value:\n', '                _missing_arguments(f_name, req, True, arg2value)\n', '        for i, arg in enumerate(args[num_args - num_defaults:]):\n', '            if arg not in arg2value:\n', '                arg2value[arg] = defaults[i]\n', '    missing = 0\n', '    for kwarg in kwonlyargs:\n', '        if kwarg not in arg2value:\n', '            if kwonlydefaults and kwarg in kwonlydefaults:\n', '                arg2value[kwarg] = kwonlydefaults[kwarg]\n', '            else:\n', '                missing += 1\n', '    if missing:\n', '        _missing_arguments(f_name, kwonlyargs, False, arg2value)\n', '    return arg2value\n', '\n', "ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound')\n", '\n', 'def getclosurevars(func):\n', '    """\n', '    Get the mapping of free variables to their current values.\n', '\n', '    Returns a named tuple of dicts mapping the current nonlocal, global\n', '    and builtin references as seen by the body of the function. A final\n', '    set of unbound names that could not be resolved is also provided.\n', '    """\n', '\n', '    if ismethod(func):\n', '        func = func.__func__\n', '\n', '    if not isfunction(func):\n', '        raise TypeError("{!r} is not a Python function".format(func))\n', '\n', '    code = func.__code__\n', '    # Nonlocal references are named in co_freevars and resolved\n', '    # by looking them up in __closure__ by positional index\n', '    if func.__closure__ is None:\n', '        nonlocal_vars = {}\n', '    else:\n', '        nonlocal_vars = {\n', '            var : cell.cell_contents\n', '            for var, cell in zip(code.co_freevars, func.__closure__)\n', '       }\n', '\n', '    # Global and builtin references are named in co_names and resolved\n', '    # by looking them up in __globals__ or __builtins__\n', '    global_ns = func.__globals__\n', '    builtin_ns = global_ns.get("__builtins__", builtins.__dict__)\n', '    if ismodule(builtin_ns):\n', '        builtin_ns = builtin_ns.__dict__\n', '    global_vars = {}\n', '    builtin_vars = {}\n', '    unbound_names = set()\n', '    for name in code.co_names:\n', '        if name in ("None", "True", "False"):\n', '            # Because these used to be builtins instead of keywords, they\n', '            # may still show up as name references. We ignore them.\n', '            continue\n', '        try:\n', '            global_vars[name] = global_ns[name]\n', '        except KeyError:\n', '            try:\n', '                builtin_vars[name] = builtin_ns[name]\n', '            except KeyError:\n', '                unbound_names.add(name)\n', '\n', '    return ClosureVars(nonlocal_vars, global_vars,\n', '                       builtin_vars, unbound_names)\n', '\n', '# -------------------------------------------------- stack frame extraction\n', '\n', "Traceback = namedtuple('Traceback', 'filename lineno function code_context index')\n", '\n', 'def getframeinfo(frame, context=1):\n', '    """Get information about a frame or traceback object.\n', '\n', '    A tuple of five things is returned: the filename, the line number of\n', '    the current line, the function name, a list of lines of context from\n', '    the source code, and the index of the current line within that list.\n', '    The optional second argument specifies the number of lines of context\n', '    to return, which are centered around the current line."""\n', '    if istraceback(frame):\n', '        lineno = frame.tb_lineno\n', '        frame = frame.tb_frame\n', '    else:\n', '        lineno = frame.f_lineno\n', '    if not isframe(frame):\n', "        raise TypeError('{!r} is not a frame or traceback object'.format(frame))\n", '\n', '    filename = getsourcefile(frame) or getfile(frame)\n', '    if context > 0:\n', '        start = lineno - 1 - context//2\n', '        try:\n', '            lines, lnum = findsource(frame)\n', '        except OSError:\n', '            lines = index = None\n', '        else:\n', '            start = max(0, min(start, len(lines) - context))\n', '            lines = lines[start:start+context]\n', '            index = lineno - 1 - start\n', '    else:\n', '        lines = index = None\n', '\n', '    return Traceback(filename, lineno, frame.f_code.co_name, lines, index)\n', '\n', 'def getlineno(frame):\n', '    """Get the line number from a frame object, allowing for optimization."""\n', '    # FrameType.f_lineno is now a descriptor that grovels co_lnotab\n', '    return frame.f_lineno\n', '\n', "FrameInfo = namedtuple('FrameInfo', ('frame',) + Traceback._fields)\n", '\n', 'def getouterframes(frame, context=1):\n', '    """Get a list of records for a frame and all higher (calling) frames.\n', '\n', '    Each record contains a frame object, filename, line number, function\n', '    name, a list of lines of context, and index within the context."""\n', '    framelist = []\n', '    while frame:\n', '        frameinfo = (frame,) + getframeinfo(frame, context)\n', '        framelist.append(FrameInfo(*frameinfo))\n', '        frame = frame.f_back\n', '    return framelist\n', '\n', 'def getinnerframes(tb, context=1):\n', '    """Get a list of records for a traceback\'s frame and all lower frames.\n', '\n', '    Each record contains a frame object, filename, line number, function\n', '    name, a list of lines of context, and index within the context."""\n', '    framelist = []\n', '    while tb:\n', '        frameinfo = (tb.tb_frame,) + getframeinfo(tb, context)\n', '        framelist.append(FrameInfo(*frameinfo))\n', '        tb = tb.tb_next\n', '    return framelist\n', '\n', 'def currentframe():\n', '    """Return the frame of the caller or None if this is not possible."""\n', '    return sys._getframe(1) if hasattr(sys, "_getframe") else None\n', '\n', 'def stack(context=1):\n', '    """Return a list of records for the stack above the caller\'s frame."""\n', '    return getouterframes(sys._getframe(1), context)\n', '\n', 'def trace(context=1):\n', '    """Return a list of records for the stack below the current exception."""\n', '    return getinnerframes(sys.exc_info()[2], context)\n', '\n', '\n', '# ------------------------------------------------ static version of getattr\n', '\n', '_sentinel = object()\n', '\n', 'def _static_getmro(klass):\n', "    return type.__dict__['__mro__'].__get__(klass)\n", '\n', 'def _check_instance(obj, attr):\n', '    instance_dict = {}\n', '    try:\n', '        instance_dict = object.__getattribute__(obj, "__dict__")\n', '    except AttributeError:\n', '        pass\n', '    return dict.get(instance_dict, attr, _sentinel)\n', '\n', '\n', 'def _check_class(klass, attr):\n', '    for entry in _static_getmro(klass):\n', '        if _shadowed_dict(type(entry)) is _sentinel:\n', '            try:\n', '                return entry.__dict__[attr]\n', '            except KeyError:\n', '                pass\n', '    return _sentinel\n', '\n', 'def _is_type(obj):\n', '    try:\n', '        _static_getmro(obj)\n', '    except TypeError:\n', '        return False\n', '    return True\n', '\n', 'def _shadowed_dict(klass):\n', '    dict_attr = type.__dict__["__dict__"]\n', '    for entry in _static_getmro(klass):\n', '        try:\n', '            class_dict = dict_attr.__get__(entry)["__dict__"]\n', '        except KeyError:\n', '            pass\n', '        else:\n', '            if not (type(class_dict) is types.GetSetDescriptorType and\n', '                    class_dict.__name__ == "__dict__" and\n', '                    class_dict.__objclass__ is entry):\n', '                return class_dict\n', '    return _sentinel\n', '\n', 'def getattr_static(obj, attr, default=_sentinel):\n', '    """Retrieve attributes without triggering dynamic lookup via the\n', '       descriptor protocol,  __getattr__ or __getattribute__.\n', '\n', '       Note: this function may not be able to retrieve all attributes\n', '       that getattr can fetch (like dynamically created attributes)\n', "       and may find attributes that getattr can't (like descriptors\n", '       that raise AttributeError). It can also return descriptor objects\n', '       instead of instance members in some cases. See the\n', '       documentation for details.\n', '    """\n', '    instance_result = _sentinel\n', '    if not _is_type(obj):\n', '        klass = type(obj)\n', '        dict_attr = _shadowed_dict(klass)\n', '        if (dict_attr is _sentinel or\n', '            type(dict_attr) is types.MemberDescriptorType):\n', '            instance_result = _check_instance(obj, attr)\n', '    else:\n', '        klass = obj\n', '\n', '    klass_result = _check_class(klass, attr)\n', '\n', '    if instance_result is not _sentinel and klass_result is not _sentinel:\n', "        if (_check_class(type(klass_result), '__get__') is not _sentinel and\n", "            _check_class(type(klass_result), '__set__') is not _sentinel):\n", '            return klass_result\n', '\n', '    if instance_result is not _sentinel:\n', '        return instance_result\n', '    if klass_result is not _sentinel:\n', '        return klass_result\n', '\n', '    if obj is klass:\n', '        # for types we check the metaclass too\n', '        for entry in _static_getmro(type(klass)):\n', '            if _shadowed_dict(type(entry)) is _sentinel:\n', '                try:\n', '                    return entry.__dict__[attr]\n', '                except KeyError:\n', '                    pass\n', '    if default is not _sentinel:\n', '        return default\n', '    raise AttributeError(attr)\n', '\n', '\n', '# ------------------------------------------------ generator introspection\n', '\n', "GEN_CREATED = 'GEN_CREATED'\n", "GEN_RUNNING = 'GEN_RUNNING'\n", "GEN_SUSPENDED = 'GEN_SUSPENDED'\n", "GEN_CLOSED = 'GEN_CLOSED'\n", '\n', 'def getgeneratorstate(generator):\n', '    """Get current state of a generator-iterator.\n', '\n', '    Possible states are:\n', '      GEN_CREATED: Waiting to start execution.\n', '      GEN_RUNNING: Currently being executed by the interpreter.\n', '      GEN_SUSPENDED: Currently suspended at a yield expression.\n', '      GEN_CLOSED: Execution has completed.\n', '    """\n', '    if generator.gi_running:\n', '        return GEN_RUNNING\n', '    if generator.gi_frame is None:\n', '        return GEN_CLOSED\n', '    if generator.gi_frame.f_lasti == -1:\n', '        return GEN_CREATED\n', '    return GEN_SUSPENDED\n', '\n', '\n', 'def getgeneratorlocals(generator):\n', '    """\n', '    Get the mapping of generator local variables to their current values.\n', '\n', '    A dict is returned, with the keys the local variable names and values the\n', '    bound values."""\n', '\n', '    if not isgenerator(generator):\n', '        raise TypeError("{!r} is not a Python generator".format(generator))\n', '\n', '    frame = getattr(generator, "gi_frame", None)\n', '    if frame is not None:\n', '        return generator.gi_frame.f_locals\n', '    else:\n', '        return {}\n', '\n', '\n', '# ------------------------------------------------ coroutine introspection\n', '\n', "CORO_CREATED = 'CORO_CREATED'\n", "CORO_RUNNING = 'CORO_RUNNING'\n", "CORO_SUSPENDED = 'CORO_SUSPENDED'\n", "CORO_CLOSED = 'CORO_CLOSED'\n", '\n', 'def getcoroutinestate(coroutine):\n', '    """Get current state of a coroutine object.\n', '\n', '    Possible states are:\n', '      CORO_CREATED: Waiting to start execution.\n', '      CORO_RUNNING: Currently being executed by the interpreter.\n', '      CORO_SUSPENDED: Currently suspended at an await expression.\n', '      CORO_CLOSED: Execution has completed.\n', '    """\n', '    if coroutine.cr_running:\n', '        return CORO_RUNNING\n', '    if coroutine.cr_frame is None:\n', '        return CORO_CLOSED\n', '    if coroutine.cr_frame.f_lasti == -1:\n', '        return CORO_CREATED\n', '    return CORO_SUSPENDED\n', '\n', '\n', 'def getcoroutinelocals(coroutine):\n', '    """\n', '    Get the mapping of coroutine local variables to their current values.\n', '\n', '    A dict is returned, with the keys the local variable names and values the\n', '    bound values."""\n', '    frame = getattr(coroutine, "cr_frame", None)\n', '    if frame is not None:\n', '        return frame.f_locals\n', '    else:\n', '        return {}\n', '\n', '\n', '###############################################################################\n', '### Function Signature Object (PEP 362)\n', '###############################################################################\n', '\n', '\n', '_WrapperDescriptor = type(type.__call__)\n', '_MethodWrapper = type(all.__call__)\n', "_ClassMethodWrapper = type(int.__dict__['from_bytes'])\n", '\n', '_NonUserDefinedCallables = (_WrapperDescriptor,\n', '                            _MethodWrapper,\n', '                            _ClassMethodWrapper,\n', '                            types.BuiltinFunctionType)\n', '\n', '\n', 'def _signature_get_user_defined_method(cls, method_name):\n', '    """Private helper. Checks if ``cls`` has an attribute\n', '    named ``method_name`` and returns it only if it is a\n', '    pure python function.\n', '    """\n', '    try:\n', '        meth = getattr(cls, method_name)\n', '    except AttributeError:\n', '        return\n', '    else:\n', '        if not isinstance(meth, _NonUserDefinedCallables):\n', "            # Once '__signature__' will be added to 'C'-level\n", "            # callables, this check won't be necessary\n", '            return meth\n', '\n', '\n', 'def _signature_get_partial(wrapped_sig, partial, extra_args=()):\n', '    """Private helper to calculate how \'wrapped_sig\' signature will\n', "    look like after applying a 'functools.partial' object (or alike)\n", '    on it.\n', '    """\n', '\n', '    old_params = wrapped_sig.parameters\n', '    new_params = OrderedDict(old_params.items())\n', '\n', '    partial_args = partial.args or ()\n', '    partial_keywords = partial.keywords or {}\n', '\n', '    if extra_args:\n', '        partial_args = extra_args + partial_args\n', '\n', '    try:\n', '        ba = wrapped_sig.bind_partial(*partial_args, **partial_keywords)\n', '    except TypeError as ex:\n', "        msg = 'partial object {!r} has incorrect arguments'.format(partial)\n", '        raise ValueError(msg) from ex\n', '\n', '\n', '    transform_to_kwonly = False\n', '    for param_name, param in old_params.items():\n', '        try:\n', '            arg_value = ba.arguments[param_name]\n', '        except KeyError:\n', '            pass\n', '        else:\n', '            if param.kind is _POSITIONAL_ONLY:\n', '                # If positional-only parameter is bound by partial,\n', '                # it effectively disappears from the signature\n', '                new_params.pop(param_name)\n', '                continue\n', '\n', '            if param.kind is _POSITIONAL_OR_KEYWORD:\n', '                if param_name in partial_keywords:\n', '                    # This means that this parameter, and all parameters\n', '                    # after it should be keyword-only (and var-positional\n', "                    # should be removed). Here's why. Consider the following\n", '                    # function:\n', '                    #     foo(a, b, *args, c):\n', '                    #         pass\n', '                    #\n', '                    # "partial(foo, a=\'spam\')" will have the following\n', '                    # signature: "(*, a=\'spam\', b, c)". Because attempting\n', '                    # to call that partial with "(10, 20)" arguments will\n', '                    # raise a TypeError, saying that "a" argument received\n', '                    # multiple values.\n', '                    transform_to_kwonly = True\n', '                    # Set the new default value\n', '                    new_params[param_name] = param.replace(default=arg_value)\n', '                else:\n', '                    # was passed as a positional argument\n', '                    new_params.pop(param.name)\n', '                    continue\n', '\n', '            if param.kind is _KEYWORD_ONLY:\n', '                # Set the new default value\n', '                new_params[param_name] = param.replace(default=arg_value)\n', '\n', '        if transform_to_kwonly:\n', '            assert param.kind is not _POSITIONAL_ONLY\n', '\n', '            if param.kind is _POSITIONAL_OR_KEYWORD:\n', '                new_param = new_params[param_name].replace(kind=_KEYWORD_ONLY)\n', '                new_params[param_name] = new_param\n', '                new_params.move_to_end(param_name)\n', '            elif param.kind in (_KEYWORD_ONLY, _VAR_KEYWORD):\n', '                new_params.move_to_end(param_name)\n', '            elif param.kind is _VAR_POSITIONAL:\n', '                new_params.pop(param.name)\n', '\n', '    return wrapped_sig.replace(parameters=new_params.values())\n', '\n', '\n', 'def _signature_bound_method(sig):\n', '    """Private helper to transform signatures for unbound\n', '    functions to bound methods.\n', '    """\n', '\n', '    params = tuple(sig.parameters.values())\n', '\n', '    if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY):\n', "        raise ValueError('invalid method signature')\n", '\n', '    kind = params[0].kind\n', '    if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY):\n', '        # Drop first parameter:\n', "        # '(p1, p2[, ...])' -> '(p2[, ...])'\n", '        params = params[1:]\n', '    else:\n', '        if kind is not _VAR_POSITIONAL:\n', '            # Unless we add a new parameter type we never\n', '            # get here\n', "            raise ValueError('invalid argument type')\n", "        # It's a var-positional parameter.\n", "        # Do nothing. '(*args[, ...])' -> '(*args[, ...])'\n", '\n', '    return sig.replace(parameters=params)\n', '\n', '\n', 'def _signature_is_builtin(obj):\n', '    """Private helper to test if `obj` is a callable that might\n', "    support Argument Clinic's __text_signature__ protocol.\n", '    """\n', '    return (isbuiltin(obj) or\n', '            ismethoddescriptor(obj) or\n', '            isinstance(obj, _NonUserDefinedCallables) or\n', "            # Can't test 'isinstance(type)' here, as it would\n", '            # also be True for regular python classes\n', '            obj in (type, object))\n', '\n', '\n', 'def _signature_is_functionlike(obj):\n', '    """Private helper to test if `obj` is a duck type of FunctionType.\n', '    A good example of such objects are functions compiled with\n', '    Cython, which have all attributes that a pure Python function\n', '    would have, but have their code statically compiled.\n', '    """\n', '\n', '    if not callable(obj) or isclass(obj):\n', '        # All function-like objects are obviously callables,\n', '        # and not classes.\n', '        return False\n', '\n', "    name = getattr(obj, '__name__', None)\n", "    code = getattr(obj, '__code__', None)\n", "    defaults = getattr(obj, '__defaults__', _void) # Important to use _void ...\n", "    kwdefaults = getattr(obj, '__kwdefaults__', _void) # ... and not None here\n", "    annotations = getattr(obj, '__annotations__', None)\n", '\n', '    return (isinstance(code, types.CodeType) and\n', '            isinstance(name, str) and\n', '            (defaults is None or isinstance(defaults, tuple)) and\n', '            (kwdefaults is None or isinstance(kwdefaults, dict)) and\n', '            isinstance(annotations, dict))\n', '\n', '\n', 'def _signature_get_bound_param(spec):\n', '    """ Private helper to get first parameter name from a\n', '    __text_signature__ of a builtin method, which should\n', "    be in the following format: '($param1, ...)'.\n", "    Assumptions are that the first argument won't have\n", '    a default value or an annotation.\n', '    """\n', '\n', "    assert spec.startswith('(


)\n", '\n', "    pos = spec.find(',')\n", '    if pos == -1:\n', "        pos = spec.find(')')\n", '\n', "    cpos = spec.find(':')\n", '    assert cpos == -1 or cpos > pos\n', '\n', "    cpos = spec.find('=')\n", '    assert cpos == -1 or cpos > pos\n', '\n', '    return spec[2:pos]\n', '\n', '\n', 'def _signature_strip_non_python_syntax(signature):\n', '    """\n', "    Private helper function. Takes a signature in Argument Clinic's\n", '    extended signature format.\n', '\n', '    Returns a tuple of three things:\n', '      * that signature re-rendered in standard Python syntax,\n', '      * the index of the "self" parameter (generally 0), or None if\n', '        the function does not have a "self" parameter, and\n', '      * the index of the last "positional only" parameter,\n', '        or None if the signature has no positional-only parameters.\n', '    """\n', '\n', '    if not signature:\n', '        return signature, None, None\n', '\n', '    self_parameter = None\n', '    last_positional_only = None\n', '\n', "    lines = [l.encode('ascii') for l in signature.split('\\n')]\n", '    generator = iter(lines).__next__\n', '    token_stream = tokenize.tokenize(generator)\n', '\n', '    delayed_comma = False\n', '    skip_next_comma = False\n', '    text = []\n', '    add = text.append\n', '\n', '    current_parameter = 0\n', '    OP = token.OP\n', '    ERRORTOKEN = token.ERRORTOKEN\n', '\n', '    # token stream always starts with ENCODING token, skip it\n', '    t = next(token_stream)\n', '    assert t.type == tokenize.ENCODING\n', '\n', '    for t in token_stream:\n', '        type, string = t.type, t.string\n', '\n', '        if type == OP:\n', "            if string == ',':\n", '                if skip_next_comma:\n', '                    skip_next_comma = False\n', '                else:\n', '                    assert not delayed_comma\n', '                    delayed_comma = True\n', '                    current_parameter += 1\n', '                continue\n', '\n', "            if string == '/':\n", '                assert not skip_next_comma\n', '                assert last_positional_only is None\n', '                skip_next_comma = True\n', '                last_positional_only = current_parameter - 1\n', '                continue\n', '\n', "        if (type == ERRORTOKEN) and (string == '


):\n", '            assert self_parameter is None\n', '            self_parameter = current_parameter\n', '            continue\n', '\n', '        if delayed_comma:\n', '            delayed_comma = False\n', "            if not ((type == OP) and (string == ')')):\n", "                add(', ')\n", '        add(string)\n', "        if (string == ','):\n", "            add(' ')\n", "    clean_signature = ''.join(text)\n", '    return clean_signature, self_parameter, last_positional_only\n', '\n', '\n', 'def _signature_fromstr(cls, obj, s, skip_bound_arg=True):\n', '    """Private helper to parse content of \'__text_signature__\'\n', '    and return a Signature based on it.\n', '    """\n', "    # Lazy import ast because it's relatively heavy and\n", "    # it's not used for other than this function.\n", '    import ast\n', '\n', '    Parameter = cls._parameter_cls\n', '\n', '    clean_signature, self_parameter, last_positional_only = \\\n', '        _signature_strip_non_python_syntax(s)\n', '\n', '    program = "def foo" + clean_signature + ": pass"\n', '\n', '    try:\n', '        module = ast.parse(program)\n', '    except SyntaxError:\n', '        module = None\n', '\n', '    if not isinstance(module, ast.Module):\n', '        raise ValueError("{!r} builtin has invalid signature".format(obj))\n', '\n', '    f = module.body[0]\n', '\n', '    parameters = []\n', '    empty = Parameter.empty\n', '    invalid = object()\n', '\n', '    module = None\n', '    module_dict = {}\n', "    module_name = getattr(obj, '__module__', None)\n", '    if module_name:\n', '        module = sys.modules.get(module_name, None)\n', '        if module:\n', '            module_dict = module.__dict__\n', '    sys_module_dict = sys.modules.copy()\n', '\n', '    def parse_name(node):\n', '        assert isinstance(node, ast.arg)\n', '        if node.annotation is not None:\n', '            raise ValueError("Annotations are not currently supported")\n', '        return node.arg\n', '\n', '    def wrap_value(s):\n', '        try:\n', '            value = eval(s, module_dict)\n', '        except NameError:\n', '            try:\n', '                value = eval(s, sys_module_dict)\n', '            except NameError:\n', '                raise RuntimeError()\n', '\n', '        if isinstance(value, (str, int, float, bytes, bool, type(None))):\n', '            return ast.Constant(value)\n', '        raise RuntimeError()\n', '\n', '    class RewriteSymbolics(ast.NodeTransformer):\n', '        def visit_Attribute(self, node):\n', '            a = []\n', '            n = node\n', '            while isinstance(n, ast.Attribute):\n', '                a.append(n.attr)\n', '                n = n.value\n', '            if not isinstance(n, ast.Name):\n', '                raise RuntimeError()\n', '            a.append(n.id)\n', '            value = ".".join(reversed(a))\n', '            return wrap_value(value)\n', '\n', '        def visit_Name(self, node):\n', '            if not isinstance(node.ctx, ast.Load):\n', '                raise ValueError()\n', '            return wrap_value(node.id)\n', '\n', '    def p(name_node, default_node, default=empty):\n', '        name = parse_name(name_node)\n', '        if name is invalid:\n', '            return None\n', '        if default_node and default_node is not _empty:\n', '            try:\n', '                default_node = RewriteSymbolics().visit(default_node)\n', '                o = ast.literal_eval(default_node)\n', '            except ValueError:\n', '                o = invalid\n', '            if o is invalid:\n', '                return None\n', '            default = o if o is not invalid else default\n', '        parameters.append(Parameter(name, kind, default=default, annotation=empty))\n', '\n', '    # non-keyword-only parameters\n', '    args = reversed(f.args.args)\n', '    defaults = reversed(f.args.defaults)\n', '    iter = itertools.zip_longest(args, defaults, fillvalue=None)\n', '    if last_positional_only is not None:\n', '        kind = Parameter.POSITIONAL_ONLY\n', '    else:\n', '        kind = Parameter.POSITIONAL_OR_KEYWORD\n', '    for i, (name, default) in enumerate(reversed(list(iter))):\n', '        p(name, default)\n', '        if i == last_positional_only:\n', '            kind = Parameter.POSITIONAL_OR_KEYWORD\n', '\n', '    # *args\n', '    if f.args.vararg:\n', '        kind = Parameter.VAR_POSITIONAL\n', '        p(f.args.vararg, empty)\n', '\n', '    # keyword-only arguments\n', '    kind = Parameter.KEYWORD_ONLY\n', '    for name, default in zip(f.args.kwonlyargs, f.args.kw_defaults):\n', '        p(name, default)\n', '\n', '    # **kwargs\n', '    if f.args.kwarg:\n', '        kind = Parameter.VAR_KEYWORD\n', '        p(f.args.kwarg, empty)\n', '\n', '    if self_parameter is not None:\n', '        # Possibly strip the bound argument:\n', '        #    - We *always* strip first bound argument if\n', '        #      it is a module.\n', "        #    - We don't strip first bound argument if\n", '        #      skip_bound_arg is False.\n', '        assert parameters\n', "        _self = getattr(obj, '__self__', None)\n", '        self_isbound = _self is not None\n', '        self_ismodule = ismodule(_self)\n', '        if self_isbound and (self_ismodule or skip_bound_arg):\n', '            parameters.pop(0)\n', '        else:\n', '            # for builtins, self parameter is always positional-only!\n', '            p = parameters[0].replace(kind=Parameter.POSITIONAL_ONLY)\n', '            parameters[0] = p\n', '\n', '    return cls(parameters, return_annotation=cls.empty)\n', '\n', '\n', 'def _signature_from_builtin(cls, func, skip_bound_arg=True):\n', '    """Private helper function to get signature for\n', '    builtin callables.\n', '    """\n', '\n', '    if not _signature_is_builtin(func):\n', '        raise TypeError("{!r} is not a Python builtin "\n', '                        "function".format(func))\n', '\n', '    s = getattr(func, "__text_signature__", None)\n', '    if not s:\n', '        raise ValueError("no signature found for builtin {!r}".format(func))\n', '\n', '    return _signature_fromstr(cls, func, s, skip_bound_arg)\n', '\n', '\n', 'def _signature_from_function(cls, func, skip_bound_arg=True):\n', '    """Private helper: constructs Signature for the given python function."""\n', '\n', '    is_duck_function = False\n', '    if not isfunction(func):\n', '        if _signature_is_functionlike(func):\n', '            is_duck_function = True\n', '        else:\n', "            # If it's not a pure Python function, and not a duck type\n", '            # of pure function:\n', "            raise TypeError('{!r} is not a Python function'.format(func))\n", '\n', '    s = getattr(func, "__text_signature__", None)\n', '    if s:\n', '        return _signature_fromstr(cls, func, s, skip_bound_arg)\n', '\n', '    Parameter = cls._parameter_cls\n', '\n', '    # Parameter information.\n', '    func_code = func.__code__\n', '    pos_count = func_code.co_argcount\n', '    arg_names = func_code.co_varnames\n', '    posonly_count = func_code.co_posonlyargcount\n', '    positional = arg_names[:pos_count]\n', '    keyword_only_count = func_code.co_kwonlyargcount\n', '    keyword_only = arg_names[pos_count:pos_count + keyword_only_count]\n', '    annotations = func.__annotations__\n', '    defaults = func.__defaults__\n', '    kwdefaults = func.__kwdefaults__\n', '\n', '    if defaults:\n', '        pos_default_count = len(defaults)\n', '    else:\n', '        pos_default_count = 0\n', '\n', '    parameters = []\n', '\n', '    non_default_count = pos_count - pos_default_count\n', '    posonly_left = posonly_count\n', '\n', '    # Non-keyword-only parameters w/o defaults.\n', '    for name in positional[:non_default_count]:\n', '        kind = _POSITIONAL_ONLY if posonly_left else _POSITIONAL_OR_KEYWORD\n', '        annotation = annotations.get(name, _empty)\n', '        parameters.append(Parameter(name, annotation=annotation,\n', '                                    kind=kind))\n', '        if posonly_left:\n', '            posonly_left -= 1\n', '\n', '    # ... w/ defaults.\n', '    for offset, name in enumerate(positional[non_default_count:]):\n', '        kind = _POSITIONAL_ONLY if posonly_left else _POSITIONAL_OR_KEYWORD\n', '        annotation = annotations.get(name, _empty)\n', '        parameters.append(Parameter(name, annotation=annotation,\n', '                                    kind=kind,\n', '                                    default=defaults[offset]))\n', '        if posonly_left:\n', '            posonly_left -= 1\n', '\n', '    # *args\n', '    if func_code.co_flags & CO_VARARGS:\n', '        name = arg_names[pos_count + keyword_only_count]\n', '        annotation = annotations.get(name, _empty)\n', '        parameters.append(Parameter(name, annotation=annotation,\n', '                                    kind=_VAR_POSITIONAL))\n', '\n', '    # Keyword-only parameters.\n', '    for name in keyword_only:\n', '        default = _empty\n', '        if kwdefaults is not None:\n', '            default = kwdefaults.get(name, _empty)\n', '\n', '        annotation = annotations.get(name, _empty)\n', '        parameters.append(Parameter(name, annotation=annotation,\n', '                                    kind=_KEYWORD_ONLY,\n', '                                    default=default))\n', '    # **kwargs\n', '    if func_code.co_flags & CO_VARKEYWORDS:\n', '        index = pos_count + keyword_only_count\n', '        if func_code.co_flags & CO_VARARGS:\n', '            index += 1\n', '\n', '        name = arg_names[index]\n', '        annotation = annotations.get(name, _empty)\n', '        parameters.append(Parameter(name, annotation=annotation,\n', '                                    kind=_VAR_KEYWORD))\n', '\n', "    # Is 'func' is a pure Python function - don't validate the\n", '    # parameters list (for correct order and defaults), it should be OK.\n', '    return cls(parameters,\n', "               return_annotation=annotations.get('return', _empty),\n", '               __validate_parameters__=is_duck_function)\n', '\n', '\n', 'def _signature_from_callable(obj, *,\n', '                             follow_wrapper_chains=True,\n', '                             skip_bound_arg=True,\n', '                             sigcls):\n', '\n', '    """Private helper function to get signature for arbitrary\n', '    callable objects.\n', '    """\n', '\n', '    if not callable(obj):\n', "        raise TypeError('{!r} is not a callable object'.format(obj))\n", '\n', '    if isinstance(obj, types.MethodType):\n', '        # In this case we skip the first parameter of the underlying\n', '        # function (usually `self` or `cls`).\n', '        sig = _signature_from_callable(\n', '            obj.__func__,\n', '            follow_wrapper_chains=follow_wrapper_chains,\n', '            skip_bound_arg=skip_bound_arg,\n', '            sigcls=sigcls)\n', '\n', '        if skip_bound_arg:\n', '            return _signature_bound_method(sig)\n', '        else:\n', '            return sig\n', '\n', '    # Was this function wrapped by a decorator?\n', '    if follow_wrapper_chains:\n', '        obj = unwrap(obj, stop=(lambda f: hasattr(f, "__signature__")))\n', '        if isinstance(obj, types.MethodType):\n', '            # If the unwrapped object is a *method*, we might want to\n', '            # skip its first parameter (self).\n', '            # See test_signature_wrapped_bound_method for details.\n', '            return _signature_from_callable(\n', '                obj,\n', '                follow_wrapper_chains=follow_wrapper_chains,\n', '                skip_bound_arg=skip_bound_arg,\n', '                sigcls=sigcls)\n', '\n', '    try:\n', '        sig = obj.__signature__\n', '    except AttributeError:\n', '        pass\n', '    else:\n', '        if sig is not None:\n', '            if not isinstance(sig, Signature):\n', '                raise TypeError(\n', "                    'unexpected object {!r} in __signature__ '\n", "                    'attribute'.format(sig))\n", '            return sig\n', '\n', '    try:\n', '        partialmethod = obj._partialmethod\n', '    except AttributeError:\n', '        pass\n', '    else:\n', '        if isinstance(partialmethod, functools.partialmethod):\n', '            # Unbound partialmethod (see functools.partialmethod)\n', '            # This means, that we need to calculate the signature\n', "            # as if it's a regular partial object, but taking into\n", '            # account that the first positional argument\n', '            # (usually `self`, or `cls`) will not be passed\n', '            # automatically (as for boundmethods)\n', '\n', '            wrapped_sig = _signature_from_callable(\n', '                partialmethod.func,\n', '                follow_wrapper_chains=follow_wrapper_chains,\n', '                skip_bound_arg=skip_bound_arg,\n', '                sigcls=sigcls)\n', '\n', '            sig = _signature_get_partial(wrapped_sig, partialmethod, (None,))\n', '            first_wrapped_param = tuple(wrapped_sig.parameters.values())[0]\n', '            if first_wrapped_param.kind is Parameter.VAR_POSITIONAL:\n', '                # First argument of the wrapped callable is `*args`, as in\n', '                # `partialmethod(lambda *args)`.\n', '                return sig\n', '            else:\n', '                sig_params = tuple(sig.parameters.values())\n', '                assert (not sig_params or\n', '                        first_wrapped_param is not sig_params[0])\n', '                new_params = (first_wrapped_param,) + sig_params\n', '                return sig.replace(parameters=new_params)\n', '\n', '    if isfunction(obj) or _signature_is_functionlike(obj):\n', "        # If it's a pure Python function, or an object that is duck type\n", '        # of a Python function (Cython functions, for instance), then:\n', '        return _signature_from_function(sigcls, obj,\n', '                                        skip_bound_arg=skip_bound_arg)\n', '\n', '    if _signature_is_builtin(obj):\n', '        return _signature_from_builtin(sigcls, obj,\n', '                                       skip_bound_arg=skip_bound_arg)\n', '\n', '    if isinstance(obj, functools.partial):\n', '        wrapped_sig = _signature_from_callable(\n', '            obj.func,\n', '            follow_wrapper_chains=follow_wrapper_chains,\n', '            skip_bound_arg=skip_bound_arg,\n', '            sigcls=sigcls)\n', '        return _signature_get_partial(wrapped_sig, obj)\n', '\n', '    sig = None\n', '    if isinstance(obj, type):\n', '        # obj is a class or a metaclass\n', '\n', "        # First, let's see if it has an overloaded __call__ defined\n", '        # in its metaclass\n', "        call = _signature_get_user_defined_method(type(obj), '__call__')\n", '        if call is not None:\n', '            sig = _signature_from_callable(\n', '                call,\n', '                follow_wrapper_chains=follow_wrapper_chains,\n', '                skip_bound_arg=skip_bound_arg,\n', '                sigcls=sigcls)\n', '        else:\n', "            # Now we check if the 'obj' class has a '__new__' method\n", "            new = _signature_get_user_defined_method(obj, '__new__')\n", '            if new is not None:\n', '                sig = _signature_from_callable(\n', '                    new,\n', '                    follow_wrapper_chains=follow_wrapper_chains,\n', '                    skip_bound_arg=skip_bound_arg,\n', '                    sigcls=sigcls)\n', '            else:\n', '                # Finally, we should have at least __init__ implemented\n', "                init = _signature_get_user_defined_method(obj, '__init__')\n", '                if init is not None:\n', '                    sig = _signature_from_callable(\n', '                        init,\n', '                        follow_wrapper_chains=follow_wrapper_chains,\n', '                        skip_bound_arg=skip_bound_arg,\n', '                        sigcls=sigcls)\n', '\n', '        if sig is None:\n', '            # At this point we know, that `obj` is a class, with no user-\n', "            # defined '__init__', '__new__', or class-level '__call__'\n", '\n', '            for base in obj.__mro__[:-1]:\n', "                # Since '__text_signature__' is implemented as a\n", '                # descriptor that extracts text signature from the\n', "                # class docstring, if 'obj' is derived from a builtin\n", "                # class, its own '__text_signature__' may be 'None'.\n", '                # Therefore, we go through the MRO (except the last\n', "                # class in there, which is 'object') to find the first\n", '                # class with non-empty text signature.\n', '                try:\n', '                    text_sig = base.__text_signature__\n', '                except AttributeError:\n', '                    pass\n', '                else:\n', '                    if text_sig:\n', "                        # If 'obj' class has a __text_signature__ attribute:\n", '                        # return a signature based on it\n', '                        return _signature_fromstr(sigcls, obj, text_sig)\n', '\n', "            # No '__text_signature__' was found for the 'obj' class.\n", "            # Last option is to check if its '__init__' is\n", '            # object.__init__ or type.__init__.\n', '            if type not in obj.__mro__:\n', '                # We have a class (not metaclass), but no user-defined\n', '                # __init__ or __new__ for it\n', '                if (obj.__init__ is object.__init__ and\n', '                    obj.__new__ is object.__new__):\n', "                    # Return a signature of 'object' builtin.\n", '                    return sigcls.from_callable(object)\n', '                else:\n', '                    raise ValueError(\n', "                        'no signature found for builtin type {!r}'.format(obj))\n", '\n', '    elif not isinstance(obj, _NonUserDefinedCallables):\n', '        # An object with __call__\n', "        # We also check that the 'obj' is not an instance of\n", '        # _WrapperDescriptor or _MethodWrapper to avoid\n', '        # infinite recursion (and even potential segfault)\n', "        call = _signature_get_user_defined_method(type(obj), '__call__')\n", '        if call is not None:\n', '            try:\n', '                sig = _signature_from_callable(\n', '                    call,\n', '                    follow_wrapper_chains=follow_wrapper_chains,\n', '                    skip_bound_arg=skip_bound_arg,\n', '                    sigcls=sigcls)\n', '            except ValueError as ex:\n', "                msg = 'no signature found for {!r}'.format(obj)\n", '                raise ValueError(msg) from ex\n', '\n', '    if sig is not None:\n', '        # For classes and objects we skip the first parameter of their\n', '        # __call__, __new__, or __init__ methods\n', '        if skip_bound_arg:\n', '            return _signature_bound_method(sig)\n', '        else:\n', '            return sig\n', '\n', '    if isinstance(obj, types.BuiltinFunctionType):\n', '        # Raise a nicer error message for builtins\n', "        msg = 'no signature found for builtin function {!r}'.format(obj)\n", '        raise ValueError(msg)\n', '\n', "    raise ValueError('callable {!r} is not supported by signature'.format(obj))\n", '\n', '\n', 'class _void:\n', '    """A private marker - used in Parameter & Signature."""\n', '\n', '\n', 'class _empty:\n', '    """Marker object for Signature.empty and Parameter.empty."""\n', '\n', '\n', 'class _ParameterKind(enum.IntEnum):\n', '    POSITIONAL_ONLY = 0\n', '    POSITIONAL_OR_KEYWORD = 1\n', '    VAR_POSITIONAL = 2\n', '    KEYWORD_ONLY = 3\n', '    VAR_KEYWORD = 4\n', '\n', '    def __str__(self):\n', '        return self._name_\n', '\n', '    @property\n', '    def description(self):\n', '        return _PARAM_NAME_MAPPING[self]\n', '\n', '_POSITIONAL_ONLY         = _ParameterKind.POSITIONAL_ONLY\n', '_POSITIONAL_OR_KEYWORD   = _ParameterKind.POSITIONAL_OR_KEYWORD\n', '_VAR_POSITIONAL          = _ParameterKind.VAR_POSITIONAL\n', '_KEYWORD_ONLY            = _ParameterKind.KEYWORD_ONLY\n', '_VAR_KEYWORD             = _ParameterKind.VAR_KEYWORD\n', '\n', '_PARAM_NAME_MAPPING = {\n', "    _POSITIONAL_ONLY: 'positional-only',\n", "    _POSITIONAL_OR_KEYWORD: 'positional or keyword',\n", "    _VAR_POSITIONAL: 'variadic positional',\n", "    _KEYWORD_ONLY: 'keyword-only',\n", "    _VAR_KEYWORD: 'variadic keyword'\n", '}\n', '\n', '\n', 'class Parameter:\n', '    """Represents a parameter in a function signature.\n', '\n', '    Has the following public attributes:\n', '\n', '    * name : str\n', '        The name of the parameter as a string.\n', '    * default : object\n', '        The default value for the parameter if specified.  If the\n', '        parameter has no default value, this attribute is set to\n', '        `Parameter.empty`.\n', '    * annotation\n', '        The annotation for the parameter if specified.  If the\n', '        parameter has no annotation, this attribute is set to\n', '        `Parameter.empty`.\n', '    * kind : str\n', '        Describes how argument values are bound to the parameter.\n', '        Possible values: `Parameter.POSITIONAL_ONLY`,\n', '        `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,\n', '        `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.\n', '    """\n', '\n', "    __slots__ = ('_name', '_kind', '_default', '_annotation')\n", '\n', '    POSITIONAL_ONLY         = _POSITIONAL_ONLY\n', '    POSITIONAL_OR_KEYWORD   = _POSITIONAL_OR_KEYWORD\n', '    VAR_POSITIONAL          = _VAR_POSITIONAL\n', '    KEYWORD_ONLY            = _KEYWORD_ONLY\n', '    VAR_KEYWORD             = _VAR_KEYWORD\n', '\n', '    empty = _empty\n', '\n', '    def __init__(self, name, kind, *, default=_empty, annotation=_empty):\n', '        try:\n', '            self._kind = _ParameterKind(kind)\n', '        except ValueError:\n', "            raise ValueError(f'value {kind!r} is not a valid Parameter.kind')\n", '        if default is not _empty:\n', '            if self._kind in (_VAR_POSITIONAL, _VAR_KEYWORD):\n', "                msg = '{} parameters cannot have default values'\n", '                msg = msg.format(self._kind.description)\n', '                raise ValueError(msg)\n', '        self._default = default\n', '        self._annotation = annotation\n', '\n', '        if name is _empty:\n', "            raise ValueError('name is a required attribute for Parameter')\n", '\n', '        if not isinstance(name, str):\n', "            msg = 'name must be a str, not a {}'.format(type(name).__name__)\n", '            raise TypeError(msg)\n', '\n', "        if name[0] == '.' and name[1:].isdigit():\n", '            # These are implicit arguments generated by comprehensions. In\n', '            # order to provide a friendlier interface to users, we recast\n', '            # their name as "implicitN" and treat them as positional-only.\n', '            # See issue 19611.\n', '            if self._kind != _POSITIONAL_OR_KEYWORD:\n', '                msg = (\n', "                    'implicit arguments must be passed as '\n", "                    'positional or keyword arguments, not {}'\n", '                )\n', '                msg = msg.format(self._kind.description)\n', '                raise ValueError(msg)\n', '            self._kind = _POSITIONAL_ONLY\n', "            name = 'implicit{}'.format(name[1:])\n", '\n', '        if not name.isidentifier():\n', "            raise ValueError('{!r} is not a valid parameter name'.format(name))\n", '\n', '        self._name = name\n', '\n', '    def __reduce__(self):\n', '        return (type(self),\n', '                (self._name, self._kind),\n', "                {'_default': self._default,\n", "                 '_annotation': self._annotation})\n", '\n', '    def __setstate__(self, state):\n', "        self._default = state['_default']\n", "        self._annotation = state['_annotation']\n", '\n', '    @property\n', '    def name(self):\n', '        return self._name\n', '\n', '    @property\n', '    def default(self):\n', '        return self._default\n', '\n', '    @property\n', '    def annotation(self):\n', '        return self._annotation\n', '\n', '    @property\n', '    def kind(self):\n', '        return self._kind\n', '\n', '    def replace(self, *, name=_void, kind=_void,\n', '                annotation=_void, default=_void):\n', '        """Creates a customized copy of the Parameter."""\n', '\n', '        if name is _void:\n', '            name = self._name\n', '\n', '        if kind is _void:\n', '            kind = self._kind\n', '\n', '        if annotation is _void:\n', '            annotation = self._annotation\n', '\n', '        if default is _void:\n', '            default = self._default\n', '\n', '        return type(self)(name, kind, default=default, annotation=annotation)\n', '\n', '    def __str__(self):\n', '        kind = self.kind\n', '        formatted = self._name\n', '\n', '        # Add annotation and default value\n', '        if self._annotation is not _empty:\n', "            formatted = '{}: {}'.format(formatted,\n", '                                       formatannotation(self._annotation))\n', '\n', '        if self._default is not _empty:\n', '            if self._annotation is not _empty:\n', "                formatted = '{} = {}'.format(formatted, repr(self._default))\n", '            else:\n', "                formatted = '{}={}'.format(formatted, repr(self._default))\n", '\n', '        if kind == _VAR_POSITIONAL:\n', "            formatted = '*' + formatted\n", '        elif kind == _VAR_KEYWORD:\n', "            formatted = '**' + formatted\n", '\n', '        return formatted\n', '\n', '    def __repr__(self):\n', '        return \'<{} "{}">\'.format(self.__class__.__name__, self)\n', '\n', '    def __hash__(self):\n', '        return hash((self.name, self.kind, self.annotation, self.default))\n', '\n', '    def __eq__(self, other):\n', '        if self is other:\n', '            return True\n', '        if not isinstance(other, Parameter):\n', '            return NotImplemented\n', '        return (self._name == other._name and\n', '                self._kind == other._kind and\n', '                self._default == other._default and\n', '                self._annotation == other._annotation)\n', '\n', '\n', 'class BoundArguments:\n', '    """Result of `Signature.bind` call.  Holds the mapping of arguments\n', "    to the function's parameters.\n", '\n', '    Has the following public attributes:\n', '\n', '    * arguments : dict\n', "        An ordered mutable mapping of parameters' names to arguments' values.\n", "        Does not contain arguments' default values.\n", '    * signature : Signature\n', '        The Signature object that created this instance.\n', '    * args : tuple\n', '        Tuple of positional arguments values.\n', '    * kwargs : dict\n', '        Dict of keyword arguments values.\n', '    """\n', '\n', "    __slots__ = ('arguments', '_signature', '__weakref__')\n", '\n', '    def __init__(self, signature, arguments):\n', '        self.arguments = arguments\n', '        self._signature = signature\n', '\n', '    @property\n', '    def signature(self):\n', '        return self._signature\n', '\n', '    @property\n', '    def args(self):\n', '        args = []\n', '        for param_name, param in self._signature.parameters.items():\n', '            if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):\n', '                break\n', '\n', '            try:\n', '                arg = self.arguments[param_name]\n', '            except KeyError:\n', "                # We're done here. Other arguments\n", "                # will be mapped in 'BoundArguments.kwargs'\n", '                break\n', '            else:\n', '                if param.kind == _VAR_POSITIONAL:\n', '                    # *args\n', '                    args.extend(arg)\n', '                else:\n', '                    # plain argument\n', '                    args.append(arg)\n', '\n', '        return tuple(args)\n', '\n', '    @property\n', '    def kwargs(self):\n', '        kwargs = {}\n', '        kwargs_started = False\n', '        for param_name, param in self._signature.parameters.items():\n', '            if not kwargs_started:\n', '                if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):\n', '                    kwargs_started = True\n', '                else:\n', '                    if param_name not in self.arguments:\n', '                        kwargs_started = True\n', '                        continue\n', '\n', '            if not kwargs_started:\n', '                continue\n', '\n', '            try:\n', '                arg = self.arguments[param_name]\n', '            except KeyError:\n', '                pass\n', '            else:\n', '                if param.kind == _VAR_KEYWORD:\n', '                    # **kwargs\n', '                    kwargs.update(arg)\n', '                else:\n', '                    # plain keyword argument\n', '                    kwargs[param_name] = arg\n', '\n', '        return kwargs\n', '\n', '    def apply_defaults(self):\n', '        """Set default values for missing arguments.\n', '\n', '        For variable-positional arguments (*args) the default is an\n', '        empty tuple.\n', '\n', '        For variable-keyword arguments (**kwargs) the default is an\n', '        empty dict.\n', '        """\n', '        arguments = self.arguments\n', '        new_arguments = []\n', '        for name, param in self._signature.parameters.items():\n', '            try:\n', '                new_arguments.append((name, arguments[name]))\n', '            except KeyError:\n', '                if param.default is not _empty:\n', '                    val = param.default\n', '                elif param.kind is _VAR_POSITIONAL:\n', '                    val = ()\n', '                elif param.kind is _VAR_KEYWORD:\n', '                    val = {}\n', '                else:\n', '                    # This BoundArguments was likely produced by\n', '                    # Signature.bind_partial().\n', '                    continue\n', '                new_arguments.append((name, val))\n', '        self.arguments = dict(new_arguments)\n', '\n', '    def __eq__(self, other):\n', '        if self is other:\n', '            return True\n', '        if not isinstance(other, BoundArguments):\n', '            return NotImplemented\n', '        return (self.signature == other.signature and\n', '                self.arguments == other.arguments)\n', '\n', '    def __setstate__(self, state):\n', "        self._signature = state['_signature']\n", "        self.arguments = state['arguments']\n", '\n', '    def __getstate__(self):\n', "        return {'_signature': self._signature, 'arguments': self.arguments}\n", '\n', '    def __repr__(self):\n', '        args = []\n', '        for arg, value in self.arguments.items():\n', "            args.append('{}={!r}'.format(arg, value))\n", "        return '<{} ({})>'.format(self.__class__.__name__, ', '.join(args))\n", '\n', '\n', 'class Signature:\n', '    """A Signature object represents the overall signature of a function.\n', '    It stores a Parameter object for each parameter accepted by the\n', '    function, as well as information specific to the function itself.\n', '\n', '    A Signature object has the following public attributes and methods:\n', '\n', '    * parameters : OrderedDict\n', "        An ordered mapping of parameters' names to the corresponding\n", '        Parameter objects (keyword-only arguments are in the same order\n', '        as listed in `code.co_varnames`).\n', '    * return_annotation : object\n', '        The annotation for the return type of the function if specified.\n', '        If the function has no annotation for its return type, this\n', '        attribute is set to `Signature.empty`.\n', '    * bind(*args, **kwargs) -> BoundArguments\n', '        Creates a mapping from positional and keyword arguments to\n', '        parameters.\n', '    * bind_partial(*args, **kwargs) -> BoundArguments\n', '        Creates a partial mapping from positional and keyword arguments\n', "        to parameters (simulating 'functools.partial' behavior.)\n", '    """\n', '\n', "    __slots__ = ('_return_annotation', '_parameters')\n", '\n', '    _parameter_cls = Parameter\n', '    _bound_arguments_cls = BoundArguments\n', '\n', '    empty = _empty\n', '\n', '    def __init__(self, parameters=None, *, return_annotation=_empty,\n', '                 __validate_parameters__=True):\n', '        """Constructs Signature from the given list of Parameter\n', "        objects and 'return_annotation'.  All arguments are optional.\n", '        """\n', '\n', '        if parameters is None:\n', '            params = OrderedDict()\n', '        else:\n', '            if __validate_parameters__:\n', '                params = OrderedDict()\n', '                top_kind = _POSITIONAL_ONLY\n', '                kind_defaults = False\n', '\n', '                for param in parameters:\n', '                    kind = param.kind\n', '                    name = param.name\n', '\n', '                    if kind < top_kind:\n', '                        msg = (\n', "                            'wrong parameter order: {} parameter before {} '\n", "                            'parameter'\n", '                        )\n', '                        msg = msg.format(top_kind.description,\n', '                                         kind.description)\n', '                        raise ValueError(msg)\n', '                    elif kind > top_kind:\n', '                        kind_defaults = False\n', '                        top_kind = kind\n', '\n', '                    if kind in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD):\n', '                        if param.default is _empty:\n', '                            if kind_defaults:\n', '                                # No default for this parameter, but the\n', '                                # previous parameter of the same kind had\n', '                                # a default\n', "                                msg = 'non-default argument follows default ' \\\n", "                                      'argument'\n", '                                raise ValueError(msg)\n', '                        else:\n', '                            # There is a default for this parameter.\n', '                            kind_defaults = True\n', '\n', '                    if name in params:\n', "                        msg = 'duplicate parameter name: {!r}'.format(name)\n", '                        raise ValueError(msg)\n', '\n', '                    params[name] = param\n', '            else:\n', '                params = OrderedDict((param.name, param) for param in parameters)\n', '\n', '        self._parameters = types.MappingProxyType(params)\n', '        self._return_annotation = return_annotation\n', '\n', '    @classmethod\n', '    def from_function(cls, func):\n', '        """Constructs Signature for the given python function.\n', '\n', '        Deprecated since Python 3.5, use `Signature.from_callable()`.\n', '        """\n', '\n', '        warnings.warn("inspect.Signature.from_function() is deprecated since "\n', '                      "Python 3.5, use Signature.from_callable()",\n', '                      DeprecationWarning, stacklevel=2)\n', '        return _signature_from_function(cls, func)\n', '\n', '    @classmethod\n', '    def from_builtin(cls, func):\n', '        """Constructs Signature for the given builtin function.\n', '\n', '        Deprecated since Python 3.5, use `Signature.from_callable()`.\n', '        """\n', '\n', '        warnings.warn("inspect.Signature.from_builtin() is deprecated since "\n', '                      "Python 3.5, use Signature.from_callable()",\n', '                      DeprecationWarning, stacklevel=2)\n', '        return _signature_from_builtin(cls, func)\n', '\n', '    @classmethod\n', '    def from_callable(cls, obj, *, follow_wrapped=True):\n', '        """Constructs Signature for the given callable object."""\n', '        return _signature_from_callable(obj, sigcls=cls,\n', '                                        follow_wrapper_chains=follow_wrapped)\n', '\n', '    @property\n', '    def parameters(self):\n', '        return self._parameters\n', '\n', '    @property\n', '    def return_annotation(self):\n', '        return self._return_annotation\n', '\n', '    def replace(self, *, parameters=_void, return_annotation=_void):\n', '        """Creates a customized copy of the Signature.\n', "        Pass 'parameters' and/or 'return_annotation' arguments\n", '        to override them in the new copy.\n', '        """\n', '\n', '        if parameters is _void:\n', '            parameters = self.parameters.values()\n', '\n', '        if return_annotation is _void:\n', '            return_annotation = self._return_annotation\n', '\n', '        return type(self)(parameters,\n', '                          return_annotation=return_annotation)\n', '\n', '    def _hash_basis(self):\n', '        params = tuple(param for param in self.parameters.values()\n', '                             if param.kind != _KEYWORD_ONLY)\n', '\n', '        kwo_params = {param.name: param for param in self.parameters.values()\n', '                                        if param.kind == _KEYWORD_ONLY}\n', '\n', '        return params, kwo_params, self.return_annotation\n', '\n', '    def __hash__(self):\n', '        params, kwo_params, return_annotation = self._hash_basis()\n', '        kwo_params = frozenset(kwo_params.values())\n', '        return hash((params, kwo_params, return_annotation))\n', '\n', '    def __eq__(self, other):\n', '        if self is other:\n', '            return True\n', '        if not isinstance(other, Signature):\n', '            return NotImplemented\n', '        return self._hash_basis() == other._hash_basis()\n', '\n', '    def _bind(self, args, kwargs, *, partial=False):\n', '        """Private method. Don\'t use directly."""\n', '\n', '        arguments = {}\n', '\n', '        parameters = iter(self.parameters.values())\n', '        parameters_ex = ()\n', '        arg_vals = iter(args)\n', '\n', '        while True:\n', "            # Let's iterate through the positional arguments and corresponding\n", '            # parameters\n', '            try:\n', '                arg_val = next(arg_vals)\n', '            except StopIteration:\n', '                # No more positional arguments\n', '                try:\n', '                    param = next(parameters)\n', '                except StopIteration:\n', "                    # No more parameters. That's it. Just need to check that\n", '                    # we have no `kwargs` after this while loop\n', '                    break\n', '                else:\n', '                    if param.kind == _VAR_POSITIONAL:\n', "                        # That's OK, just empty *args.  Let's start parsing\n", '                        # kwargs\n', '                        break\n', '                    elif param.name in kwargs:\n', '                        if param.kind == _POSITIONAL_ONLY:\n', "                            msg = '{arg!r} parameter is positional only, ' \\\n", "                                  'but was passed as a keyword'\n", '                            msg = msg.format(arg=param.name)\n', '                            raise TypeError(msg) from None\n', '                        parameters_ex = (param,)\n', '                        break\n', '                    elif (param.kind == _VAR_KEYWORD or\n', '                                                param.default is not _empty):\n', "                        # That's fine too - we have a default value for this\n", '                        # parameter.  So, lets start parsing `kwargs`, starting\n', '                        # with the current parameter\n', '                        parameters_ex = (param,)\n', '                        break\n', '                    else:\n', '                        # No default, not VAR_KEYWORD, not VAR_POSITIONAL,\n', '                        # not in `kwargs`\n', '                        if partial:\n', '                            parameters_ex = (param,)\n', '                            break\n', '                        else:\n', "                            msg = 'missing a required argument: {arg!r}'\n", '                            msg = msg.format(arg=param.name)\n', '                            raise TypeError(msg) from None\n', '            else:\n', '                # We have a positional argument to process\n', '                try:\n', '                    param = next(parameters)\n', '                except StopIteration:\n', "                    raise TypeError('too many positional arguments') from None\n", '                else:\n', '                    if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):\n', '                        # Looks like we have no parameter for this positional\n', '                        # argument\n', '                        raise TypeError(\n', "                            'too many positional arguments') from None\n", '\n', '                    if param.kind == _VAR_POSITIONAL:\n', "                        # We have an '*args'-like argument, let's fill it with\n", '                        # all positional arguments we have left and move on to\n', '                        # the next phase\n', '                        values = [arg_val]\n', '                        values.extend(arg_vals)\n', '                        arguments[param.name] = tuple(values)\n', '                        break\n', '\n', '                    if param.name in kwargs and param.kind != _POSITIONAL_ONLY:\n', '                        raise TypeError(\n', "                            'multiple values for argument {arg!r}'.format(\n", '                                arg=param.name)) from None\n', '\n', '                    arguments[param.name] = arg_val\n', '\n', '        # Now, we iterate through the remaining parameters to process\n', '        # keyword arguments\n', '        kwargs_param = None\n', '        for param in itertools.chain(parameters_ex, parameters):\n', '            if param.kind == _VAR_KEYWORD:\n', "                # Memorize that we have a '**kwargs'-like parameter\n", '                kwargs_param = param\n', '                continue\n', '\n', '            if param.kind == _VAR_POSITIONAL:\n', "                # Named arguments don't refer to '*args'-like parameters.\n", '                # We only arrive here if the positional arguments ended\n', '                # before reaching the last parameter before *args.\n', '                continue\n', '\n', '            param_name = param.name\n', '            try:\n', '                arg_val = kwargs.pop(param_name)\n', '            except KeyError:\n', "                # We have no value for this parameter.  It's fine though,\n", "                # if it has a default value, or it is an '*args'-like\n", '                # parameter, left alone by the processing of positional\n', '                # arguments.\n', '                if (not partial and param.kind != _VAR_POSITIONAL and\n', '                                                    param.default is _empty):\n', "                    raise TypeError('missing a required argument: {arg!r}'. \\\n", '                                    format(arg=param_name)) from None\n', '\n', '            else:\n', '                if param.kind == _POSITIONAL_ONLY:\n', '                    # This should never happen in case of a properly built\n', "                    # Signature object (but let's have this check here\n", '                    # to ensure correct behaviour just in case)\n', "                    raise TypeError('{arg!r} parameter is positional only, '\n", "                                    'but was passed as a keyword'. \\\n", '                                    format(arg=param.name))\n', '\n', '                arguments[param_name] = arg_val\n', '\n', '        if kwargs:\n', '            if kwargs_param is not None:\n', "                # Process our '**kwargs'-like parameter\n", '                arguments[kwargs_param.name] = kwargs\n', '            else:\n', '                raise TypeError(\n', "                    'got an unexpected keyword argument {arg!r}'.format(\n", '                        arg=next(iter(kwargs))))\n', '\n', '        return self._bound_arguments_cls(self, arguments)\n', '\n', '    def bind(self, /, *args, **kwargs):\n', '        """Get a BoundArguments object, that maps the passed `args`\n', "        and `kwargs` to the function's signature.  Raises `TypeError`\n", '        if the passed arguments can not be bound.\n', '        """\n', '        return self._bind(args, kwargs)\n', '\n', '    def bind_partial(self, /, *args, **kwargs):\n', '        """Get a BoundArguments object, that partially maps the\n', "        passed `args` and `kwargs` to the function's signature.\n", '        Raises `TypeError` if the passed arguments can not be bound.\n', '        """\n', '        return self._bind(args, kwargs, partial=True)\n', '\n', '    def __reduce__(self):\n', '        return (type(self),\n', '                (tuple(self._parameters.values()),),\n', "                {'_return_annotation': self._return_annotation})\n", '\n', '    def __setstate__(self, state):\n', "        self._return_annotation = state['_return_annotation']\n", '\n', '    def __repr__(self):\n', "        return '<{} {}>'.format(self.__class__.__name__, self)\n", '\n', '    def __str__(self):\n', '        result = []\n', '        render_pos_only_separator = False\n', '        render_kw_only_separator = True\n', '        for param in self.parameters.values():\n', '            formatted = str(param)\n', '\n', '            kind = param.kind\n', '\n', '            if kind == _POSITIONAL_ONLY:\n', '                render_pos_only_separator = True\n', '            elif render_pos_only_separator:\n', "                # It's not a positional-only parameter, and the flag\n", "                # is set to 'True' (there were pos-only params before.)\n", "                result.append('/')\n", '                render_pos_only_separator = False\n', '\n', '            if kind == _VAR_POSITIONAL:\n', "                # OK, we have an '*args'-like parameter, so we won't need\n", "                # a '*' to separate keyword-only arguments\n", '                render_kw_only_separator = False\n', '            elif kind == _KEYWORD_ONLY and render_kw_only_separator:\n', "                # We have a keyword-only parameter to render and we haven't\n", "                # rendered an '*args'-like parameter before, so add a '*'\n", '                # separator to the parameters list ("foo(arg1, *, arg2)" case)\n', "                result.append('*')\n", '                # This condition should be only triggered once, so\n', '                # reset the flag\n', '                render_kw_only_separator = False\n', '\n', '            result.append(formatted)\n', '\n', '        if render_pos_only_separator:\n', '            # There were only positional-only parameters, hence the\n', "            # flag was not reset to 'False'\n", "            result.append('/')\n", '\n', "        rendered = '({})'.format(', '.join(result))\n", '\n', '        if self.return_annotation is not _empty:\n', '            anno = formatannotation(self.return_annotation)\n', "            rendered += ' -> {}'.format(anno)\n", '\n', '        return rendered\n', '\n', '\n', 'def signature(obj, *, follow_wrapped=True):\n', '    """Get a signature object for the passed callable."""\n', '    return Signature.from_callable(obj, follow_wrapped=follow_wrapped)\n', '\n', '\n', 'def _main():\n', '    """ Logic for inspecting an object given at command line """\n', '    import argparse\n', '    import importlib\n', '\n', '    parser = argparse.ArgumentParser()\n', '    parser.add_argument(\n', "        'object',\n", '         help="The object to be analysed. "\n', '              "It supports the \'module:qualname\' syntax")\n', '    parser.add_argument(\n', "        '-d', '--details', action='store_true',\n", "        help='Display info about the module rather than its source code')\n", '\n', '    args = parser.parse_args()\n', '\n', '    target = args.object\n', '    mod_name, has_attrs, attrs = target.partition(":")\n', '    try:\n', '        obj = module = importlib.import_module(mod_name)\n', '    except Exception as exc:\n', '        msg = "Failed to import {} ({}: {})".format(mod_name,\n', '                                                    type(exc).__name__,\n', '                                                    exc)\n', '        print(msg, file=sys.stderr)\n', '        sys.exit(2)\n', '\n', '    if has_attrs:\n', '        parts = attrs.split(".")\n', '        obj = module\n', '        for part in parts:\n', '            obj = getattr(obj, part)\n', '\n', '    if module.__name__ in sys.builtin_module_names:\n', '        print("Can\'t get info for builtin modules.", file=sys.stderr)\n', '        sys.exit(1)\n', '\n', '    if args.details:\n', "        print('Target: {}'.format(target))\n", "        print('Origin: {}'.format(getsourcefile(module)))\n", "        print('Cached: {}'.format(module.__cached__))\n", '        if obj is module:\n', "            print('Loader: {}'.format(repr(module.__loader__)))\n", "            if hasattr(module, '__path__'):\n", "                print('Submodule search path: {}'.format(module.__path__))\n", '        else:\n', '            try:\n', '                __, lineno = findsource(obj)\n', '            except Exception:\n', '                pass\n', '            else:\n', "                print('Line: {}'.format(lineno))\n", '\n', "        print('\\n')\n", '    else:\n', '        print(getsource(obj))\n', '\n', '\n', 'if __name__ == "__main__":\n', '    _main()\n'], '/nix/store/cgxc3jz7idrb1wnb2lard9rvcx6aw2si-python3-3.9.6/lib/python3.9/inspect.py'), '/nix/store/cgxc3jz7idrb1wnb2lard9rvcx6aw2si-python3-3.9.6/lib/python3.9/ast.py': (56179, 1.0, ['"""\n', '    ast\n', '    ~~~\n', '\n', '    The `ast` module helps Python applications to process trees of the Python\n', '    abstract syntax grammar.  The abstract syntax itself might change with\n', '    each Python release; this module helps to find out programmatically what\n', '    the current grammar looks like and allows modifications of it.\n', '\n', '    An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as\n', '    a flag to the `compile()` builtin function or by using the `parse()`\n', '    function from this module.  The result will be a tree of objects whose\n', '    classes all inherit from `ast.AST`.\n', '\n', '    A modified abstract syntax tree can be compiled into a Python code object\n', '    using the built-in `compile()` function.\n', '\n', '    Additionally various helper functions are provided that make working with\n', '    the trees simpler.  The main intention of the helper functions and this\n', '    module in general is to provide an easy to use interface for libraries\n', '    that work tightly with the python syntax (template engines for example).\n', '\n', '\n', '    :copyright: Copyright 2008 by Armin Ronacher.\n', '    :license: Python License.\n', '"""\n', 'import sys\n', 'from _ast import *\n', 'from contextlib import contextmanager, nullcontext\n', 'from enum import IntEnum, auto\n', '\n', '\n', "def parse(source, filename='<unknown>', mode='exec', *,\n", '          type_comments=False, feature_version=None):\n', '    """\n', '    Parse the source into an AST node.\n', '    Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).\n', '    Pass type_comments=True to get back type comments where the syntax allows.\n', '    """\n', '    flags = PyCF_ONLY_AST\n', '    if type_comments:\n', '        flags |= PyCF_TYPE_COMMENTS\n', '    if isinstance(feature_version, tuple):\n', '        major, minor = feature_version  # Should be a 2-tuple.\n', '        assert major == 3\n', '        feature_version = minor\n', '    elif feature_version is None:\n', '        feature_version = -1\n', '    # Else it should be an int giving the minor version for 3.x.\n', '    return compile(source, filename, mode, flags,\n', '                   _feature_version=feature_version)\n', '\n', '\n', 'def literal_eval(node_or_string):\n', '    """\n', '    Safely evaluate an expression node or a string containing a Python\n', '    expression.  The string or node provided may only consist of the following\n', '    Python literal structures: strings, bytes, numbers, tuples, lists, dicts,\n', '    sets, booleans, and None.\n', '    """\n', '    if isinstance(node_or_string, str):\n', "        node_or_string = parse(node_or_string, mode='eval')\n", '    if isinstance(node_or_string, Expression):\n', '        node_or_string = node_or_string.body\n', '    def _raise_malformed_node(node):\n', "        raise ValueError(f'malformed node or string: {node!r}')\n", '    def _convert_num(node):\n', '        if not isinstance(node, Constant) or type(node.value) not in (int, float, complex):\n', '            _raise_malformed_node(node)\n', '        return node.value\n', '    def _convert_signed_num(node):\n', '        if isinstance(node, UnaryOp) and isinstance(node.op, (UAdd, USub)):\n', '            operand = _convert_num(node.operand)\n', '            if isinstance(node.op, UAdd):\n', '                return + operand\n', '            else:\n', '                return - operand\n', '        return _convert_num(node)\n', '    def _convert(node):\n', '        if isinstance(node, Constant):\n', '            return node.value\n', '        elif isinstance(node, Tuple):\n', '            return tuple(map(_convert, node.elts))\n', '        elif isinstance(node, List):\n', '            return list(map(_convert, node.elts))\n', '        elif isinstance(node, Set):\n', '            return set(map(_convert, node.elts))\n', '        elif (isinstance(node, Call) and isinstance(node.func, Name) and\n', "              node.func.id == 'set' and node.args == node.keywords == []):\n", '            return set()\n', '        elif isinstance(node, Dict):\n', '            if len(node.keys) != len(node.values):\n', '                _raise_malformed_node(node)\n', '            return dict(zip(map(_convert, node.keys),\n', '                            map(_convert, node.values)))\n', '        elif isinstance(node, BinOp) and isinstance(node.op, (Add, Sub)):\n', '            left = _convert_signed_num(node.left)\n', '            right = _convert_num(node.right)\n', '            if isinstance(left, (int, float)) and isinstance(right, complex):\n', '                if isinstance(node.op, Add):\n', '                    return left + right\n', '                else:\n', '                    return left - right\n', '        return _convert_signed_num(node)\n', '    return _convert(node_or_string)\n', '\n', '\n', 'def dump(node, annotate_fields=True, include_attributes=False, *, indent=None):\n', '    """\n', '    Return a formatted dump of the tree in node.  This is mainly useful for\n', '    debugging purposes.  If annotate_fields is true (by default),\n', '    the returned string will show the names and the values for fields.\n', '    If annotate_fields is false, the result string will be more compact by\n', '    omitting unambiguous field names.  Attributes such as line\n', '    numbers and column offsets are not dumped by default.  If this is wanted,\n', '    include_attributes can be set to true.  If indent is a non-negative\n', '    integer or string, then the tree will be pretty-printed with that indent\n', '    level. None (the default) selects the single line representation.\n', '    """\n', '    def _format(node, level=0):\n', '        if indent is not None:\n', '            level += 1\n', "            prefix = '\\n' + indent * level\n", "            sep = ',\\n' + indent * level\n", '        else:\n', "            prefix = ''\n", "            sep = ', '\n", '        if isinstance(node, AST):\n', '            cls = type(node)\n', '            args = []\n', '            allsimple = True\n', '            keywords = annotate_fields\n', '            for name in node._fields:\n', '                try:\n', '                    value = getattr(node, name)\n', '                except AttributeError:\n', '                    keywords = True\n', '                    continue\n', '                if value is None and getattr(cls, name, ...) is None:\n', '                    keywords = True\n', '                    continue\n', '                value, simple = _format(value, level)\n', '                allsimple = allsimple and simple\n', '                if keywords:\n', "                    args.append('%s=%s' % (name, value))\n", '                else:\n', '                    args.append(value)\n', '            if include_attributes and node._attributes:\n', '                for name in node._attributes:\n', '                    try:\n', '                        value = getattr(node, name)\n', '                    except AttributeError:\n', '                        continue\n', '                    if value is None and getattr(cls, name, ...) is None:\n', '                        continue\n', '                    value, simple = _format(value, level)\n', '                    allsimple = allsimple and simple\n', "                    args.append('%s=%s' % (name, value))\n", '            if allsimple and len(args) <= 3:\n', "                return '%s(%s)' % (node.__class__.__name__, ', '.join(args)), not args\n", "            return '%s(%s%s)' % (node.__class__.__name__, prefix, sep.join(args)), False\n", '        elif isinstance(node, list):\n', '            if not node:\n', "                return '[]', True\n", "            return '[%s%s]' % (prefix, sep.join(_format(x, level)[0] for x in node)), False\n", '        return repr(node), True\n', '\n', '    if not isinstance(node, AST):\n', "        raise TypeError('expected AST, got %r' % node.__class__.__name__)\n", '    if indent is not None and not isinstance(indent, str):\n', "        indent = ' ' * indent\n", '    return _format(node)[0]\n', '\n', '\n', 'def copy_location(new_node, old_node):\n', '    """\n', '    Copy source location (`lineno`, `col_offset`, `end_lineno`, and `end_col_offset`\n', '    attributes) from *old_node* to *new_node* if possible, and return *new_node*.\n', '    """\n', "    for attr in 'lineno', 'col_offset', 'end_lineno', 'end_col_offset':\n", '        if attr in old_node._attributes and attr in new_node._attributes:\n', '            value = getattr(old_node, attr, None)\n', '            # end_lineno and end_col_offset are optional attributes, and they\n', '            # should be copied whether the value is None or not.\n', '            if value is not None or (\n', '                hasattr(old_node, attr) and attr.startswith("end_")\n', '            ):\n', '                setattr(new_node, attr, value)\n', '    return new_node\n', '\n', '\n', 'def fix_missing_locations(node):\n', '    """\n', '    When you compile a node tree with compile(), the compiler expects lineno and\n', '    col_offset attributes for every node that supports them.  This is rather\n', '    tedious to fill in for generated nodes, so this helper adds these attributes\n', '    recursively where not already set, by setting them to the values of the\n', '    parent node.  It works recursively starting at *node*.\n', '    """\n', '    def _fix(node, lineno, col_offset, end_lineno, end_col_offset):\n', "        if 'lineno' in node._attributes:\n", "            if not hasattr(node, 'lineno'):\n", '                node.lineno = lineno\n', '            else:\n', '                lineno = node.lineno\n', "        if 'end_lineno' in node._attributes:\n", "            if getattr(node, 'end_lineno', None) is None:\n", '                node.end_lineno = end_lineno\n', '            else:\n', '                end_lineno = node.end_lineno\n', "        if 'col_offset' in node._attributes:\n", "            if not hasattr(node, 'col_offset'):\n", '                node.col_offset = col_offset\n', '            else:\n', '                col_offset = node.col_offset\n', "        if 'end_col_offset' in node._attributes:\n", "            if getattr(node, 'end_col_offset', None) is None:\n", '                node.end_col_offset = end_col_offset\n', '            else:\n', '                end_col_offset = node.end_col_offset\n', '        for child in iter_child_nodes(node):\n', '            _fix(child, lineno, col_offset, end_lineno, end_col_offset)\n', '    _fix(node, 1, 0, 1, 0)\n', '    return node\n', '\n', '\n', 'def increment_lineno(node, n=1):\n', '    """\n', '    Increment the line number and end line number of each node in the tree\n', '    starting at *node* by *n*. This is useful to "move code" to a different\n', '    location in a file.\n', '    """\n', '    for child in walk(node):\n', "        if 'lineno' in child._attributes:\n", "            child.lineno = getattr(child, 'lineno', 0) + n\n", '        if (\n', '            "end_lineno" in child._attributes\n', '            and (end_lineno := getattr(child, "end_lineno", 0)) is not None\n', '        ):\n', '            child.end_lineno = end_lineno + n\n', '    return node\n', '\n', '\n', 'def iter_fields(node):\n', '    """\n', '    Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``\n', '    that is present on *node*.\n', '    """\n', '    for field in node._fields:\n', '        try:\n', '            yield field, getattr(node, field)\n', '        except AttributeError:\n', '            pass\n', '\n', '\n', 'def iter_child_nodes(node):\n', '    """\n', '    Yield all direct child nodes of *node*, that is, all fields that are nodes\n', '    and all items of fields that are lists of nodes.\n', '    """\n', '    for name, field in iter_fields(node):\n', '        if isinstance(field, AST):\n', '            yield field\n', '        elif isinstance(field, list):\n', '            for item in field:\n', '                if isinstance(item, AST):\n', '                    yield item\n', '\n', '\n', 'def get_docstring(node, clean=True):\n', '    """\n', '    Return the docstring for the given node or None if no docstring can\n', '    be found.  If the node provided does not have docstrings a TypeError\n', '    will be raised.\n', '\n', '    If *clean* is `True`, all tabs are expanded to spaces and any whitespace\n', '    that can be uniformly removed from the second line onwards is removed.\n', '    """\n', '    if not isinstance(node, (AsyncFunctionDef, FunctionDef, ClassDef, Module)):\n', '        raise TypeError("%r can\'t have docstrings" % node.__class__.__name__)\n', '    if not(node.body and isinstance(node.body[0], Expr)):\n', '        return None\n', '    node = node.body[0].value\n', '    if isinstance(node, Str):\n', '        text = node.s\n', '    elif isinstance(node, Constant) and isinstance(node.value, str):\n', '        text = node.value\n', '    else:\n', '        return None\n', '    if clean:\n', '        import inspect\n', '        text = inspect.cleandoc(text)\n', '    return text\n', '\n', '\n', 'def _splitlines_no_ff(source):\n', '    """Split a string into lines ignoring form feed and other chars.\n', '\n', '    This mimics how the Python parser splits source code.\n', '    """\n', '    idx = 0\n', '    lines = []\n', "    next_line = ''\n", '    while idx < len(source):\n', '        c = source[idx]\n', '        next_line += c\n', '        idx += 1\n', '        # Keep \\r\\n together\n', "        if c == '\\r' and idx < len(source) and source[idx] == '\\n':\n", "            next_line += '\\n'\n", '            idx += 1\n', "        if c in '\\r\\n':\n", '            lines.append(next_line)\n', "            next_line = ''\n", '\n', '    if next_line:\n', '        lines.append(next_line)\n', '    return lines\n', '\n', '\n', 'def _pad_whitespace(source):\n', '    r"""Replace all chars except \'\\f\\t\' in a line with spaces."""\n', "    result = ''\n", '    for c in source:\n', "        if c in '\\f\\t':\n", '            result += c\n', '        else:\n', "            result += ' '\n", '    return result\n', '\n', '\n', 'def get_source_segment(source, node, *, padded=False):\n', '    """Get source code segment of the *source* that generated *node*.\n', '\n', '    If some location information (`lineno`, `end_lineno`, `col_offset`,\n', '    or `end_col_offset`) is missing, return None.\n', '\n', '    If *padded* is `True`, the first line of a multi-line statement will\n', '    be padded with spaces to match its original position.\n', '    """\n', '    try:\n', '        if node.end_lineno is None or node.end_col_offset is None:\n', '            return None\n', '        lineno = node.lineno - 1\n', '        end_lineno = node.end_lineno - 1\n', '        col_offset = node.col_offset\n', '        end_col_offset = node.end_col_offset\n', '    except AttributeError:\n', '        return None\n', '\n', '    lines = _splitlines_no_ff(source)\n', '    if end_lineno == lineno:\n', '        return lines[lineno].encode()[col_offset:end_col_offset].decode()\n', '\n', '    if padded:\n', '        padding = _pad_whitespace(lines[lineno].encode()[:col_offset].decode())\n', '    else:\n', "        padding = ''\n", '\n', '    first = padding + lines[lineno].encode()[col_offset:].decode()\n', '    last = lines[end_lineno].encode()[:end_col_offset].decode()\n', '    lines = lines[lineno+1:end_lineno]\n', '\n', '    lines.insert(0, first)\n', '    lines.append(last)\n', "    return ''.join(lines)\n", '\n', '\n', 'def walk(node):\n', '    """\n', '    Recursively yield all descendant nodes in the tree starting at *node*\n', '    (including *node* itself), in no specified order.  This is useful if you\n', "    only want to modify nodes in place and don't care about the context.\n", '    """\n', '    from collections import deque\n', '    todo = deque([node])\n', '    while todo:\n', '        node = todo.popleft()\n', '        todo.extend(iter_child_nodes(node))\n', '        yield node\n', '\n', '\n', 'class NodeVisitor(object):\n', '    """\n', '    A node visitor base class that walks the abstract syntax tree and calls a\n', '    visitor function for every node found.  This function may return a value\n', '    which is forwarded by the `visit` method.\n', '\n', '    This class is meant to be subclassed, with the subclass adding visitor\n', '    methods.\n', '\n', "    Per default the visitor functions for the nodes are ``'visit_'`` +\n", '    class name of the node.  So a `TryFinally` node visit function would\n', '    be `visit_TryFinally`.  This behavior can be changed by overriding\n', '    the `visit` method.  If no visitor function exists for a node\n', '    (return value `None`) the `generic_visit` visitor is used instead.\n', '\n', "    Don't use the `NodeVisitor` if you want to apply changes to nodes during\n", '    traversing.  For this a special visitor exists (`NodeTransformer`) that\n', '    allows modifications.\n', '    """\n', '\n', '    def visit(self, node):\n', '        """Visit a node."""\n', "        method = 'visit_' + node.__class__.__name__\n", '        visitor = getattr(self, method, self.generic_visit)\n', '        return visitor(node)\n', '\n', '    def generic_visit(self, node):\n', '        """Called if no explicit visitor function exists for a node."""\n', '        for field, value in iter_fields(node):\n', '            if isinstance(value, list):\n', '                for item in value:\n', '                    if isinstance(item, AST):\n', '                        self.visit(item)\n', '            elif isinstance(value, AST):\n', '                self.visit(value)\n', '\n', '    def visit_Constant(self, node):\n', '        value = node.value\n', '        type_name = _const_node_type_names.get(type(value))\n', '        if type_name is None:\n', '            for cls, name in _const_node_type_names.items():\n', '                if isinstance(value, cls):\n', '                    type_name = name\n', '                    break\n', '        if type_name is not None:\n', "            method = 'visit_' + type_name\n", '            try:\n', '                visitor = getattr(self, method)\n', '            except AttributeError:\n', '                pass\n', '            else:\n', '                import warnings\n', '                warnings.warn(f"{method} is deprecated; add visit_Constant",\n', '                              DeprecationWarning, 2)\n', '                return visitor(node)\n', '        return self.generic_visit(node)\n', '\n', '\n', 'class NodeTransformer(NodeVisitor):\n', '    """\n', '    A :class:`NodeVisitor` subclass that walks the abstract syntax tree and\n', '    allows modification of nodes.\n', '\n', '    The `NodeTransformer` will walk the AST and use the return value of the\n', '    visitor methods to replace or remove the old node.  If the return value of\n', '    the visitor method is ``None``, the node will be removed from its location,\n', '    otherwise it is replaced with the return value.  The return value may be the\n', '    original node in which case no replacement takes place.\n', '\n', '    Here is an example transformer that rewrites all occurrences of name lookups\n', "    (``foo``) to ``data['foo']``::\n", '\n', '       class RewriteName(NodeTransformer):\n', '\n', '           def visit_Name(self, node):\n', '               return Subscript(\n', "                   value=Name(id='data', ctx=Load()),\n", '                   slice=Constant(value=node.id),\n', '                   ctx=node.ctx\n', '               )\n', '\n', "    Keep in mind that if the node you're operating on has child nodes you must\n", '    either transform the child nodes yourself or call the :meth:`generic_visit`\n', '    method for the node first.\n', '\n', '    For nodes that were part of a collection of statements (that applies to all\n', '    statement nodes), the visitor may also return a list of nodes rather than\n', '    just a single node.\n', '\n', '    Usually you use the transformer like this::\n', '\n', '       node = YourTransformer().visit(node)\n', '    """\n', '\n', '    def generic_visit(self, node):\n', '        for field, old_value in iter_fields(node):\n', '            if isinstance(old_value, list):\n', '                new_values = []\n', '                for value in old_value:\n', '                    if isinstance(value, AST):\n', '                        value = self.visit(value)\n', '                        if value is None:\n', '                            continue\n', '                        elif not isinstance(value, AST):\n', '                            new_values.extend(value)\n', '                            continue\n', '                    new_values.append(value)\n', '                old_value[:] = new_values\n', '            elif isinstance(old_value, AST):\n', '                new_node = self.visit(old_value)\n', '                if new_node is None:\n', '                    delattr(node, field)\n', '                else:\n', '                    setattr(node, field, new_node)\n', '        return node\n', '\n', '\n', '# If the ast module is loaded more than once, only add deprecated methods once\n', "if not hasattr(Constant, 'n'):\n", '    # The following code is for backward compatibility.\n', '    # It will be removed in future.\n', '\n', '    def _getter(self):\n', '        """Deprecated. Use value instead."""\n', '        return self.value\n', '\n', '    def _setter(self, value):\n', '        self.value = value\n', '\n', '    Constant.n = property(_getter, _setter)\n', '    Constant.s = property(_getter, _setter)\n', '\n', 'class _ABC(type):\n', '\n', '    def __init__(cls, *args):\n', '        cls.__doc__ = """Deprecated AST node class. Use ast.Constant instead"""\n', '\n', '    def __instancecheck__(cls, inst):\n', '        if not isinstance(inst, Constant):\n', '            return False\n', '        if cls in _const_types:\n', '            try:\n', '                value = inst.value\n', '            except AttributeError:\n', '                return False\n', '            else:\n', '                return (\n', '                    isinstance(value, _const_types[cls]) and\n', '                    not isinstance(value, _const_types_not.get(cls, ()))\n', '                )\n', '        return type.__instancecheck__(cls, inst)\n', '\n', 'def _new(cls, *args, **kwargs):\n', '    for key in kwargs:\n', '        if key not in cls._fields:\n', '            # arbitrary keyword arguments are accepted\n', '            continue\n', '        pos = cls._fields.index(key)\n', '        if pos < len(args):\n', '            raise TypeError(f"{cls.__name__} got multiple values for argument {key!r}")\n', '    if cls in _const_types:\n', '        return Constant(*args, **kwargs)\n', '    return Constant.__new__(cls, *args, **kwargs)\n', '\n', 'class Num(Constant, metaclass=_ABC):\n', "    _fields = ('n',)\n", '    __new__ = _new\n', '\n', 'class Str(Constant, metaclass=_ABC):\n', "    _fields = ('s',)\n", '    __new__ = _new\n', '\n', 'class Bytes(Constant, metaclass=_ABC):\n', "    _fields = ('s',)\n", '    __new__ = _new\n', '\n', 'class NameConstant(Constant, metaclass=_ABC):\n', '    __new__ = _new\n', '\n', 'class Ellipsis(Constant, metaclass=_ABC):\n', '    _fields = ()\n', '\n', '    def __new__(cls, *args, **kwargs):\n', '        if cls is Ellipsis:\n', '            return Constant(..., *args, **kwargs)\n', '        return Constant.__new__(cls, *args, **kwargs)\n', '\n', '_const_types = {\n', '    Num: (int, float, complex),\n', '    Str: (str,),\n', '    Bytes: (bytes,),\n', '    NameConstant: (type(None), bool),\n', '    Ellipsis: (type(...),),\n', '}\n', '_const_types_not = {\n', '    Num: (bool,),\n', '}\n', '\n', '_const_node_type_names = {\n', "    bool: 'NameConstant',  # should be before int\n", "    type(None): 'NameConstant',\n", "    int: 'Num',\n", "    float: 'Num',\n", "    complex: 'Num',\n", "    str: 'Str',\n", "    bytes: 'Bytes',\n", "    type(...): 'Ellipsis',\n", '}\n', '\n', 'class slice(AST):\n', '    """Deprecated AST node class."""\n', '\n', 'class Index(slice):\n', '    """Deprecated AST node class. Use the index value directly instead."""\n', '    def __new__(cls, value, **kwargs):\n', '        return value\n', '\n', 'class ExtSlice(slice):\n', '    """Deprecated AST node class. Use ast.Tuple instead."""\n', '    def __new__(cls, dims=(), **kwargs):\n', '        return Tuple(list(dims), Load(), **kwargs)\n', '\n', '# If the ast module is loaded more than once, only add deprecated methods once\n', "if not hasattr(Tuple, 'dims'):\n", '    # The following code is for backward compatibility.\n', '    # It will be removed in future.\n', '\n', '    def _dims_getter(self):\n', '        """Deprecated. Use elts instead."""\n', '        return self.elts\n', '\n', '    def _dims_setter(self, value):\n', '        self.elts = value\n', '\n', '    Tuple.dims = property(_dims_getter, _dims_setter)\n', '\n', 'class Suite(mod):\n', '    """Deprecated AST node class.  Unused in Python 3."""\n', '\n', 'class AugLoad(expr_context):\n', '    """Deprecated AST node class.  Unused in Python 3."""\n', '\n', 'class AugStore(expr_context):\n', '    """Deprecated AST node class.  Unused in Python 3."""\n', '\n', 'class Param(expr_context):\n', '    """Deprecated AST node class.  Unused in Python 3."""\n', '\n', '\n', '# Large float and imaginary literals get turned into infinities in the AST.\n', '# We unparse those infinities to INFSTR.\n', '_INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1)\n', '\n', 'class _Precedence(IntEnum):\n', '    """Precedence table that originated from python grammar."""\n', '\n', '    TUPLE = auto()\n', "    YIELD = auto()           # 'yield', 'yield from'\n", "    TEST = auto()            # 'if'-'else', 'lambda'\n", "    OR = auto()              # 'or'\n", "    AND = auto()             # 'and'\n", "    NOT = auto()             # 'not'\n", "    CMP = auto()             # '<', '>', '==', '>=', '<=', '!=',\n", "                             # 'in', 'not in', 'is', 'is not'\n", '    EXPR = auto()\n', "    BOR = EXPR               # '|'\n", "    BXOR = auto()            # '^'\n", "    BAND = auto()            # '&'\n", "    SHIFT = auto()           # '<<', '>>'\n", "    ARITH = auto()           # '+', '-'\n", "    TERM = auto()            # '*', '@', '/', '%', '//'\n", "    FACTOR = auto()          # unary '+', '-', '~'\n", "    POWER = auto()           # '**'\n", "    AWAIT = auto()           # 'await'\n", '    ATOM = auto()\n', '\n', '    def next(self):\n', '        try:\n', '            return self.__class__(self + 1)\n', '        except ValueError:\n', '            return self\n', '\n', '\n', '_SINGLE_QUOTES = ("\'", \'"\')\n', '_MULTI_QUOTES = (\'"""\', "\'\'\'")\n', '_ALL_QUOTES = (*_SINGLE_QUOTES, *_MULTI_QUOTES)\n', '\n', 'class _Unparser(NodeVisitor):\n', '    """Methods in this class recursively traverse an AST and\n', '    output source code for the abstract syntax; original formatting\n', '    is disregarded."""\n', '\n', '    def __init__(self, *, _avoid_backslashes=False):\n', '        self._source = []\n', '        self._buffer = []\n', '        self._precedences = {}\n', '        self._type_ignores = {}\n', '        self._indent = 0\n', '        self._avoid_backslashes = _avoid_backslashes\n', '\n', '    def interleave(self, inter, f, seq):\n', '        """Call f on each item in seq, calling inter() in between."""\n', '        seq = iter(seq)\n', '        try:\n', '            f(next(seq))\n', '        except StopIteration:\n', '            pass\n', '        else:\n', '            for x in seq:\n', '                inter()\n', '                f(x)\n', '\n', '    def items_view(self, traverser, items):\n', '        """Traverse and separate the given *items* with a comma and append it to\n', '        the buffer. If *items* is a single item sequence, a trailing comma\n', '        will be added."""\n', '        if len(items) == 1:\n', '            traverser(items[0])\n', '            self.write(",")\n', '        else:\n', '            self.interleave(lambda: self.write(", "), traverser, items)\n', '\n', '    def maybe_newline(self):\n', '        """Adds a newline if it isn\'t the start of generated source"""\n', '        if self._source:\n', '            self.write("\\n")\n', '\n', '    def fill(self, text=""):\n', '        """Indent a piece of text and append it, according to the current\n', '        indentation level"""\n', '        self.maybe_newline()\n', '        self.write("    " * self._indent + text)\n', '\n', '    def write(self, text):\n', '        """Append a piece of text"""\n', '        self._source.append(text)\n', '\n', '    def buffer_writer(self, text):\n', '        self._buffer.append(text)\n', '\n', '    @property\n', '    def buffer(self):\n', '        value = "".join(self._buffer)\n', '        self._buffer.clear()\n', '        return value\n', '\n', '    @contextmanager\n', '    def block(self, *, extra = None):\n', '        """A context manager for preparing the source for blocks. It adds\n', "        the character':', increases the indentation on enter and decreases\n", '        the indentation on exit. If *extra* is given, it will be directly\n', '        appended after the colon character.\n', '        """\n', '        self.write(":")\n', '        if extra:\n', '            self.write(extra)\n', '        self._indent += 1\n', '        yield\n', '        self._indent -= 1\n', '\n', '    @contextmanager\n', '    def delimit(self, start, end):\n', '        """A context manager for preparing the source for expressions. It adds\n', '        *start* to the buffer and enters, after exit it adds *end*."""\n', '\n', '        self.write(start)\n', '        yield\n', '        self.write(end)\n', '\n', '    def delimit_if(self, start, end, condition):\n', '        if condition:\n', '            return self.delimit(start, end)\n', '        else:\n', '            return nullcontext()\n', '\n', '    def require_parens(self, precedence, node):\n', '        """Shortcut to adding precedence related parens"""\n', '        return self.delimit_if("(", ")", self.get_precedence(node) > precedence)\n', '\n', '    def get_precedence(self, node):\n', '        return self._precedences.get(node, _Precedence.TEST)\n', '\n', '    def set_precedence(self, precedence, *nodes):\n', '        for node in nodes:\n', '            self._precedences[node] = precedence\n', '\n', '    def get_raw_docstring(self, node):\n', '        """If a docstring node is found in the body of the *node* parameter,\n', '        return that docstring node, None otherwise.\n', '\n', '        Logic mirrored from ``_PyAST_GetDocString``."""\n', '        if not isinstance(\n', '            node, (AsyncFunctionDef, FunctionDef, ClassDef, Module)\n', '        ) or len(node.body) < 1:\n', '            return None\n', '        node = node.body[0]\n', '        if not isinstance(node, Expr):\n', '            return None\n', '        node = node.value\n', '        if isinstance(node, Constant) and isinstance(node.value, str):\n', '            return node\n', '\n', '    def get_type_comment(self, node):\n', '        comment = self._type_ignores.get(node.lineno) or node.type_comment\n', '        if comment is not None:\n', '            return f" # type: {comment}"\n', '\n', '    def traverse(self, node):\n', '        if isinstance(node, list):\n', '            for item in node:\n', '                self.traverse(item)\n', '        else:\n', '            super().visit(node)\n', '\n', '    def visit(self, node):\n', '        """Outputs a source code string that, if converted back to an ast\n', '        (using ast.parse) will generate an AST equivalent to *node*"""\n', '        self._source = []\n', '        self.traverse(node)\n', '        return "".join(self._source)\n', '\n', '    def _write_docstring_and_traverse_body(self, node):\n', '        if (docstring := self.get_raw_docstring(node)):\n', '            self._write_docstring(docstring)\n', '            self.traverse(node.body[1:])\n', '        else:\n', '            self.traverse(node.body)\n', '\n', '    def visit_Module(self, node):\n', '        self._type_ignores = {\n', '            ignore.lineno: f"ignore{ignore.tag}"\n', '            for ignore in node.type_ignores\n', '        }\n', '        self._write_docstring_and_traverse_body(node)\n', '        self._type_ignores.clear()\n', '\n', '    def visit_FunctionType(self, node):\n', '        with self.delimit("(", ")"):\n', '            self.interleave(\n', '                lambda: self.write(", "), self.traverse, node.argtypes\n', '            )\n', '\n', '        self.write(" -> ")\n', '        self.traverse(node.returns)\n', '\n', '    def visit_Expr(self, node):\n', '        self.fill()\n', '        self.set_precedence(_Precedence.YIELD, node.value)\n', '        self.traverse(node.value)\n', '\n', '    def visit_NamedExpr(self, node):\n', '        with self.require_parens(_Precedence.TUPLE, node):\n', '            self.set_precedence(_Precedence.ATOM, node.target, node.value)\n', '            self.traverse(node.target)\n', '            self.write(" := ")\n', '            self.traverse(node.value)\n', '\n', '    def visit_Import(self, node):\n', '        self.fill("import ")\n', '        self.interleave(lambda: self.write(", "), self.traverse, node.names)\n', '\n', '    def visit_ImportFrom(self, node):\n', '        self.fill("from ")\n', '        self.write("." * node.level)\n', '        if node.module:\n', '            self.write(node.module)\n', '        self.write(" import ")\n', '        self.interleave(lambda: self.write(", "), self.traverse, node.names)\n', '\n', '    def visit_Assign(self, node):\n', '        self.fill()\n', '        for target in node.targets:\n', '            self.traverse(target)\n', '            self.write(" = ")\n', '        self.traverse(node.value)\n', '        if type_comment := self.get_type_comment(node):\n', '            self.write(type_comment)\n', '\n', '    def visit_AugAssign(self, node):\n', '        self.fill()\n', '        self.traverse(node.target)\n', '        self.write(" " + self.binop[node.op.__class__.__name__] + "= ")\n', '        self.traverse(node.value)\n', '\n', '    def visit_AnnAssign(self, node):\n', '        self.fill()\n', '        with self.delimit_if("(", ")", not node.simple and isinstance(node.target, Name)):\n', '            self.traverse(node.target)\n', '        self.write(": ")\n', '        self.traverse(node.annotation)\n', '        if node.value:\n', '            self.write(" = ")\n', '            self.traverse(node.value)\n', '\n', '    def visit_Return(self, node):\n', '        self.fill("return")\n', '        if node.value:\n', '            self.write(" ")\n', '            self.traverse(node.value)\n', '\n', '    def visit_Pass(self, node):\n', '        self.fill("pass")\n', '\n', '    def visit_Break(self, node):\n', '        self.fill("break")\n', '\n', '    def visit_Continue(self, node):\n', '        self.fill("continue")\n', '\n', '    def visit_Delete(self, node):\n', '        self.fill("del ")\n', '        self.interleave(lambda: self.write(", "), self.traverse, node.targets)\n', '\n', '    def visit_Assert(self, node):\n', '        self.fill("assert ")\n', '        self.traverse(node.test)\n', '        if node.msg:\n', '            self.write(", ")\n', '            self.traverse(node.msg)\n', '\n', '    def visit_Global(self, node):\n', '        self.fill("global ")\n', '        self.interleave(lambda: self.write(", "), self.write, node.names)\n', '\n', '    def visit_Nonlocal(self, node):\n', '        self.fill("nonlocal ")\n', '        self.interleave(lambda: self.write(", "), self.write, node.names)\n', '\n', '    def visit_Await(self, node):\n', '        with self.require_parens(_Precedence.AWAIT, node):\n', '            self.write("await")\n', '            if node.value:\n', '                self.write(" ")\n', '                self.set_precedence(_Precedence.ATOM, node.value)\n', '                self.traverse(node.value)\n', '\n', '    def visit_Yield(self, node):\n', '        with self.require_parens(_Precedence.YIELD, node):\n', '            self.write("yield")\n', '            if node.value:\n', '                self.write(" ")\n', '                self.set_precedence(_Precedence.ATOM, node.value)\n', '                self.traverse(node.value)\n', '\n', '    def visit_YieldFrom(self, node):\n', '        with self.require_parens(_Precedence.YIELD, node):\n', '            self.write("yield from ")\n', '            if not node.value:\n', '                raise ValueError("Node can\'t be used without a value attribute.")\n', '            self.set_precedence(_Precedence.ATOM, node.value)\n', '            self.traverse(node.value)\n', '\n', '    def visit_Raise(self, node):\n', '        self.fill("raise")\n', '        if not node.exc:\n', '            if node.cause:\n', '                raise ValueError(f"Node can\'t use cause without an exception.")\n', '            return\n', '        self.write(" ")\n', '        self.traverse(node.exc)\n', '        if node.cause:\n', '            self.write(" from ")\n', '            self.traverse(node.cause)\n', '\n', '    def visit_Try(self, node):\n', '        self.fill("try")\n', '        with self.block():\n', '            self.traverse(node.body)\n', '        for ex in node.handlers:\n', '            self.traverse(ex)\n', '        if node.orelse:\n', '            self.fill("else")\n', '            with self.block():\n', '                self.traverse(node.orelse)\n', '        if node.finalbody:\n', '            self.fill("finally")\n', '            with self.block():\n', '                self.traverse(node.finalbody)\n', '\n', '    def visit_ExceptHandler(self, node):\n', '        self.fill("except")\n', '        if node.type:\n', '            self.write(" ")\n', '            self.traverse(node.type)\n', '        if node.name:\n', '            self.write(" as ")\n', '            self.write(node.name)\n', '        with self.block():\n', '            self.traverse(node.body)\n', '\n', '    def visit_ClassDef(self, node):\n', '        self.maybe_newline()\n', '        for deco in node.decorator_list:\n', '            self.fill("@")\n', '            self.traverse(deco)\n', '        self.fill("class " + node.name)\n', '        with self.delimit_if("(", ")", condition = node.bases or node.keywords):\n', '            comma = False\n', '            for e in node.bases:\n', '                if comma:\n', '                    self.write(", ")\n', '                else:\n', '                    comma = True\n', '                self.traverse(e)\n', '            for e in node.keywords:\n', '                if comma:\n', '                    self.write(", ")\n', '                else:\n', '                    comma = True\n', '                self.traverse(e)\n', '\n', '        with self.block():\n', '            self._write_docstring_and_traverse_body(node)\n', '\n', '    def visit_FunctionDef(self, node):\n', '        self._function_helper(node, "def")\n', '\n', '    def visit_AsyncFunctionDef(self, node):\n', '        self._function_helper(node, "async def")\n', '\n', '    def _function_helper(self, node, fill_suffix):\n', '        self.maybe_newline()\n', '        for deco in node.decorator_list:\n', '            self.fill("@")\n', '            self.traverse(deco)\n', '        def_str = fill_suffix + " " + node.name\n', '        self.fill(def_str)\n', '        with self.delimit("(", ")"):\n', '            self.traverse(node.args)\n', '        if node.returns:\n', '            self.write(" -> ")\n', '            self.traverse(node.returns)\n', '        with self.block(extra=self.get_type_comment(node)):\n', '            self._write_docstring_and_traverse_body(node)\n', '\n', '    def visit_For(self, node):\n', '        self._for_helper("for ", node)\n', '\n', '    def visit_AsyncFor(self, node):\n', '        self._for_helper("async for ", node)\n', '\n', '    def _for_helper(self, fill, node):\n', '        self.fill(fill)\n', '        self.traverse(node.target)\n', '        self.write(" in ")\n', '        self.traverse(node.iter)\n', '        with self.block(extra=self.get_type_comment(node)):\n', '            self.traverse(node.body)\n', '        if node.orelse:\n', '            self.fill("else")\n', '            with self.block():\n', '                self.traverse(node.orelse)\n', '\n', '    def visit_If(self, node):\n', '        self.fill("if ")\n', '        self.traverse(node.test)\n', '        with self.block():\n', '            self.traverse(node.body)\n', '        # collapse nested ifs into equivalent elifs.\n', '        while node.orelse and len(node.orelse) == 1 and isinstance(node.orelse[0], If):\n', '            node = node.orelse[0]\n', '            self.fill("elif ")\n', '            self.traverse(node.test)\n', '            with self.block():\n', '                self.traverse(node.body)\n', '        # final else\n', '        if node.orelse:\n', '            self.fill("else")\n', '            with self.block():\n', '                self.traverse(node.orelse)\n', '\n', '    def visit_While(self, node):\n', '        self.fill("while ")\n', '        self.traverse(node.test)\n', '        with self.block():\n', '            self.traverse(node.body)\n', '        if node.orelse:\n', '            self.fill("else")\n', '            with self.block():\n', '                self.traverse(node.orelse)\n', '\n', '    def visit_With(self, node):\n', '        self.fill("with ")\n', '        self.interleave(lambda: self.write(", "), self.traverse, node.items)\n', '        with self.block(extra=self.get_type_comment(node)):\n', '            self.traverse(node.body)\n', '\n', '    def visit_AsyncWith(self, node):\n', '        self.fill("async with ")\n', '        self.interleave(lambda: self.write(", "), self.traverse, node.items)\n', '        with self.block(extra=self.get_type_comment(node)):\n', '            self.traverse(node.body)\n', '\n', '    def _str_literal_helper(\n', '        self, string, *, quote_types=_ALL_QUOTES, escape_special_whitespace=False\n', '    ):\n', '        """Helper for writing string literals, minimizing escapes.\n', '        Returns the tuple (string literal to write, possible quote types).\n', '        """\n', '        def escape_char(c):\n', '            # \\n and \\t are non-printable, but we only escape them if\n', '            # escape_special_whitespace is True\n', '            if not escape_special_whitespace and c in "\\n\\t":\n', '                return c\n', '            # Always escape backslashes and other non-printable characters\n', '            if c == "\\\\" or not c.isprintable():\n', '                return c.encode("unicode_escape").decode("ascii")\n', '            return c\n', '\n', '        escaped_string = "".join(map(escape_char, string))\n', '        possible_quotes = quote_types\n', '        if "\\n" in escaped_string:\n', '            possible_quotes = [q for q in possible_quotes if q in _MULTI_QUOTES]\n', '        possible_quotes = [q for q in possible_quotes if q not in escaped_string]\n', '        if not possible_quotes:\n', "            # If there aren't any possible_quotes, fallback to using repr\n", '            # on the original string. Try to use a quote from quote_types,\n', '            # e.g., so that we use triple quotes for docstrings.\n', '            string = repr(string)\n', '            quote = next((q for q in quote_types if string[0] in q), string[0])\n', '            return string[1:-1], [quote]\n', '        if escaped_string:\n', '            # Sort so that we prefer \'\'\'"\'\'\' over """\\""""\n', '            possible_quotes.sort(key=lambda q: q[0] == escaped_string[-1])\n', "            # If we're using triple quotes and we'd need to escape a final\n", '            # quote, escape it\n', '            if possible_quotes[0][0] == escaped_string[-1]:\n', '                assert len(possible_quotes[0]) == 3\n', '                escaped_string = escaped_string[:-1] + "\\\\" + escaped_string[-1]\n', '        return escaped_string, possible_quotes\n', '\n', '    def _write_str_avoiding_backslashes(self, string, *, quote_types=_ALL_QUOTES):\n', '        """Write string literal value with a best effort attempt to avoid backslashes."""\n', '        string, quote_types = self._str_literal_helper(string, quote_types=quote_types)\n', '        quote_type = quote_types[0]\n', '        self.write(f"{quote_type}{string}{quote_type}")\n', '\n', '    def visit_JoinedStr(self, node):\n', '        self.write("f")\n', '        if self._avoid_backslashes:\n', '            self._fstring_JoinedStr(node, self.buffer_writer)\n', '            self._write_str_avoiding_backslashes(self.buffer)\n', '            return\n', '\n', "        # If we don't need to avoid backslashes globally (i.e., we only need\n", "        # to avoid them inside FormattedValues), it's cosmetically preferred\n", "        # to use escaped whitespace. That is, it's preferred to use backslashes\n", '        # for cases like: f"{x}\\n". To accomplish this, we keep track of what\n', '        # in our buffer corresponds to FormattedValues and what corresponds to\n', '        # Constant parts of the f-string, and allow escapes accordingly.\n', '        buffer = []\n', '        for value in node.values:\n', '            meth = getattr(self, "_fstring_" + type(value).__name__)\n', '            meth(value, self.buffer_writer)\n', '            buffer.append((self.buffer, isinstance(value, Constant)))\n', '        new_buffer = []\n', '        quote_types = _ALL_QUOTES\n', '        for value, is_constant in buffer:\n', '            # Repeatedly narrow down the list of possible quote_types\n', '            value, quote_types = self._str_literal_helper(\n', '                value, quote_types=quote_types,\n', '                escape_special_whitespace=is_constant\n', '            )\n', '            new_buffer.append(value)\n', '        value = "".join(new_buffer)\n', '        quote_type = quote_types[0]\n', '        self.write(f"{quote_type}{value}{quote_type}")\n', '\n', '    def visit_FormattedValue(self, node):\n', '        self.write("f")\n', '        self._fstring_FormattedValue(node, self.buffer_writer)\n', '        self._write_str_avoiding_backslashes(self.buffer)\n', '\n', '    def _fstring_JoinedStr(self, node, write):\n', '        for value in node.values:\n', '            meth = getattr(self, "_fstring_" + type(value).__name__)\n', '            meth(value, write)\n', '\n', '    def _fstring_Constant(self, node, write):\n', '        if not isinstance(node.value, str):\n', '            raise ValueError("Constants inside JoinedStr should be a string.")\n', '        value = node.value.replace("{", "{{").replace("}", "}}")\n', '        write(value)\n', '\n', '    def _fstring_FormattedValue(self, node, write):\n', '        write("{")\n', '        unparser = type(self)(_avoid_backslashes=True)\n', '        unparser.set_precedence(_Precedence.TEST.next(), node.value)\n', '        expr = unparser.visit(node.value)\n', '        if expr.startswith("{"):\n', '            write(" ")  # Separate pair of opening brackets as "{ {"\n', '        if "\\\\" in expr:\n', '            raise ValueError("Unable to avoid backslash in f-string expression part")\n', '        write(expr)\n', '        if node.conversion != -1:\n', '            conversion = chr(node.conversion)\n', '            if conversion not in "sra":\n', '                raise ValueError("Unknown f-string conversion.")\n', '            write(f"!{conversion}")\n', '        if node.format_spec:\n', '            write(":")\n', '            meth = getattr(self, "_fstring_" + type(node.format_spec).__name__)\n', '            meth(node.format_spec, write)\n', '        write("}")\n', '\n', '    def visit_Name(self, node):\n', '        self.write(node.id)\n', '\n', '    def _write_docstring(self, node):\n', '        self.fill()\n', '        if node.kind == "u":\n', '            self.write("u")\n', '        self._write_str_avoiding_backslashes(node.value, quote_types=_MULTI_QUOTES)\n', '\n', '    def _write_constant(self, value):\n', '        if isinstance(value, (float, complex)):\n', '            # Substitute overflowing decimal literal for AST infinities,\n', '            # and inf - inf for NaNs.\n', '            self.write(\n', '                repr(value)\n', '                .replace("inf", _INFSTR)\n', '                .replace("nan", f"({_INFSTR}-{_INFSTR})")\n', '            )\n', '        elif self._avoid_backslashes and isinstance(value, str):\n', '            self._write_str_avoiding_backslashes(value)\n', '        else:\n', '            self.write(repr(value))\n', '\n', '    def visit_Constant(self, node):\n', '        value = node.value\n', '        if isinstance(value, tuple):\n', '            with self.delimit("(", ")"):\n', '                self.items_view(self._write_constant, value)\n', '        elif value is ...:\n', '            self.write("...")\n', '        else:\n', '            if node.kind == "u":\n', '                self.write("u")\n', '            self._write_constant(node.value)\n', '\n', '    def visit_List(self, node):\n', '        with self.delimit("[", "]"):\n', '            self.interleave(lambda: self.write(", "), self.traverse, node.elts)\n', '\n', '    def visit_ListComp(self, node):\n', '        with self.delimit("[", "]"):\n', '            self.traverse(node.elt)\n', '            for gen in node.generators:\n', '                self.traverse(gen)\n', '\n', '    def visit_GeneratorExp(self, node):\n', '        with self.delimit("(", ")"):\n', '            self.traverse(node.elt)\n', '            for gen in node.generators:\n', '                self.traverse(gen)\n', '\n', '    def visit_SetComp(self, node):\n', '        with self.delimit("{", "}"):\n', '            self.traverse(node.elt)\n', '            for gen in node.generators:\n', '                self.traverse(gen)\n', '\n', '    def visit_DictComp(self, node):\n', '        with self.delimit("{", "}"):\n', '            self.traverse(node.key)\n', '            self.write(": ")\n', '            self.traverse(node.value)\n', '            for gen in node.generators:\n', '                self.traverse(gen)\n', '\n', '    def visit_comprehension(self, node):\n', '        if node.is_async:\n', '            self.write(" async for ")\n', '        else:\n', '            self.write(" for ")\n', '        self.set_precedence(_Precedence.TUPLE, node.target)\n', '        self.traverse(node.target)\n', '        self.write(" in ")\n', '        self.set_precedence(_Precedence.TEST.next(), node.iter, *node.ifs)\n', '        self.traverse(node.iter)\n', '        for if_clause in node.ifs:\n', '            self.write(" if ")\n', '            self.traverse(if_clause)\n', '\n', '    def visit_IfExp(self, node):\n', '        with self.require_parens(_Precedence.TEST, node):\n', '            self.set_precedence(_Precedence.TEST.next(), node.body, node.test)\n', '            self.traverse(node.body)\n', '            self.write(" if ")\n', '            self.traverse(node.test)\n', '            self.write(" else ")\n', '            self.set_precedence(_Precedence.TEST, node.orelse)\n', '            self.traverse(node.orelse)\n', '\n', '    def visit_Set(self, node):\n', '        if node.elts:\n', '            with self.delimit("{", "}"):\n', '                self.interleave(lambda: self.write(", "), self.traverse, node.elts)\n', '        else:\n', '            # `{}` would be interpreted as a dictionary literal, and\n', '            # `set` might be shadowed. Thus:\n', "            self.write('{*()}')\n", '\n', '    def visit_Dict(self, node):\n', '        def write_key_value_pair(k, v):\n', '            self.traverse(k)\n', '            self.write(": ")\n', '            self.traverse(v)\n', '\n', '        def write_item(item):\n', '            k, v = item\n', '            if k is None:\n', "                # for dictionary unpacking operator in dicts {**{'y': 2}}\n", '                # see PEP 448 for details\n', '                self.write("**")\n', '                self.set_precedence(_Precedence.EXPR, v)\n', '                self.traverse(v)\n', '            else:\n', '                write_key_value_pair(k, v)\n', '\n', '        with self.delimit("{", "}"):\n', '            self.interleave(\n', '                lambda: self.write(", "), write_item, zip(node.keys, node.values)\n', '            )\n', '\n', '    def visit_Tuple(self, node):\n', '        with self.delimit("(", ")"):\n', '            self.items_view(self.traverse, node.elts)\n', '\n', '    unop = {"Invert": "~", "Not": "not", "UAdd": "+", "USub": "-"}\n', '    unop_precedence = {\n', '        "not": _Precedence.NOT,\n', '        "~": _Precedence.FACTOR,\n', '        "+": _Precedence.FACTOR,\n', '        "-": _Precedence.FACTOR,\n', '    }\n', '\n', '    def visit_UnaryOp(self, node):\n', '        operator = self.unop[node.op.__class__.__name__]\n', '        operator_precedence = self.unop_precedence[operator]\n', '        with self.require_parens(operator_precedence, node):\n', '            self.write(operator)\n', "            # factor prefixes (+, -, ~) shouldn't be seperated\n", '            # from the value they belong, (e.g: +1 instead of + 1)\n', '            if operator_precedence is not _Precedence.FACTOR:\n', '                self.write(" ")\n', '            self.set_precedence(operator_precedence, node.operand)\n', '            self.traverse(node.operand)\n', '\n', '    binop = {\n', '        "Add": "+",\n', '        "Sub": "-",\n', '        "Mult": "*",\n', '        "MatMult": "@",\n', '        "Div": "/",\n', '        "Mod": "%",\n', '        "LShift": "<<",\n', '        "RShift": ">>",\n', '        "BitOr": "|",\n', '        "BitXor": "^",\n', '        "BitAnd": "&",\n', '        "FloorDiv": "//",\n', '        "Pow": "**",\n', '    }\n', '\n', '    binop_precedence = {\n', '        "+": _Precedence.ARITH,\n', '        "-": _Precedence.ARITH,\n', '        "*": _Precedence.TERM,\n', '        "@": _Precedence.TERM,\n', '        "/": _Precedence.TERM,\n', '        "%": _Precedence.TERM,\n', '        "<<": _Precedence.SHIFT,\n', '        ">>": _Precedence.SHIFT,\n', '        "|": _Precedence.BOR,\n', '        "^": _Precedence.BXOR,\n', '        "&": _Precedence.BAND,\n', '        "//": _Precedence.TERM,\n', '        "**": _Precedence.POWER,\n', '    }\n', '\n', '    binop_rassoc = frozenset(("**",))\n', '    def visit_BinOp(self, node):\n', '        operator = self.binop[node.op.__class__.__name__]\n', '        operator_precedence = self.binop_precedence[operator]\n', '        with self.require_parens(operator_precedence, node):\n', '            if operator in self.binop_rassoc:\n', '                left_precedence = operator_precedence.next()\n', '                right_precedence = operator_precedence\n', '            else:\n', '                left_precedence = operator_precedence\n', '                right_precedence = operator_precedence.next()\n', '\n', '            self.set_precedence(left_precedence, node.left)\n', '            self.traverse(node.left)\n', '            self.write(f" {operator} ")\n', '            self.set_precedence(right_precedence, node.right)\n', '            self.traverse(node.right)\n', '\n', '    cmpops = {\n', '        "Eq": "==",\n', '        "NotEq": "!=",\n', '        "Lt": "<",\n', '        "LtE": "<=",\n', '        "Gt": ">",\n', '        "GtE": ">=",\n', '        "Is": "is",\n', '        "IsNot": "is not",\n', '        "In": "in",\n', '        "NotIn": "not in",\n', '    }\n', '\n', '    def visit_Compare(self, node):\n', '        with self.require_parens(_Precedence.CMP, node):\n', '            self.set_precedence(_Precedence.CMP.next(), node.left, *node.comparators)\n', '            self.traverse(node.left)\n', '            for o, e in zip(node.ops, node.comparators):\n', '                self.write(" " + self.cmpops[o.__class__.__name__] + " ")\n', '                self.traverse(e)\n', '\n', '    boolops = {"And": "and", "Or": "or"}\n', '    boolop_precedence = {"and": _Precedence.AND, "or": _Precedence.OR}\n', '\n', '    def visit_BoolOp(self, node):\n', '        operator = self.boolops[node.op.__class__.__name__]\n', '        operator_precedence = self.boolop_precedence[operator]\n', '\n', '        def increasing_level_traverse(node):\n', '            nonlocal operator_precedence\n', '            operator_precedence = operator_precedence.next()\n', '            self.set_precedence(operator_precedence, node)\n', '            self.traverse(node)\n', '\n', '        with self.require_parens(operator_precedence, node):\n', '            s = f" {operator} "\n', '            self.interleave(lambda: self.write(s), increasing_level_traverse, node.values)\n', '\n', '    def visit_Attribute(self, node):\n', '        self.set_precedence(_Precedence.ATOM, node.value)\n', '        self.traverse(node.value)\n', '        # Special case: 3.__abs__() is a syntax error, so if node.value\n', '        # is an integer literal then we need to either parenthesize\n', '        # it or add an extra space to get 3 .__abs__().\n', '        if isinstance(node.value, Constant) and isinstance(node.value.value, int):\n', '            self.write(" ")\n', '        self.write(".")\n', '        self.write(node.attr)\n', '\n', '    def visit_Call(self, node):\n', '        self.set_precedence(_Precedence.ATOM, node.func)\n', '        self.traverse(node.func)\n', '        with self.delimit("(", ")"):\n', '            comma = False\n', '            for e in node.args:\n', '                if comma:\n', '                    self.write(", ")\n', '                else:\n', '                    comma = True\n', '                self.traverse(e)\n', '            for e in node.keywords:\n', '                if comma:\n', '                    self.write(", ")\n', '                else:\n', '                    comma = True\n', '                self.traverse(e)\n', '\n', '    def visit_Subscript(self, node):\n', '        def is_simple_tuple(slice_value):\n', '            # when unparsing a non-empty tuple, the parentheses can be safely\n', "            # omitted if there aren't any elements that explicitly requires\n", '            # parentheses (such as starred expressions).\n', '            return (\n', '                isinstance(slice_value, Tuple)\n', '                and slice_value.elts\n', '                and not any(isinstance(elt, Starred) for elt in slice_value.elts)\n', '            )\n', '\n', '        self.set_precedence(_Precedence.ATOM, node.value)\n', '        self.traverse(node.value)\n', '        with self.delimit("[", "]"):\n', '            if is_simple_tuple(node.slice):\n', '                self.items_view(self.traverse, node.slice.elts)\n', '            else:\n', '                self.traverse(node.slice)\n', '\n', '    def visit_Starred(self, node):\n', '        self.write("*")\n', '        self.set_precedence(_Precedence.EXPR, node.value)\n', '        self.traverse(node.value)\n', '\n', '    def visit_Ellipsis(self, node):\n', '        self.write("...")\n', '\n', '    def visit_Slice(self, node):\n', '        if node.lower:\n', '            self.traverse(node.lower)\n', '        self.write(":")\n', '        if node.upper:\n', '            self.traverse(node.upper)\n', '        if node.step:\n', '            self.write(":")\n', '            self.traverse(node.step)\n', '\n', '    def visit_arg(self, node):\n', '        self.write(node.arg)\n', '        if node.annotation:\n', '            self.write(": ")\n', '            self.traverse(node.annotation)\n', '\n', '    def visit_arguments(self, node):\n', '        first = True\n', '        # normal arguments\n', '        all_args = node.posonlyargs + node.args\n', '        defaults = [None] * (len(all_args) - len(node.defaults)) + node.defaults\n', '        for index, elements in enumerate(zip(all_args, defaults), 1):\n', '            a, d = elements\n', '            if first:\n', '                first = False\n', '            else:\n', '                self.write(", ")\n', '            self.traverse(a)\n', '            if d:\n', '                self.write("=")\n', '                self.traverse(d)\n', '            if index == len(node.posonlyargs):\n', '                self.write(", /")\n', '\n', "        # varargs, or bare '*' if no varargs but keyword-only arguments present\n", '        if node.vararg or node.kwonlyargs:\n', '            if first:\n', '                first = False\n', '            else:\n', '                self.write(", ")\n', '            self.write("*")\n', '            if node.vararg:\n', '                self.write(node.vararg.arg)\n', '                if node.vararg.annotation:\n', '                    self.write(": ")\n', '                    self.traverse(node.vararg.annotation)\n', '\n', '        # keyword-only arguments\n', '        if node.kwonlyargs:\n', '            for a, d in zip(node.kwonlyargs, node.kw_defaults):\n', '                self.write(", ")\n', '                self.traverse(a)\n', '                if d:\n', '                    self.write("=")\n', '                    self.traverse(d)\n', '\n', '        # kwargs\n', '        if node.kwarg:\n', '            if first:\n', '                first = False\n', '            else:\n', '                self.write(", ")\n', '            self.write("**" + node.kwarg.arg)\n', '            if node.kwarg.annotation:\n', '                self.write(": ")\n', '                self.traverse(node.kwarg.annotation)\n', '\n', '    def visit_keyword(self, node):\n', '        if node.arg is None:\n', '            self.write("**")\n', '        else:\n', '            self.write(node.arg)\n', '            self.write("=")\n', '        self.traverse(node.value)\n', '\n', '    def visit_Lambda(self, node):\n', '        with self.require_parens(_Precedence.TEST, node):\n', '            self.write("lambda ")\n', '            self.traverse(node.args)\n', '            self.write(": ")\n', '            self.set_precedence(_Precedence.TEST, node.body)\n', '            self.traverse(node.body)\n', '\n', '    def visit_alias(self, node):\n', '        self.write(node.name)\n', '        if node.asname:\n', '            self.write(" as " + node.asname)\n', '\n', '    def visit_withitem(self, node):\n', '        self.traverse(node.context_expr)\n', '        if node.optional_vars:\n', '            self.write(" as ")\n', '            self.traverse(node.optional_vars)\n', '\n', 'def unparse(ast_obj):\n', '    unparser = _Unparser()\n', '    return unparser.visit(ast_obj)\n', '\n', '\n', 'def main():\n', '    import argparse\n', '\n', "    parser = argparse.ArgumentParser(prog='python -m ast')\n", "    parser.add_argument('infile', type=argparse.FileType(mode='rb'), nargs='?',\n", "                        default='-',\n", "                        help='the file to parse; defaults to stdin')\n", "    parser.add_argument('-m', '--mode', default='exec',\n", "                        choices=('exec', 'single', 'eval', 'func_type'),\n", "                        help='specify what kind of code must be parsed')\n", "    parser.add_argument('--no-type-comments', default=True, action='store_false',\n", '                        help="don\'t add information about type comments")\n', "    parser.add_argument('-a', '--include-attributes', action='store_true',\n", "                        help='include attributes such as line numbers and '\n", "                             'column offsets')\n", "    parser.add_argument('-i', '--indent', type=int, default=3,\n", "                        help='indentation of nodes (number of spaces)')\n", '    args = parser.parse_args()\n', '\n', '    with args.infile as infile:\n', '        source = infile.read()\n', '    tree = parse(source, args.infile.name, args.mode, type_comments=args.no_type_comments)\n', '    print(dump(tree, include_attributes=args.include_attributes, indent=args.indent))\n', '\n', "if __name__ == '__main__':\n", '    main()\n'], '/nix/store/cgxc3jz7idrb1wnb2lard9rvcx6aw2si-python3-3.9.6/lib/python3.9/ast.py'), '/nix/store/42a8c7fk04zjmk0ckvf6ljiggn0hmf4f-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/tls.py': (3114, 1.0, ['import datetime\n', 'import os.path\n', 'import logging\n', 'import ssl\n', 'import traceback\n', '\n', 'from cryptography import x509\n', 'from cryptography.x509.oid import NameOID\n', 'from cryptography.hazmat.primitives import hashes, serialization\n', 'from cryptography.hazmat.primitives.asymmetric import rsa\n', '\n', 'from typing import List, TYPE_CHECKING\n', '\n', 'if TYPE_CHECKING:\n', '    from .config import Config\n', '\n', '\n', 'log = logging.getLogger("amethyst.tls")\n', '\n', '\n', 'def make_partial_context():\n', '    c = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)\n', '    c.options |= (ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1)\n', '    c.options |= (ssl.OP_SINGLE_DH_USE | ssl.OP_SINGLE_ECDH_USE)\n', '    c.check_hostname = False\n', '    c.verify_mode = ssl.VerifyMode.CERT_OPTIONAL\n', '    return c\n', '\n', '\n', 'def make_context(cert_path: str, key_path: str):\n', '    c = make_partial_context()\n', '    c.load_cert_chain(cert_path, keyfile=key_path)\n', '    return c\n', '\n', '\n', 'def make_sni_context(config: "Config"):\n', '    def sni_callback(sock, host, _original_ctx):\n', '        for host_cfg in config.hosts:\n', '            if host_cfg.host == host:\n', '                break\n', '        else:\n', '            return ssl.ALERT_DESCRIPTION_HANDSHAKE_FAILURE\n', '\n', '        try:\n', '            sock.context = host_cfg.tls.get_ssl_context()\n', '        except Exception:\n', '            log.warn(f"When setting context after SNI; {traceback.format_exc()}")\n', '\n', '    c = make_partial_context()\n', '    c.sni_callback = sni_callback\n', '    return c\n', '\n', '\n', 'def update_certificate(cert_path: str, key_path: str, hosts: List[str]):\n', '    if os.path.exists(cert_path):\n', '        with open(cert_path, "rb") as f:\n', '            cert = x509.load_pem_x509_certificate(f.read())\n', '\n', '        if cert.not_valid_after > datetime.datetime.now():\n', '            log.info("Certificate exists and is unexpired; skipping regeneration.")\n', '            return cert.not_valid_after\n', '\n', '        else:\n', '            log.info("Certificate expired; regenerating.")\n', '\n', '    else:\n', '        log.info("Certificate does not exist yet, generating one now.")\n', '\n', '    key = rsa.generate_private_key(\n', '        public_exponent=65537,\n', '        key_size=4096,\n', '    )\n', '\n', '    with open(key_path, "wb") as f:\n', '        f.write(key.private_bytes(\n', '            encoding=serialization.Encoding.PEM,\n', '            format=serialization.PrivateFormat.TraditionalOpenSSL,\n', '            encryption_algorithm=serialization.NoEncryption(),\n', '        ))\n', '\n', '    subject = issuer = x509.Name([\n', '        x509.NameAttribute(NameOID.COMMON_NAME, hosts[0])\n', '    ])\n', '\n', '    cert = x509.CertificateBuilder().subject_name(\n', '        subject\n', '    ).issuer_name(\n', '        issuer\n', '    ).public_key(\n', '        key.public_key()\n', '    ).serial_number(\n', '        x509.random_serial_number()\n', '    ).not_valid_before(\n', '        datetime.datetime.utcnow() - datetime.timedelta(days=1)\n', '    ).not_valid_after(\n', '        datetime.datetime.utcnow() + datetime.timedelta(days=30)\n', '    ).add_extension(\n', '        x509.SubjectAlternativeName([\n', '            x509.DNSName(host) for host in hosts\n', '        ]),\n', '        critical=False\n', '    ).sign(key, hashes.SHA256())\n', '\n', '    with open(cert_path, "wb") as f:\n', '        f.write(cert.public_bytes(serialization.Encoding.PEM))\n', '\n', '    log.info("Success! Certificate generated and saved.")\n', '    return cert.not_valid_after\n'], '/nix/store/42a8c7fk04zjmk0ckvf6ljiggn0hmf4f-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/tls.py'), '/nix/store/42a8c7fk04zjmk0ckvf6ljiggn0hmf4f-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/config.py': (2866, 1.0, ['import datetime\n', 'import ssl\n', '\n', 'from dataclasses import dataclass\n', 'from typing import Dict, List, Optional, Tuple\n', '\n', 'from .handler import GenericHandler, Handler\n', 'from .resource import Resource\n', 'from .resource_registry import registry\n', '\n', 'import os\n', '\n', '\n', '@dataclass\n', 'class TLSConfig():\n', '    host: str\n', '    auto: bool = False\n', '    cert_path: Optional[str] = None\n', '    key_path: Optional[str] = None\n', '\n', '    _context_cache: Optional[Tuple[datetime.datetime, ssl.SSLContext]] = None\n', '\n', '    @classmethod\n', '    def from_config(cls, host, cfg):\n', '        o = cls(host)\n', '\n', '        state = os.getenv("STATE_DIRECTORY", ".")\n', '\n', '        o.auto = cfg.get("auto", True)\n', '\n', '        o.cert_path = cfg.get("cert_path", None);\n', '        if o.cert_path is None:\n', '            o.cert_path = os.path.join(state, f"{host}.cert.pem")\n', '\n', '        o.key_path = cfg.get("key_path", None);\n', '        if o.key_path is None:\n', '            o.key_path = os.path.join(state, f"{host}.key.pem")\n', '\n', '        return o\n', '\n', '    def clear_context_cache(self):\n', '        self._context_cache = None\n', '\n', '    def get_ssl_context(self):\n', '        from . import tls\n', '        if self._context_cache is not None:\n', '            expires, context = self._context_cache\n', '\n', '            if expires is None or expires > datetime.datetime.now():\n', '                return context\n', '\n', '        elif self.auto:\n', '            expires = tls.update_certificate(self.cert_path, self.key_path, [self.host])\n', '        else:\n', '            # We want to keep using a manually-specified certificate forever\n', '            # or at least until the server is restarted / HUPed.\n', '            expires = None\n', '\n', '        context = tls.make_context(self.cert_path, self.key_path)\n', '\n', '        self._context_cache = expires, context\n', '        return context\n', '\n', '\n', '@dataclass\n', 'class HostConfig():\n', '    host: str\n', '    tls: TLSConfig\n', '    path_map: Dict[str, Resource]\n', '\n', '    @classmethod\n', '    def _construct_resource(cls, cfg) -> Resource:\n', '        resource_type = cfg.pop("type", "filesystem")\n', '        return registry[resource_type](**cfg)\n', '\n', '\n', '    @classmethod\n', '    def from_config(cls, cfg):\n', '        host = cfg["name"]\n', '        tls = TLSConfig.from_config(host, cfg.get("tls", {}))\n', '        path_map = {\n', '            path: cls._construct_resource(config)\n', '            for path, config in cfg["paths"].items()\n', '        }\n', '\n', '        return cls(host, tls, path_map)\n', '\n', '\n', '@dataclass\n', 'class Config():\n', '    hosts: List[HostConfig]\n', '    handler: Handler\n', '    port: int = 1965\n', '\n', '    def load(self, cfg):\n', '        self.hosts = [\n', '            HostConfig.from_config(host)\n', '            for host in cfg.get("hosts", [])\n', '        ]\n', ' \n', '        if not self.hosts:\n', '            raise ValueError("Server can\'t run without any hosts!")\n', '\n', '        self.handler = GenericHandler({\n', '            host.host: host.path_map for host in self.hosts\n', '        })\n', '\n', '    @classmethod\n', '    def from_config(cls, cfg):\n', '        o = cls([], None, cfg.get("port", 1965))\n', '        o.load(cfg)\n', '        return o\n'], '/nix/store/42a8c7fk04zjmk0ckvf6ljiggn0hmf4f-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/config.py'), '/nix/store/cgxc3jz7idrb1wnb2lard9rvcx6aw2si-python3-3.9.6/lib/python3.9/pydoc.py': (109390, 1.0, ['#!/usr/bin/env python3\n', '"""Generate Python documentation in HTML or text for interactive use.\n', '\n', 'At the Python interactive prompt, calling help(thing) on a Python object\n', 'documents the object, and calling help() starts up an interactive\n', 'help session.\n', '\n', 'Or, at the shell command line outside of Python:\n', '\n', 'Run "pydoc <name>" to show documentation on something.  <name> may be\n', 'the name of a function, module, package, or a dotted reference to a\n', 'class or function within a module or module in a package.  If the\n', 'argument contains a path segment delimiter (e.g. slash on Unix,\n', 'backslash on Windows) it is treated as the path to a Python source file.\n', '\n', 'Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines\n', 'of all available modules.\n', '\n', 'Run "pydoc -n <hostname>" to start an HTTP server with the given\n', 'hostname (default: localhost) on the local machine.\n', '\n', 'Run "pydoc -p <port>" to start an HTTP server on the given port on the\n', 'local machine.  Port number 0 can be used to get an arbitrary unused port.\n', '\n', 'Run "pydoc -b" to start an HTTP server on an arbitrary unused port and\n', 'open a Web browser to interactively browse documentation.  Combine with\n', 'the -n and -p options to control the hostname and port used.\n', '\n', 'Run "pydoc -w <name>" to write out the HTML documentation for a module\n', 'to a file named "<name>.html".\n', '\n', 'Module docs for core modules are assumed to be in\n', '\n', '    https://docs.python.org/X.Y/library/\n', '\n', 'This can be overridden by setting the PYTHONDOCS environment variable\n', 'to a different URL or to a local directory containing the Library\n', 'Reference Manual pages.\n', '"""\n', "__all__ = ['help']\n", '__author__ = "Ka-Ping Yee <ping@lfw.org>"\n', '__date__ = "26 February 2001"\n', '\n', '__credits__ = """Guido van Rossum, for an excellent programming language.\n', 'Tommy Burnette, the original creator of manpy.\n', 'Paul Prescod, for all his work on onlinehelp.\n', 'Richard Chamberlain, for the first implementation of textdoc.\n', '"""\n', '\n', "# Known bugs that can't be fixed here:\n", '#   - synopsis() cannot be prevented from clobbering existing\n', '#     loaded modules.\n', '#   - If the __file__ attribute on a module is a relative path and\n', '#     the current directory is changed with os.chdir(), an incorrect\n', '#     path will be displayed.\n', '\n', 'import builtins\n', 'import importlib._bootstrap\n', 'import importlib._bootstrap_external\n', 'import importlib.machinery\n', 'import importlib.util\n', 'import inspect\n', 'import io\n', 'import os\n', 'import pkgutil\n', 'import platform\n', 'import re\n', 'import sys\n', 'import sysconfig\n', 'import time\n', 'import tokenize\n', 'import urllib.parse\n', 'import warnings\n', 'from collections import deque\n', 'from reprlib import Repr\n', 'from traceback import format_exception_only\n', '\n', '\n', '# --------------------------------------------------------- common routines\n', '\n', 'def pathdirs():\n', '    """Convert sys.path into a list of absolute, existing, unique paths."""\n', '    dirs = []\n', '    normdirs = []\n', '    for dir in sys.path:\n', "        dir = os.path.abspath(dir or '.')\n", '        normdir = os.path.normcase(dir)\n', '        if normdir not in normdirs and os.path.isdir(dir):\n', '            dirs.append(dir)\n', '            normdirs.append(normdir)\n', '    return dirs\n', '\n', 'def _findclass(func):\n', '    cls = sys.modules.get(func.__module__)\n', '    if cls is None:\n', '        return None\n', "    for name in func.__qualname__.split('.')[:-1]:\n", '        cls = getattr(cls, name)\n', '    if not inspect.isclass(cls):\n', '        return None\n', '    return cls\n', '\n', 'def _finddoc(obj):\n', '    if inspect.ismethod(obj):\n', '        name = obj.__func__.__name__\n', '        self = obj.__self__\n', '        if (inspect.isclass(self) and\n', "            getattr(getattr(self, name, None), '__func__') is obj.__func__):\n", '            # classmethod\n', '            cls = self\n', '        else:\n', '            cls = self.__class__\n', '    elif inspect.isfunction(obj):\n', '        name = obj.__name__\n', '        cls = _findclass(obj)\n', '        if cls is None or getattr(cls, name) is not obj:\n', '            return None\n', '    elif inspect.isbuiltin(obj):\n', '        name = obj.__name__\n', '        self = obj.__self__\n', '        if (inspect.isclass(self) and\n', "            self.__qualname__ + '.' + name == obj.__qualname__):\n", '            # classmethod\n', '            cls = self\n', '        else:\n', '            cls = self.__class__\n', '    # Should be tested before isdatadescriptor().\n', '    elif isinstance(obj, property):\n', '        func = obj.fget\n', '        name = func.__name__\n', '        cls = _findclass(func)\n', '        if cls is None or getattr(cls, name) is not obj:\n', '            return None\n', '    elif inspect.ismethoddescriptor(obj) or inspect.isdatadescriptor(obj):\n', '        name = obj.__name__\n', '        cls = obj.__objclass__\n', '        if getattr(cls, name) is not obj:\n', '            return None\n', '        if inspect.ismemberdescriptor(obj):\n', "            slots = getattr(cls, '__slots__', None)\n", '            if isinstance(slots, dict) and name in slots:\n', '                return slots[name]\n', '    else:\n', '        return None\n', '    for base in cls.__mro__:\n', '        try:\n', '            doc = _getowndoc(getattr(base, name))\n', '        except AttributeError:\n', '            continue\n', '        if doc is not None:\n', '            return doc\n', '    return None\n', '\n', 'def _getowndoc(obj):\n', '    """Get the documentation string for an object if it is not\n', '    inherited from its class."""\n', '    try:\n', "        doc = object.__getattribute__(obj, '__doc__')\n", '        if doc is None:\n', '            return None\n', '        if obj is not type:\n', '            typedoc = type(obj).__doc__\n', '            if isinstance(typedoc, str) and typedoc == doc:\n', '                return None\n', '        return doc\n', '    except AttributeError:\n', '        return None\n', '\n', 'def _getdoc(object):\n', '    """Get the documentation string for an object.\n', '\n', '    All tabs are expanded to spaces.  To clean up docstrings that are\n', '    indented to line up with blocks of code, any whitespace than can be\n', '    uniformly removed from the second line onwards is removed."""\n', '    doc = _getowndoc(object)\n', '    if doc is None:\n', '        try:\n', '            doc = _finddoc(object)\n', '        except (AttributeError, TypeError):\n', '            return None\n', '    if not isinstance(doc, str):\n', '        return None\n', '    return inspect.cleandoc(doc)\n', '\n', 'def getdoc(object):\n', '    """Get the doc string or comments for an object."""\n', '    result = _getdoc(object) or inspect.getcomments(object)\n', "    return result and re.sub('^ *\\n', '', result.rstrip()) or ''\n", '\n', 'def splitdoc(doc):\n', '    """Split a doc string into a synopsis line (if any) and the rest."""\n', "    lines = doc.strip().split('\\n')\n", '    if len(lines) == 1:\n', "        return lines[0], ''\n", '    elif len(lines) >= 2 and not lines[1].rstrip():\n', "        return lines[0], '\\n'.join(lines[2:])\n", "    return '', '\\n'.join(lines)\n", '\n', 'def classname(object, modname):\n', '    """Get a class name and qualify it with a module name if necessary."""\n', '    name = object.__name__\n', '    if object.__module__ != modname:\n', "        name = object.__module__ + '.' + name\n", '    return name\n', '\n', 'def isdata(object):\n', '    """Check if an object is of a type that probably means it\'s data."""\n', '    return not (inspect.ismodule(object) or inspect.isclass(object) or\n', '                inspect.isroutine(object) or inspect.isframe(object) or\n', '                inspect.istraceback(object) or inspect.iscode(object))\n', '\n', 'def replace(text, *pairs):\n', '    """Do a series of global replacements on a string."""\n', '    while pairs:\n', '        text = pairs[1].join(text.split(pairs[0]))\n', '        pairs = pairs[2:]\n', '    return text\n', '\n', 'def cram(text, maxlen):\n', '    """Omit part of a string if needed to make it fit in a maximum length."""\n', '    if len(text) > maxlen:\n', '        pre = max(0, (maxlen-3)//2)\n', '        post = max(0, maxlen-3-pre)\n', "        return text[:pre] + '...' + text[len(text)-post:]\n", '    return text\n', '\n', "_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)


, re.IGNORECASE)\n", 'def stripid(text):\n', '    """Remove the hexadecimal id from a Python object representation."""\n', '    # The behaviour of %p is implementation-dependent in terms of case.\n', "    return _re_stripid.sub(r'\\1', text)\n", '\n', 'def _is_bound_method(fn):\n', '    """\n', '    Returns True if fn is a bound method, regardless of whether\n', '    fn was implemented in Python or in C.\n', '    """\n', '    if inspect.ismethod(fn):\n', '        return True\n', '    if inspect.isbuiltin(fn):\n', "        self = getattr(fn, '__self__', None)\n", '        return not (inspect.ismodule(self) or (self is None))\n', '    return False\n', '\n', '\n', 'def allmethods(cl):\n', '    methods = {}\n', '    for key, value in inspect.getmembers(cl, inspect.isroutine):\n', '        methods[key] = 1\n', '    for base in cl.__bases__:\n', '        methods.update(allmethods(base)) # all your base are belong to us\n', '    for key in methods.keys():\n', '        methods[key] = getattr(cl, key)\n', '    return methods\n', '\n', 'def _split_list(s, predicate):\n', '    """Split sequence s via predicate, and return pair ([true], [false]).\n', '\n', '    The return value is a 2-tuple of lists,\n', '        ([x for x in s if predicate(x)],\n', '         [x for x in s if not predicate(x)])\n', '    """\n', '\n', '    yes = []\n', '    no = []\n', '    for x in s:\n', '        if predicate(x):\n', '            yes.append(x)\n', '        else:\n', '            no.append(x)\n', '    return yes, no\n', '\n', 'def visiblename(name, all=None, obj=None):\n', '    """Decide whether to show documentation on a variable."""\n', '    # Certain special names are redundant or internal.\n', '    # XXX Remove __initializing__?\n', "    if name in {'__author__', '__builtins__', '__cached__', '__credits__',\n", "                '__date__', '__doc__', '__file__', '__spec__',\n", "                '__loader__', '__module__', '__name__', '__package__',\n", "                '__path__', '__qualname__', '__slots__', '__version__'}:\n", '        return 0\n', '    # Private names are hidden, but special names are displayed.\n', "    if name.startswith('__') and name.endswith('__'): return 1\n", '    # Namedtuples have public fields and methods with a single leading underscore\n', "    if name.startswith('_') and hasattr(obj, '_fields'):\n", '        return True\n', '    if all is not None:\n', '        # only document that which the programmer exported in __all__\n', '        return name in all\n', '    else:\n', "        return not name.startswith('_')\n", '\n', 'def classify_class_attrs(object):\n', '    """Wrap inspect.classify_class_attrs, with fixup for data descriptors."""\n', '    results = []\n', '    for (name, kind, cls, value) in inspect.classify_class_attrs(object):\n', '        if inspect.isdatadescriptor(value):\n', "            kind = 'data descriptor'\n", '            if isinstance(value, property) and value.fset is None:\n', "                kind = 'readonly property'\n", '        results.append((name, kind, cls, value))\n', '    return results\n', '\n', 'def sort_attributes(attrs, object):\n', "    'Sort the attrs list in-place by _fields and then alphabetically by name'\n", '    # This allows data descriptors to be ordered according\n', '    # to a _fields attribute if present.\n', "    fields = getattr(object, '_fields', [])\n", '    try:\n', '        field_order = {name : i-len(fields) for (i, name) in enumerate(fields)}\n', '    except TypeError:\n', '        field_order = {}\n', '    keyfunc = lambda attr: (field_order.get(attr[0], 0), attr[0])\n', '    attrs.sort(key=keyfunc)\n', '\n', '# ----------------------------------------------------- module manipulation\n', '\n', 'def ispackage(path):\n', '    """Guess whether a path refers to a package directory."""\n', '    if os.path.isdir(path):\n', "        for ext in ('.py', '.pyc'):\n", "            if os.path.isfile(os.path.join(path, '__init__' + ext)):\n", '                return True\n', '    return False\n', '\n', 'def source_synopsis(file):\n', '    line = file.readline()\n', "    while line[:1] == '#' or not line.strip():\n", '        line = file.readline()\n', '        if not line: break\n', '    line = line.strip()\n', '    if line[:4] == \'r"""\': line = line[1:]\n', '    if line[:3] == \'"""\':\n', '        line = line[3:]\n', "        if line[-1:] == '\\\\': line = line[:-1]\n", '        while not line.strip():\n', '            line = file.readline()\n', '            if not line: break\n', '        result = line.split(\'"""\')[0].strip()\n', '    else: result = None\n', '    return result\n', '\n', 'def synopsis(filename, cache={}):\n', '    """Get the one-line summary out of a module file."""\n', '    mtime = os.stat(filename).st_mtime\n', '    lastupdate, result = cache.get(filename, (None, None))\n', '    if lastupdate is None or lastupdate < mtime:\n', '        # Look for binary suffixes first, falling back to source.\n', '        if filename.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)):\n', '            loader_cls = importlib.machinery.SourcelessFileLoader\n', '        elif filename.endswith(tuple(importlib.machinery.EXTENSION_SUFFIXES)):\n', '            loader_cls = importlib.machinery.ExtensionFileLoader\n', '        else:\n', '            loader_cls = None\n', '        # Now handle the choice.\n', '        if loader_cls is None:\n', '            # Must be a source file.\n', '            try:\n', '                file = tokenize.open(filename)\n', '            except OSError:\n', "                # module can't be opened, so skip it\n", '                return None\n', '            # text modules can be directly examined\n', '            with file:\n', '                result = source_synopsis(file)\n', '        else:\n', '            # Must be a binary module, which has to be imported.\n', "            loader = loader_cls('__temp__', filename)\n", "            # XXX We probably don't need to pass in the loader here.\n", "            spec = importlib.util.spec_from_file_location('__temp__', filename,\n", '                                                          loader=loader)\n', '            try:\n', '                module = importlib._bootstrap._load(spec)\n', '            except:\n', '                return None\n', "            del sys.modules['__temp__']\n", '            result = module.__doc__.splitlines()[0] if module.__doc__ else None\n', '        # Cache the result.\n', '        cache[filename] = (mtime, result)\n', '    return result\n', '\n', 'class ErrorDuringImport(Exception):\n', '    """Errors that occurred while trying to import something to document it."""\n', '    def __init__(self, filename, exc_info):\n', '        self.filename = filename\n', '        self.exc, self.value, self.tb = exc_info\n', '\n', '    def __str__(self):\n', '        exc = self.exc.__name__\n', "        return 'problem in %s - %s: %s' % (self.filename, exc, self.value)\n", '\n', 'def importfile(path):\n', '    """Import a Python source file or compiled file given its path."""\n', '    magic = importlib.util.MAGIC_NUMBER\n', "    with open(path, 'rb') as file:\n", '        is_bytecode = magic == file.read(len(magic))\n', '    filename = os.path.basename(path)\n', '    name, ext = os.path.splitext(filename)\n', '    if is_bytecode:\n', '        loader = importlib._bootstrap_external.SourcelessFileLoader(name, path)\n', '    else:\n', '        loader = importlib._bootstrap_external.SourceFileLoader(name, path)\n', "    # XXX We probably don't need to pass in the loader here.\n", '    spec = importlib.util.spec_from_file_location(name, path, loader=loader)\n', '    try:\n', '        return importlib._bootstrap._load(spec)\n', '    except:\n', '        raise ErrorDuringImport(path, sys.exc_info())\n', '\n', 'def safeimport(path, forceload=0, cache={}):\n', '    """Import a module; handle errors; return None if the module isn\'t found.\n', '\n', "    If the module *is* found but an exception occurs, it's wrapped in an\n", '    ErrorDuringImport exception and reraised.  Unlike __import__, if a\n', '    package path is specified, the module at the end of the path is returned,\n', "    not the package at the beginning.  If the optional 'forceload' argument\n", '    is 1, we reload the module from disk (unless it\'s a dynamic extension)."""\n', '    try:\n', '        # If forceload is 1 and the module has been previously loaded from\n', "        # disk, we always have to reload the module.  Checking the file's\n", "        # mtime isn't good enough (e.g. the module could contain a class\n", '        # that inherits from another module that has changed).\n', '        if forceload and path in sys.modules:\n', '            if path not in sys.builtin_module_names:\n', '                # Remove the module from sys.modules and re-import to try\n', '                # and avoid problems with partially loaded modules.\n', "                # Also remove any submodules because they won't appear\n", "                # in the newly loaded module's namespace if they're already\n", '                # in sys.modules.\n', "                subs = [m for m in sys.modules if m.startswith(path + '.')]\n", '                for key in [path] + subs:\n', '                    # Prevent garbage collection.\n', '                    cache[key] = sys.modules[key]\n', '                    del sys.modules[key]\n', '        module = __import__(path)\n', '    except:\n', '        # Did the error occur before or after the module was found?\n', '        (exc, value, tb) = info = sys.exc_info()\n', '        if path in sys.modules:\n', '            # An error occurred while executing the imported module.\n', '            raise ErrorDuringImport(sys.modules[path].__file__, info)\n', '        elif exc is SyntaxError:\n', '            # A SyntaxError occurred before we could execute the module.\n', '            raise ErrorDuringImport(value.filename, info)\n', '        elif issubclass(exc, ImportError) and value.name == path:\n', '            # No such module in the path.\n', '            return None\n', '        else:\n', '            # Some other error occurred during the importing process.\n', '            raise ErrorDuringImport(path, sys.exc_info())\n', "    for part in path.split('.')[1:]:\n", '        try: module = getattr(module, part)\n', '        except AttributeError: return None\n', '    return module\n', '\n', '# ---------------------------------------------------- formatter base class\n', '\n', 'class Doc:\n', '\n', '    PYTHONDOCS = os.environ.get("PYTHONDOCS",\n', '                                "https://docs.python.org/%d.%d/library"\n', '                                % sys.version_info[:2])\n', '\n', '    def document(self, object, name=None, *args):\n', '        """Generate documentation for an object."""\n', '        args = (object, name) + args\n', "        # 'try' clause is to attempt to handle the possibility that inspect\n", '        # identifies something in a way that pydoc itself has issues handling;\n', "        # think 'super' and how it is a descriptor (which raises the exception\n", '        # by lacking a __name__ attribute) and an instance.\n', '        try:\n', '            if inspect.ismodule(object): return self.docmodule(*args)\n', '            if inspect.isclass(object): return self.docclass(*args)\n', '            if inspect.isroutine(object): return self.docroutine(*args)\n', '        except AttributeError:\n', '            pass\n', '        if inspect.isdatadescriptor(object): return self.docdata(*args)\n', '        return self.docother(*args)\n', '\n', '    def fail(self, object, name=None, *args):\n', '        """Raise an exception for unimplemented types."""\n', '        message = "don\'t know how to document object%s of type %s" % (\n', "            name and ' ' + repr(name), type(object).__name__)\n", '        raise TypeError(message)\n', '\n', '    docmodule = docclass = docroutine = docother = docproperty = docdata = fail\n', '\n', "    def getdocloc(self, object, basedir=sysconfig.get_path('stdlib')):\n", '        """Return the location of module docs or None"""\n', '\n', '        try:\n', '            file = inspect.getabsfile(object)\n', '        except TypeError:\n', "            file = '(built-in)'\n", '\n', '        docloc = os.environ.get("PYTHONDOCS", self.PYTHONDOCS)\n', '\n', '        basedir = os.path.normcase(basedir)\n', '        if (isinstance(object, type(os)) and\n', "            (object.__name__ in ('errno', 'exceptions', 'gc', 'imp',\n", "                                 'marshal', 'posix', 'signal', 'sys',\n", "                                 '_thread', 'zipimport') or\n", '             (file.startswith(basedir) and\n', "              not file.startswith(os.path.join(basedir, 'site-packages')))) and\n", "            object.__name__ not in ('xml.etree', 'test.pydoc_mod')):\n", '            if docloc.startswith(("http://", "https://")):\n', '                docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__.lower())\n', '            else:\n', '                docloc = os.path.join(docloc, object.__name__.lower() + ".html")\n', '        else:\n', '            docloc = None\n', '        return docloc\n', '\n', '# -------------------------------------------- HTML documentation generator\n', '\n', 'class HTMLRepr(Repr):\n', '    """Class for safely making an HTML representation of a Python object."""\n', '    def __init__(self):\n', '        Repr.__init__(self)\n', '        self.maxlist = self.maxtuple = 20\n', '        self.maxdict = 10\n', '        self.maxstring = self.maxother = 100\n', '\n', '    def escape(self, text):\n', "        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')\n", '\n', '    def repr(self, object):\n', '        return Repr.repr(self, object)\n', '\n', '    def repr1(self, x, level):\n', "        if hasattr(type(x), '__name__'):\n", "            methodname = 'repr_' + '_'.join(type(x).__name__.split())\n", '            if hasattr(self, methodname):\n', '                return getattr(self, methodname)(x, level)\n', '        return self.escape(cram(stripid(repr(x)), self.maxother))\n', '\n', '    def repr_string(self, x, level):\n', '        test = cram(x, self.maxstring)\n', '        testrepr = repr(test)\n', "        if '\\\\' in test and '\\\\' not in replace(testrepr, r'\\\\', ''):\n", '            # Backslashes are only literal in the string and are never\n', '            # needed to make any special characters, so show a raw string.\n', "            return 'r' + testrepr[0] + self.escape(test) + testrepr[0]\n", '        return re.sub(r\'((\\\\[\\\\abfnrtv\\\'"]|\\\\[0-9]..|\\\\x..|\\\\u....)+)\',\n', '                      r\'<font color="#c040c0">\\1</font>\',\n', '                      self.escape(testrepr))\n', '\n', '    repr_str = repr_string\n', '\n', '    def repr_instance(self, x, level):\n', '        try:\n', '            return self.escape(cram(stripid(repr(x)), self.maxstring))\n', '        except:\n', "            return self.escape('<%s instance>' % x.__class__.__name__)\n", '\n', '    repr_unicode = repr_string\n', '\n', 'class HTMLDoc(Doc):\n', '    """Formatter class for HTML documentation."""\n', '\n', '    # ------------------------------------------- HTML formatting utilities\n', '\n', '    _repr_instance = HTMLRepr()\n', '    repr = _repr_instance.repr\n', '    escape = _repr_instance.escape\n', '\n', '    def page(self, title, contents):\n', '        """Format an HTML page."""\n', "        return '''\\\n", '<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">\n', '<html><head><title>Python: %s</title>\n', '<meta http-equiv="Content-Type" content="text/html; charset=utf-8">\n', '</head><body bgcolor="#f0f0f8">\n', '%s\n', "</body></html>''' % (title, contents)\n", '\n', "    def heading(self, title, fgcol, bgcol, extras=''):\n", '        """Format a page heading."""\n', "        return '''\n", '<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">\n', '<tr bgcolor="%s">\n', '<td valign=bottom>&nbsp;<br>\n', '<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td\n', '><td align=right valign=bottom\n', '><font color="%s" face="helvetica, arial">%s</font></td></tr></table>\n', "    ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')\n", '\n', '    def section(self, title, fgcol, bgcol, contents, width=6,\n', "                prelude='', marginalia=None, gap='&nbsp;'):\n", '        """Format a section with a heading."""\n', '        if marginalia is None:\n', "            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'\n", "        result = '''<p>\n", '<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">\n', '<tr bgcolor="%s">\n', '<td colspan=3 valign=bottom>&nbsp;<br>\n', '<font color="%s" face="helvetica, arial">%s</font></td></tr>\n', "    ''' % (bgcol, fgcol, title)\n", '        if prelude:\n', "            result = result + '''\n", '<tr bgcolor="%s"><td rowspan=2>%s</td>\n', '<td colspan=2>%s</td></tr>\n', "<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)\n", '        else:\n', "            result = result + '''\n", '<tr><td bgcolor="%s">%s</td><td>%s</td>\'\'\' % (bgcol, marginalia, gap)\n', '\n', '        return result + \'\\n<td width="100%%">%s</td></tr></table>\' % contents\n', '\n', '    def bigsection(self, title, *args):\n', '        """Format a section with a big heading."""\n', "        title = '<big><strong>%s</strong></big>' % title\n", '        return self.section(title, *args)\n', '\n', '    def preformat(self, text):\n', '        """Format literal preformatted text."""\n', '        text = self.escape(text.expandtabs())\n', "        return replace(text, '\\n\\n', '\\n \\n', '\\n\\n', '\\n \\n',\n", "                             ' ', '&nbsp;', '\\n', '<br>\\n')\n", '\n', '    def multicolumn(self, list, format, cols=4):\n', '        """Format a list of items into a multi-column list."""\n', "        result = ''\n", '        rows = (len(list)+cols-1)//cols\n', '        for col in range(cols):\n', '            result = result + \'<td width="%d%%" valign=top>\' % (100//cols)\n', '            for i in range(rows*col, rows*col+rows):\n', '                if i < len(list):\n', "                    result = result + format(list[i]) + '<br>\\n'\n", "            result = result + '</td>'\n", '        return \'<table width="100%%" summary="list"><tr>%s</tr></table>\' % result\n', '\n', '    def grey(self, text): return \'<font color="#909090">%s</font>\' % text\n', '\n', '    def namelink(self, name, *dicts):\n', '        """Make a link for an identifier, given name-to-URL mappings."""\n', '        for dict in dicts:\n', '            if name in dict:\n', '                return \'<a href="%s">%s</a>\' % (dict[name], name)\n', '        return name\n', '\n', '    def classlink(self, object, modname):\n', '        """Make a link for a class."""\n', '        name, module = object.__name__, sys.modules.get(object.__module__)\n', '        if hasattr(module, name) and getattr(module, name) is object:\n', '            return \'<a href="%s.html#%s">%s</a>\' % (\n', '                module.__name__, name, classname(object, modname))\n', '        return classname(object, modname)\n', '\n', '    def modulelink(self, object):\n', '        """Make a link for a module."""\n', '        return \'<a href="%s.html">%s</a>\' % (object.__name__, object.__name__)\n', '\n', '    def modpkglink(self, modpkginfo):\n', '        """Make a link for a module or package to display in an index."""\n', '        name, path, ispackage, shadowed = modpkginfo\n', '        if shadowed:\n', '            return self.grey(name)\n', '        if path:\n', "            url = '%s.%s.html' % (path, name)\n", '        else:\n', "            url = '%s.html' % name\n", '        if ispackage:\n', "            text = '<strong>%s</strong>&nbsp;(package)' % name\n", '        else:\n', '            text = name\n', '        return \'<a href="%s">%s</a>\' % (url, text)\n', '\n', '    def filelink(self, url, path):\n', '        """Make a link to source file."""\n', '        return \'<a href="file:%s">%s</a>\' % (url, path)\n', '\n', '    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):\n', '        """Mark up some plain text, given a context of symbols to look for.\n', '        Each context dictionary maps object names to anchor names."""\n', '        escape = escape or self.escape\n', '        results = []\n', '        here = 0\n', "        pattern = re.compile(r'\\b((http|https|ftp)://\\S+[\\w/]|'\n", "                                r'RFC[- ]?(\\d+)|'\n", "                                r'PEP[- ]?(\\d+)|'\n", "                                r'(self\\.)?(\\w+))')\n", '        while True:\n', '            match = pattern.search(text, here)\n', '            if not match: break\n', '            start, end = match.span()\n', '            results.append(escape(text[here:start]))\n', '\n', '            all, scheme, rfc, pep, selfdot, name = match.groups()\n', '            if scheme:\n', '                url = escape(all).replace(\'"\', \'&quot;\')\n', '                results.append(\'<a href="%s">%s</a>\' % (url, url))\n', '            elif rfc:\n', "                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)\n", '                results.append(\'<a href="%s">%s</a>\' % (url, escape(all)))\n', '            elif pep:\n', "                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)\n", '                results.append(\'<a href="%s">%s</a>\' % (url, escape(all)))\n', '            elif selfdot:\n', "                # Create a link for methods like 'self.method(...)'\n", "                # and use <strong> for attributes like 'self.attr'\n", "                if text[end:end+1] == '(':\n", "                    results.append('self.' + self.namelink(name, methods))\n", '                else:\n', "                    results.append('self.<strong>%s</strong>' % name)\n", "            elif text[end:end+1] == '(':\n", '                results.append(self.namelink(name, methods, funcs, classes))\n', '            else:\n', '                results.append(self.namelink(name, classes))\n', '            here = end\n', '        results.append(escape(text[here:]))\n', "        return ''.join(results)\n", '\n', '    # ---------------------------------------------- type-specific routines\n', '\n', '    def formattree(self, tree, modname, parent=None):\n', '        """Produce HTML for a class tree as given by inspect.getclasstree()."""\n', "        result = ''\n", '        for entry in tree:\n', '            if type(entry) is type(()):\n', '                c, bases = entry\n', '                result = result + \'<dt><font face="helvetica, arial">\'\n', '                result = result + self.classlink(c, modname)\n', '                if bases and bases != (parent,):\n', '                    parents = []\n', '                    for base in bases:\n', '                        parents.append(self.classlink(base, modname))\n', "                    result = result + '(' + ', '.join(parents) + ')'\n", "                result = result + '\\n</font></dt>'\n", '            elif type(entry) is type([]):\n', "                result = result + '<dd>\\n%s</dd>\\n' % self.formattree(\n", '                    entry, modname, c)\n', "        return '<dl>\\n%s</dl>\\n' % result\n", '\n', '    def docmodule(self, object, name=None, mod=None, *ignored):\n', '        """Produce HTML documentation for a module object."""\n', '        name = object.__name__ # ignore the passed-in name\n', '        try:\n', '            all = object.__all__\n', '        except AttributeError:\n', '            all = None\n', "        parts = name.split('.')\n", '        links = []\n', '        for i in range(len(parts)-1):\n', '            links.append(\n', '                \'<a href="%s.html"><font color="#ffffff">%s</font></a>\' %\n', "                ('.'.join(parts[:i+1]), parts[i]))\n", "        linkedname = '.'.join(links + parts[-1:])\n", "        head = '<big><big><strong>%s</strong></big></big>' % linkedname\n", '        try:\n', '            path = inspect.getabsfile(object)\n', '            url = urllib.parse.quote(path)\n', '            filelink = self.filelink(url, path)\n', '        except TypeError:\n', "            filelink = '(built-in)'\n", '        info = []\n', "        if hasattr(object, '__version__'):\n", '            version = str(object.__version__)\n', "            if version[:11] == '


 + 'Revision: ' and version[-1:] == '


:\n", '                version = version[11:-1].strip()\n', "            info.append('version %s' % self.escape(version))\n", "        if hasattr(object, '__date__'):\n", '            info.append(self.escape(str(object.__date__)))\n', '        if info:\n', "            head = head + ' (%s)' % ', '.join(info)\n", '        docloc = self.getdocloc(object)\n', '        if docloc is not None:\n', '            docloc = \'<br><a href="%(docloc)s">Module Reference</a>\' % locals()\n', '        else:\n', "            docloc = ''\n", '        result = self.heading(\n', "            head, '#ffffff', '#7799ee',\n", '            \'<a href=".">index</a><br>\' + filelink + docloc)\n', '\n', '        modules = inspect.getmembers(object, inspect.ismodule)\n', '\n', '        classes, cdict = [], {}\n', '        for key, value in inspect.getmembers(object, inspect.isclass):\n', '            # if __all__ exists, believe it.  Otherwise use old heuristic.\n', '            if (all is not None or\n', '                (inspect.getmodule(value) or object) is object):\n', '                if visiblename(key, all, object):\n', '                    classes.append((key, value))\n', "                    cdict[key] = cdict[value] = '#' + key\n", '        for key, value in classes:\n', '            for base in value.__bases__:\n', '                key, modname = base.__name__, base.__module__\n', '                module = sys.modules.get(modname)\n', '                if modname != name and module and hasattr(module, key):\n', '                    if getattr(module, key) is base:\n', '                        if not key in cdict:\n', "                            cdict[key] = cdict[base] = modname + '.html#' + key\n", '        funcs, fdict = [], {}\n', '        for key, value in inspect.getmembers(object, inspect.isroutine):\n', '            # if __all__ exists, believe it.  Otherwise use old heuristic.\n', '            if (all is not None or\n', '                inspect.isbuiltin(value) or inspect.getmodule(value) is object):\n', '                if visiblename(key, all, object):\n', '                    funcs.append((key, value))\n', "                    fdict[key] = '#-' + key\n", '                    if inspect.isfunction(value): fdict[value] = fdict[key]\n', '        data = []\n', '        for key, value in inspect.getmembers(object, isdata):\n', '            if visiblename(key, all, object):\n', '                data.append((key, value))\n', '\n', '        doc = self.markup(getdoc(object), self.preformat, fdict, cdict)\n', "        doc = doc and '<tt>%s</tt>' % doc\n", "        result = result + '<p>%s</p>\\n' % doc\n", '\n', "        if hasattr(object, '__path__'):\n", '            modpkgs = []\n', '            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):\n', '                modpkgs.append((modname, name, ispkg, 0))\n', '            modpkgs.sort()\n', '            contents = self.multicolumn(modpkgs, self.modpkglink)\n', '            result = result + self.bigsection(\n', "                'Package Contents', '#ffffff', '#aa55cc', contents)\n", '        elif modules:\n', '            contents = self.multicolumn(\n', '                modules, lambda t: self.modulelink(t[1]))\n', '            result = result + self.bigsection(\n', "                'Modules', '#ffffff', '#aa55cc', contents)\n", '\n', '        if classes:\n', '            classlist = [value for (key, value) in classes]\n', '            contents = [\n', '                self.formattree(inspect.getclasstree(classlist, 1), name)]\n', '            for key, value in classes:\n', '                contents.append(self.document(value, key, name, fdict, cdict))\n', '            result = result + self.bigsection(\n', "                'Classes', '#ffffff', '#ee77aa', ' '.join(contents))\n", '        if funcs:\n', '            contents = []\n', '            for key, value in funcs:\n', '                contents.append(self.document(value, key, name, fdict, cdict))\n', '            result = result + self.bigsection(\n', "                'Functions', '#ffffff', '#eeaa77', ' '.join(contents))\n", '        if data:\n', '            contents = []\n', '            for key, value in data:\n', '                contents.append(self.document(value, key))\n', '            result = result + self.bigsection(\n', "                'Data', '#ffffff', '#55aa55', '<br>\\n'.join(contents))\n", "        if hasattr(object, '__author__'):\n", '            contents = self.markup(str(object.__author__), self.preformat)\n', '            result = result + self.bigsection(\n', "                'Author', '#ffffff', '#7799ee', contents)\n", "        if hasattr(object, '__credits__'):\n", '            contents = self.markup(str(object.__credits__), self.preformat)\n', '            result = result + self.bigsection(\n', "                'Credits', '#ffffff', '#7799ee', contents)\n", '\n', '        return result\n', '\n', '    def docclass(self, object, name=None, mod=None, funcs={}, classes={},\n', '                 *ignored):\n', '        """Produce HTML documentation for a class object."""\n', '        realname = object.__name__\n', '        name = name or realname\n', '        bases = object.__bases__\n', '\n', '        contents = []\n', '        push = contents.append\n', '\n', '        # Cute little class to pump out a horizontal rule between sections.\n', '        class HorizontalRule:\n', '            def __init__(self):\n', '                self.needone = 0\n', '            def maybe(self):\n', '                if self.needone:\n', "                    push('<hr>\\n')\n", '                self.needone = 1\n', '        hr = HorizontalRule()\n', '\n', '        # List the mro, if non-trivial.\n', '        mro = deque(inspect.getmro(object))\n', '        if len(mro) > 2:\n', '            hr.maybe()\n', "            push('<dl><dt>Method resolution order:</dt>\\n')\n", '            for base in mro:\n', "                push('<dd>%s</dd>\\n' % self.classlink(base,\n", '                                                      object.__module__))\n', "            push('</dl>\\n')\n", '\n', '        def spill(msg, attrs, predicate):\n', '            ok, attrs = _split_list(attrs, predicate)\n', '            if ok:\n', '                hr.maybe()\n', '                push(msg)\n', '                for name, kind, homecls, value in ok:\n', '                    try:\n', '                        value = getattr(object, name)\n', '                    except Exception:\n', '                        # Some descriptors may meet a failure in their __get__.\n', '                        # (bug #1785)\n', '                        push(self.docdata(value, name, mod))\n', '                    else:\n', '                        push(self.document(value, name, mod,\n', '                                        funcs, classes, mdict, object))\n', "                    push('\\n')\n", '            return attrs\n', '\n', '        def spilldescriptors(msg, attrs, predicate):\n', '            ok, attrs = _split_list(attrs, predicate)\n', '            if ok:\n', '                hr.maybe()\n', '                push(msg)\n', '                for name, kind, homecls, value in ok:\n', '                    push(self.docdata(value, name, mod))\n', '            return attrs\n', '\n', '        def spilldata(msg, attrs, predicate):\n', '            ok, attrs = _split_list(attrs, predicate)\n', '            if ok:\n', '                hr.maybe()\n', '                push(msg)\n', '                for name, kind, homecls, value in ok:\n', '                    base = self.docother(getattr(object, name), name, mod)\n', '                    doc = getdoc(value)\n', '                    if not doc:\n', "                        push('<dl><dt>%s</dl>\\n' % base)\n", '                    else:\n', '                        doc = self.markup(getdoc(value), self.preformat,\n', '                                          funcs, classes, mdict)\n', "                        doc = '<dd><tt>%s</tt>' % doc\n", "                        push('<dl><dt>%s%s</dl>\\n' % (base, doc))\n", "                    push('\\n')\n", '            return attrs\n', '\n', '        attrs = [(name, kind, cls, value)\n', '                 for name, kind, cls, value in classify_class_attrs(object)\n', '                 if visiblename(name, obj=object)]\n', '\n', '        mdict = {}\n', '        for key, kind, homecls, value in attrs:\n', "            mdict[key] = anchor = '#' + name + '-' + key\n", '            try:\n', '                value = getattr(object, name)\n', '            except Exception:\n', '                # Some descriptors may meet a failure in their __get__.\n', '                # (bug #1785)\n', '                pass\n', '            try:\n', '                # The value may not be hashable (e.g., a data attr with\n', '                # a dict or list value).\n', '                mdict[value] = anchor\n', '            except TypeError:\n', '                pass\n', '\n', '        while attrs:\n', '            if mro:\n', '                thisclass = mro.popleft()\n', '            else:\n', '                thisclass = attrs[0][2]\n', '            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)\n', '\n', '            if object is not builtins.object and thisclass is builtins.object:\n', '                attrs = inherited\n', '                continue\n', '            elif thisclass is object:\n', "                tag = 'defined here'\n", '            else:\n', "                tag = 'inherited from %s' % self.classlink(thisclass,\n", '                                                           object.__module__)\n', "            tag += ':<br>\\n'\n", '\n', '            sort_attributes(attrs, object)\n', '\n', '            # Pump out the attrs, segregated by kind.\n', "            attrs = spill('Methods %s' % tag, attrs,\n", "                          lambda t: t[1] == 'method')\n", "            attrs = spill('Class methods %s' % tag, attrs,\n", "                          lambda t: t[1] == 'class method')\n", "            attrs = spill('Static methods %s' % tag, attrs,\n", "                          lambda t: t[1] == 'static method')\n", '            attrs = spilldescriptors("Readonly properties %s" % tag, attrs,\n', "                                     lambda t: t[1] == 'readonly property')\n", "            attrs = spilldescriptors('Data descriptors %s' % tag, attrs,\n", "                                     lambda t: t[1] == 'data descriptor')\n", "            attrs = spilldata('Data and other attributes %s' % tag, attrs,\n", "                              lambda t: t[1] == 'data')\n", '            assert attrs == []\n', '            attrs = inherited\n', '\n', "        contents = ''.join(contents)\n", '\n', '        if name == realname:\n', '            title = \'<a name="%s">class <strong>%s</strong></a>\' % (\n', '                name, realname)\n', '        else:\n', '            title = \'<strong>%s</strong> = <a name="%s">class %s</a>\' % (\n', '                name, name, realname)\n', '        if bases:\n', '            parents = []\n', '            for base in bases:\n', '                parents.append(self.classlink(base, object.__module__))\n', "            title = title + '(%s)' % ', '.join(parents)\n", '\n', "        decl = ''\n", '        try:\n', '            signature = inspect.signature(object)\n', '        except (ValueError, TypeError):\n', '            signature = None\n', '        if signature:\n', '            argspec = str(signature)\n', "            if argspec and argspec != '()':\n", "                decl = name + self.escape(argspec) + '\\n\\n'\n", '\n', '        doc = getdoc(object)\n', '        if decl:\n', "            doc = decl + (doc or '')\n", '        doc = self.markup(doc, self.preformat, funcs, classes, mdict)\n', "        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc\n", '\n', "        return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)\n", '\n', '    def formatvalue(self, object):\n', '        """Format an argument default value as text."""\n', "        return self.grey('=' + self.repr(object))\n", '\n', '    def docroutine(self, object, name=None, mod=None,\n', '                   funcs={}, classes={}, methods={}, cl=None):\n', '        """Produce HTML documentation for a function or method object."""\n', '        realname = object.__name__\n', '        name = name or realname\n', "        anchor = (cl and cl.__name__ or '') + '-' + name\n", "        note = ''\n", '        skipdocs = 0\n', '        if _is_bound_method(object):\n', '            imclass = object.__self__.__class__\n', '            if cl:\n', '                if imclass is not cl:\n', "                    note = ' from ' + self.classlink(imclass, mod)\n", '            else:\n', '                if object.__self__ is not None:\n', "                    note = ' method of %s instance' % self.classlink(\n", '                        object.__self__.__class__, mod)\n', '                else:\n', "                    note = ' unbound %s method' % self.classlink(imclass,mod)\n", '\n', '        if (inspect.iscoroutinefunction(object) or\n', '                inspect.isasyncgenfunction(object)):\n', "            asyncqualifier = 'async '\n", '        else:\n', "            asyncqualifier = ''\n", '\n', '        if name == realname:\n', '            title = \'<a name="%s"><strong>%s</strong></a>\' % (anchor, realname)\n', '        else:\n', '            if cl and inspect.getattr_static(cl, realname, []) is object:\n', '                reallink = \'<a href="#%s">%s</a>\' % (\n', "                    cl.__name__ + '-' + realname, realname)\n", '                skipdocs = 1\n', '            else:\n', '                reallink = realname\n', '            title = \'<a name="%s"><strong>%s</strong></a> = %s\' % (\n', '                anchor, name, reallink)\n', '        argspec = None\n', '        if inspect.isroutine(object):\n', '            try:\n', '                signature = inspect.signature(object)\n', '            except (ValueError, TypeError):\n', '                signature = None\n', '            if signature:\n', '                argspec = str(signature)\n', "                if realname == '<lambda>':\n", "                    title = '<strong>%s</strong> <em>lambda</em> ' % name\n", "                    # XXX lambda's won't usually have func_annotations['return']\n", "                    # since the syntax doesn't support but it is possible.\n", "                    # So removing parentheses isn't truly safe.\n", '                    argspec = argspec[1:-1] # remove parentheses\n', '        if not argspec:\n', "            argspec = '(...)'\n", '\n', '        decl = asyncqualifier + title + self.escape(argspec) + (note and\n', '               self.grey(\'<font face="helvetica, arial">%s</font>\' % note))\n', '\n', '        if skipdocs:\n', "            return '<dl><dt>%s</dt></dl>\\n' % decl\n", '        else:\n', '            doc = self.markup(\n', '                getdoc(object), self.preformat, funcs, classes, methods)\n', "            doc = doc and '<dd><tt>%s</tt></dd>' % doc\n", "            return '<dl><dt>%s</dt>%s</dl>\\n' % (decl, doc)\n", '\n', '    def docdata(self, object, name=None, mod=None, cl=None):\n', '        """Produce html documentation for a data descriptor."""\n', '        results = []\n', '        push = results.append\n', '\n', '        if name:\n', "            push('<dl><dt><strong>%s</strong></dt>\\n' % name)\n", '        doc = self.markup(getdoc(object), self.preformat)\n', '        if doc:\n', "            push('<dd><tt>%s</tt></dd>\\n' % doc)\n", "        push('</dl>\\n')\n", '\n', "        return ''.join(results)\n", '\n', '    docproperty = docdata\n', '\n', '    def docother(self, object, name=None, mod=None, *ignored):\n', '        """Produce HTML documentation for a data object."""\n', "        lhs = name and '<strong>%s</strong> = ' % name or ''\n", '        return lhs + self.repr(object)\n', '\n', '    def index(self, dir, shadowed=None):\n', '        """Generate an HTML index for a directory of modules."""\n', '        modpkgs = []\n', '        if shadowed is None: shadowed = {}\n', '        for importer, name, ispkg in pkgutil.iter_modules([dir]):\n', '            if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name):\n', '                # ignore a module if its name contains a surrogate character\n', '                continue\n', "            modpkgs.append((name, '', ispkg, name in shadowed))\n", '            shadowed[name] = 1\n', '\n', '        modpkgs.sort()\n', '        contents = self.multicolumn(modpkgs, self.modpkglink)\n', "        return self.bigsection(dir, '#ffffff', '#ee77aa', contents)\n", '\n', '# -------------------------------------------- text documentation generator\n', '\n', 'class TextRepr(Repr):\n', '    """Class for safely making a text representation of a Python object."""\n', '    def __init__(self):\n', '        Repr.__init__(self)\n', '        self.maxlist = self.maxtuple = 20\n', '        self.maxdict = 10\n', '        self.maxstring = self.maxother = 100\n', '\n', '    def repr1(self, x, level):\n', "        if hasattr(type(x), '__name__'):\n", "            methodname = 'repr_' + '_'.join(type(x).__name__.split())\n", '            if hasattr(self, methodname):\n', '                return getattr(self, methodname)(x, level)\n', '        return cram(stripid(repr(x)), self.maxother)\n', '\n', '    def repr_string(self, x, level):\n', '        test = cram(x, self.maxstring)\n', '        testrepr = repr(test)\n', "        if '\\\\' in test and '\\\\' not in replace(testrepr, r'\\\\', ''):\n", '            # Backslashes are only literal in the string and are never\n', '            # needed to make any special characters, so show a raw string.\n', "            return 'r' + testrepr[0] + test + testrepr[0]\n", '        return testrepr\n', '\n', '    repr_str = repr_string\n', '\n', '    def repr_instance(self, x, level):\n', '        try:\n', '            return cram(stripid(repr(x)), self.maxstring)\n', '        except:\n', "            return '<%s instance>' % x.__class__.__name__\n", '\n', 'class TextDoc(Doc):\n', '    """Formatter class for text documentation."""\n', '\n', '    # ------------------------------------------- text formatting utilities\n', '\n', '    _repr_instance = TextRepr()\n', '    repr = _repr_instance.repr\n', '\n', '    def bold(self, text):\n', '        """Format a string in bold by overstriking."""\n', "        return ''.join(ch + '\\b' + ch for ch in text)\n", '\n', "    def indent(self, text, prefix='    '):\n", '        """Indent text by prepending a given prefix to each line."""\n', "        if not text: return ''\n", "        lines = [prefix + line for line in text.split('\\n')]\n", '        if lines: lines[-1] = lines[-1].rstrip()\n', "        return '\\n'.join(lines)\n", '\n', '    def section(self, title, contents):\n', '        """Format a section with a given heading."""\n', '        clean_contents = self.indent(contents).rstrip()\n', "        return self.bold(title) + '\\n' + clean_contents + '\\n\\n'\n", '\n', '    # ---------------------------------------------- type-specific routines\n', '\n', "    def formattree(self, tree, modname, parent=None, prefix=''):\n", '        """Render in text a class tree as returned by inspect.getclasstree()."""\n', "        result = ''\n", '        for entry in tree:\n', '            if type(entry) is type(()):\n', '                c, bases = entry\n', '                result = result + prefix + classname(c, modname)\n', '                if bases and bases != (parent,):\n', '                    parents = (classname(c, modname) for c in bases)\n', "                    result = result + '(%s)' % ', '.join(parents)\n", "                result = result + '\\n'\n", '            elif type(entry) is type([]):\n', '                result = result + self.formattree(\n', "                    entry, modname, c, prefix + '    ')\n", '        return result\n', '\n', '    def docmodule(self, object, name=None, mod=None):\n', '        """Produce text documentation for a given module object."""\n', '        name = object.__name__ # ignore the passed-in name\n', '        synop, desc = splitdoc(getdoc(object))\n', "        result = self.section('NAME', name + (synop and ' - ' + synop))\n", "        all = getattr(object, '__all__', None)\n", '        docloc = self.getdocloc(object)\n', '        if docloc is not None:\n', '            result = result + self.section(\'MODULE REFERENCE\', docloc + """\n', '\n', 'The following documentation is automatically generated from the Python\n', 'source files.  It may be incomplete, incorrect or include features that\n', 'are considered implementation detail and may vary between Python\n', 'implementations.  When in doubt, consult the module reference at the\n', 'location listed above.\n', '""")\n', '\n', '        if desc:\n', "            result = result + self.section('DESCRIPTION', desc)\n", '\n', '        classes = []\n', '        for key, value in inspect.getmembers(object, inspect.isclass):\n', '            # if __all__ exists, believe it.  Otherwise use old heuristic.\n', '            if (all is not None\n', '                or (inspect.getmodule(value) or object) is object):\n', '                if visiblename(key, all, object):\n', '                    classes.append((key, value))\n', '        funcs = []\n', '        for key, value in inspect.getmembers(object, inspect.isroutine):\n', '            # if __all__ exists, believe it.  Otherwise use old heuristic.\n', '            if (all is not None or\n', '                inspect.isbuiltin(value) or inspect.getmodule(value) is object):\n', '                if visiblename(key, all, object):\n', '                    funcs.append((key, value))\n', '        data = []\n', '        for key, value in inspect.getmembers(object, isdata):\n', '            if visiblename(key, all, object):\n', '                data.append((key, value))\n', '\n', '        modpkgs = []\n', '        modpkgs_names = set()\n', "        if hasattr(object, '__path__'):\n", '            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):\n', '                modpkgs_names.add(modname)\n', '                if ispkg:\n', "                    modpkgs.append(modname + ' (package)')\n", '                else:\n', '                    modpkgs.append(modname)\n', '\n', '            modpkgs.sort()\n', '            result = result + self.section(\n', "                'PACKAGE CONTENTS', '\\n'.join(modpkgs))\n", '\n', '        # Detect submodules as sometimes created by C extensions\n', '        submodules = []\n', '        for key, value in inspect.getmembers(object, inspect.ismodule):\n', "            if value.__name__.startswith(name + '.') and key not in modpkgs_names:\n", '                submodules.append(key)\n', '        if submodules:\n', '            submodules.sort()\n', '            result = result + self.section(\n', "                'SUBMODULES', '\\n'.join(submodules))\n", '\n', '        if classes:\n', '            classlist = [value for key, value in classes]\n', '            contents = [self.formattree(\n', '                inspect.getclasstree(classlist, 1), name)]\n', '            for key, value in classes:\n', '                contents.append(self.document(value, key, name))\n', "            result = result + self.section('CLASSES', '\\n'.join(contents))\n", '\n', '        if funcs:\n', '            contents = []\n', '            for key, value in funcs:\n', '                contents.append(self.document(value, key, name))\n', "            result = result + self.section('FUNCTIONS', '\\n'.join(contents))\n", '\n', '        if data:\n', '            contents = []\n', '            for key, value in data:\n', '                contents.append(self.docother(value, key, name, maxlen=70))\n', "            result = result + self.section('DATA', '\\n'.join(contents))\n", '\n', "        if hasattr(object, '__version__'):\n", '            version = str(object.__version__)\n', "            if version[:11] == '


 + 'Revision: ' and version[-1:] == '


:\n", '                version = version[11:-1].strip()\n', "            result = result + self.section('VERSION', version)\n", "        if hasattr(object, '__date__'):\n", "            result = result + self.section('DATE', str(object.__date__))\n", "        if hasattr(object, '__author__'):\n", "            result = result + self.section('AUTHOR', str(object.__author__))\n", "        if hasattr(object, '__credits__'):\n", "            result = result + self.section('CREDITS', str(object.__credits__))\n", '        try:\n', '            file = inspect.getabsfile(object)\n', '        except TypeError:\n', "            file = '(built-in)'\n", "        result = result + self.section('FILE', file)\n", '        return result\n', '\n', '    def docclass(self, object, name=None, mod=None, *ignored):\n', '        """Produce text documentation for a given class object."""\n', '        realname = object.__name__\n', '        name = name or realname\n', '        bases = object.__bases__\n', '\n', '        def makename(c, m=object.__module__):\n', '            return classname(c, m)\n', '\n', '        if name == realname:\n', "            title = 'class ' + self.bold(realname)\n", '        else:\n', "            title = self.bold(name) + ' = class ' + realname\n", '        if bases:\n', '            parents = map(makename, bases)\n', "            title = title + '(%s)' % ', '.join(parents)\n", '\n', '        contents = []\n', '        push = contents.append\n', '\n', '        try:\n', '            signature = inspect.signature(object)\n', '        except (ValueError, TypeError):\n', '            signature = None\n', '        if signature:\n', '            argspec = str(signature)\n', "            if argspec and argspec != '()':\n", "                push(name + argspec + '\\n')\n", '\n', '        doc = getdoc(object)\n', '        if doc:\n', "            push(doc + '\\n')\n", '\n', '        # List the mro, if non-trivial.\n', '        mro = deque(inspect.getmro(object))\n', '        if len(mro) > 2:\n', '            push("Method resolution order:")\n', '            for base in mro:\n', "                push('    ' + makename(base))\n", "            push('')\n", '\n', '        # List the built-in subclasses, if any:\n', '        subclasses = sorted(\n', '            (str(cls.__name__) for cls in type.__subclasses__(object)\n', '             if not cls.__name__.startswith("_") and cls.__module__ == "builtins"),\n', '            key=str.lower\n', '        )\n', '        no_of_subclasses = len(subclasses)\n', '        MAX_SUBCLASSES_TO_DISPLAY = 4\n', '        if subclasses:\n', '            push("Built-in subclasses:")\n', '            for subclassname in subclasses[:MAX_SUBCLASSES_TO_DISPLAY]:\n', "                push('    ' + subclassname)\n", '            if no_of_subclasses > MAX_SUBCLASSES_TO_DISPLAY:\n', "                push('    ... and ' +\n", '                     str(no_of_subclasses - MAX_SUBCLASSES_TO_DISPLAY) +\n', "                     ' other subclasses')\n", "            push('')\n", '\n', '        # Cute little class to pump out a horizontal rule between sections.\n', '        class HorizontalRule:\n', '            def __init__(self):\n', '                self.needone = 0\n', '            def maybe(self):\n', '                if self.needone:\n', "                    push('-' * 70)\n", '                self.needone = 1\n', '        hr = HorizontalRule()\n', '\n', '        def spill(msg, attrs, predicate):\n', '            ok, attrs = _split_list(attrs, predicate)\n', '            if ok:\n', '                hr.maybe()\n', '                push(msg)\n', '                for name, kind, homecls, value in ok:\n', '                    try:\n', '                        value = getattr(object, name)\n', '                    except Exception:\n', '                        # Some descriptors may meet a failure in their __get__.\n', '                        # (bug #1785)\n', '                        push(self.docdata(value, name, mod))\n', '                    else:\n', '                        push(self.document(value,\n', '                                        name, mod, object))\n', '            return attrs\n', '\n', '        def spilldescriptors(msg, attrs, predicate):\n', '            ok, attrs = _split_list(attrs, predicate)\n', '            if ok:\n', '                hr.maybe()\n', '                push(msg)\n', '                for name, kind, homecls, value in ok:\n', '                    push(self.docdata(value, name, mod))\n', '            return attrs\n', '\n', '        def spilldata(msg, attrs, predicate):\n', '            ok, attrs = _split_list(attrs, predicate)\n', '            if ok:\n', '                hr.maybe()\n', '                push(msg)\n', '                for name, kind, homecls, value in ok:\n', '                    doc = getdoc(value)\n', '                    try:\n', '                        obj = getattr(object, name)\n', '                    except AttributeError:\n', '                        obj = homecls.__dict__[name]\n', '                    push(self.docother(obj, name, mod, maxlen=70, doc=doc) +\n', "                         '\\n')\n", '            return attrs\n', '\n', '        attrs = [(name, kind, cls, value)\n', '                 for name, kind, cls, value in classify_class_attrs(object)\n', '                 if visiblename(name, obj=object)]\n', '\n', '        while attrs:\n', '            if mro:\n', '                thisclass = mro.popleft()\n', '            else:\n', '                thisclass = attrs[0][2]\n', '            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)\n', '\n', '            if object is not builtins.object and thisclass is builtins.object:\n', '                attrs = inherited\n', '                continue\n', '            elif thisclass is object:\n', '                tag = "defined here"\n', '            else:\n', '                tag = "inherited from %s" % classname(thisclass,\n', '                                                      object.__module__)\n', '\n', '            sort_attributes(attrs, object)\n', '\n', '            # Pump out the attrs, segregated by kind.\n', '            attrs = spill("Methods %s:\\n" % tag, attrs,\n', "                          lambda t: t[1] == 'method')\n", '            attrs = spill("Class methods %s:\\n" % tag, attrs,\n', "                          lambda t: t[1] == 'class method')\n", '            attrs = spill("Static methods %s:\\n" % tag, attrs,\n', "                          lambda t: t[1] == 'static method')\n", '            attrs = spilldescriptors("Readonly properties %s:\\n" % tag, attrs,\n', "                                     lambda t: t[1] == 'readonly property')\n", '            attrs = spilldescriptors("Data descriptors %s:\\n" % tag, attrs,\n', "                                     lambda t: t[1] == 'data descriptor')\n", '            attrs = spilldata("Data and other attributes %s:\\n" % tag, attrs,\n', "                              lambda t: t[1] == 'data')\n", '\n', '            assert attrs == []\n', '            attrs = inherited\n', '\n', "        contents = '\\n'.join(contents)\n", '        if not contents:\n', "            return title + '\\n'\n", "        return title + '\\n' + self.indent(contents.rstrip(), ' |  ') + '\\n'\n", '\n', '    def formatvalue(self, object):\n', '        """Format an argument default value as text."""\n', "        return '=' + self.repr(object)\n", '\n', '    def docroutine(self, object, name=None, mod=None, cl=None):\n', '        """Produce text documentation for a function or method object."""\n', '        realname = object.__name__\n', '        name = name or realname\n', "        note = ''\n", '        skipdocs = 0\n', '        if _is_bound_method(object):\n', '            imclass = object.__self__.__class__\n', '            if cl:\n', '                if imclass is not cl:\n', "                    note = ' from ' + classname(imclass, mod)\n", '            else:\n', '                if object.__self__ is not None:\n', "                    note = ' method of %s instance' % classname(\n", '                        object.__self__.__class__, mod)\n', '                else:\n', "                    note = ' unbound %s method' % classname(imclass,mod)\n", '\n', '        if (inspect.iscoroutinefunction(object) or\n', '                inspect.isasyncgenfunction(object)):\n', "            asyncqualifier = 'async '\n", '        else:\n', "            asyncqualifier = ''\n", '\n', '        if name == realname:\n', '            title = self.bold(realname)\n', '        else:\n', '            if cl and inspect.getattr_static(cl, realname, []) is object:\n', '                skipdocs = 1\n', "            title = self.bold(name) + ' = ' + realname\n", '        argspec = None\n', '\n', '        if inspect.isroutine(object):\n', '            try:\n', '                signature = inspect.signature(object)\n', '            except (ValueError, TypeError):\n', '                signature = None\n', '            if signature:\n', '                argspec = str(signature)\n', "                if realname == '<lambda>':\n", "                    title = self.bold(name) + ' lambda '\n", "                    # XXX lambda's won't usually have func_annotations['return']\n", "                    # since the syntax doesn't support but it is possible.\n", "                    # So removing parentheses isn't truly safe.\n", '                    argspec = argspec[1:-1] # remove parentheses\n', '        if not argspec:\n', "            argspec = '(...)'\n", '        decl = asyncqualifier + title + argspec + note\n', '\n', '        if skipdocs:\n', "            return decl + '\\n'\n", '        else:\n', "            doc = getdoc(object) or ''\n", "            return decl + '\\n' + (doc and self.indent(doc).rstrip() + '\\n')\n", '\n', '    def docdata(self, object, name=None, mod=None, cl=None):\n', '        """Produce text documentation for a data descriptor."""\n', '        results = []\n', '        push = results.append\n', '\n', '        if name:\n', '            push(self.bold(name))\n', "            push('\\n')\n", "        doc = getdoc(object) or ''\n", '        if doc:\n', '            push(self.indent(doc))\n', "            push('\\n')\n", "        return ''.join(results)\n", '\n', '    docproperty = docdata\n', '\n', '    def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):\n', '        """Produce text documentation for a data object."""\n', '        repr = self.repr(object)\n', '        if maxlen:\n', "            line = (name and name + ' = ' or '') + repr\n", '            chop = maxlen - len(line)\n', "            if chop < 0: repr = repr[:chop] + '...'\n", "        line = (name and self.bold(name) + ' = ' or '') + repr\n", '        if not doc:\n', '            doc = getdoc(object)\n', '        if doc:\n', "            line += '\\n' + self.indent(str(doc)) + '\\n'\n", '        return line\n', '\n', 'class _PlainTextDoc(TextDoc):\n', '    """Subclass of TextDoc which overrides string styling"""\n', '    def bold(self, text):\n', '        return text\n', '\n', '# --------------------------------------------------------- user interfaces\n', '\n', 'def pager(text):\n', '    """The first time this is called, determine what kind of pager to use."""\n', '    global pager\n', '    pager = getpager()\n', '    pager(text)\n', '\n', 'def getpager():\n', '    """Decide what method to use for paging through text."""\n', '    if not hasattr(sys.stdin, "isatty"):\n', '        return plainpager\n', '    if not hasattr(sys.stdout, "isatty"):\n', '        return plainpager\n', '    if not sys.stdin.isatty() or not sys.stdout.isatty():\n', '        return plainpager\n', "    use_pager = os.environ.get('MANPAGER') or os.environ.get('PAGER')\n", '    if use_pager:\n', "        if sys.platform == 'win32': # pipes completely broken in Windows\n", '            return lambda text: tempfilepager(plain(text), use_pager)\n', "        elif os.environ.get('TERM') in ('dumb', 'emacs'):\n", '            return lambda text: pipepager(plain(text), use_pager)\n', '        else:\n', '            return lambda text: pipepager(text, use_pager)\n', "    if os.environ.get('TERM') in ('dumb', 'emacs'):\n", '        return plainpager\n', "    if sys.platform == 'win32':\n", "        return lambda text: tempfilepager(plain(text), 'more <')\n", "    if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:\n", "        return lambda text: pipepager(text, 'less')\n", '\n', '    import tempfile\n', '    (fd, filename) = tempfile.mkstemp()\n', '    os.close(fd)\n', '    try:\n', '        if hasattr(os, \'system\') and os.system(\'more "%s"\' % filename) == 0:\n', "            return lambda text: pipepager(text, 'more')\n", '        else:\n', '            return ttypager\n', '    finally:\n', '        os.unlink(filename)\n', '\n', 'def plain(text):\n', '    """Remove boldface formatting from text."""\n', "    return re.sub('.\\b', '', text)\n", '\n', 'def pipepager(text, cmd):\n', '    """Page through text by feeding it to another program."""\n', '    import subprocess\n', '    proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)\n', '    try:\n', "        with io.TextIOWrapper(proc.stdin, errors='backslashreplace') as pipe:\n", '            try:\n', '                pipe.write(text)\n', '            except KeyboardInterrupt:\n', "                # We've hereby abandoned whatever text hasn't been written,\n", '                # but the pager is still in control of the terminal.\n', '                pass\n', '    except OSError:\n', '        pass # Ignore broken pipes caused by quitting the pager program.\n', '    while True:\n', '        try:\n', '            proc.wait()\n', '            break\n', '        except KeyboardInterrupt:\n', '            # Ignore ctl-c like the pager itself does.  Otherwise the pager is\n', '            # left running and the terminal is in raw mode and unusable.\n', '            pass\n', '\n', 'def tempfilepager(text, cmd):\n', '    """Page through text by invoking a program on a temporary file."""\n', '    import tempfile\n', '    filename = tempfile.mktemp()\n', "    with open(filename, 'w', errors='backslashreplace') as file:\n", '        file.write(text)\n', '    try:\n', '        os.system(cmd + \' "\' + filename + \'"\')\n', '    finally:\n', '        os.unlink(filename)\n', '\n', 'def _escape_stdout(text):\n', '    # Escape non-encodable characters to avoid encoding errors later\n', "    encoding = getattr(sys.stdout, 'encoding', None) or 'utf-8'\n", "    return text.encode(encoding, 'backslashreplace').decode(encoding)\n", '\n', 'def ttypager(text):\n', '    """Page through text on a text terminal."""\n', "    lines = plain(_escape_stdout(text)).split('\\n')\n", '    try:\n', '        import tty\n', '        fd = sys.stdin.fileno()\n', '        old = tty.tcgetattr(fd)\n', '        tty.setcbreak(fd)\n', '        getchar = lambda: sys.stdin.read(1)\n', '    except (ImportError, AttributeError, io.UnsupportedOperation):\n', '        tty = None\n', '        getchar = lambda: sys.stdin.readline()[:-1][:1]\n', '\n', '    try:\n', '        try:\n', "            h = int(os.environ.get('LINES', 0))\n", '        except ValueError:\n', '            h = 0\n', '        if h <= 1:\n', '            h = 25\n', '        r = inc = h - 1\n', "        sys.stdout.write('\\n'.join(lines[:inc]) + '\\n')\n", '        while lines[r:]:\n', "            sys.stdout.write('-- more --')\n", '            sys.stdout.flush()\n', '            c = getchar()\n', '\n', "            if c in ('q', 'Q'):\n", "                sys.stdout.write('\\r          \\r')\n", '                break\n', "            elif c in ('\\r', '\\n'):\n", "                sys.stdout.write('\\r          \\r' + lines[r] + '\\n')\n", '                r = r + 1\n', '                continue\n', "            if c in ('b', 'B', '\\x1b'):\n", '                r = r - inc - inc\n', '                if r < 0: r = 0\n', "            sys.stdout.write('\\n' + '\\n'.join(lines[r:r+inc]) + '\\n')\n", '            r = r + inc\n', '\n', '    finally:\n', '        if tty:\n', '            tty.tcsetattr(fd, tty.TCSAFLUSH, old)\n', '\n', 'def plainpager(text):\n', '    """Simply print unformatted text.  This is the ultimate fallback."""\n', '    sys.stdout.write(plain(_escape_stdout(text)))\n', '\n', 'def describe(thing):\n', '    """Produce a short description of the given thing."""\n', '    if inspect.ismodule(thing):\n', '        if thing.__name__ in sys.builtin_module_names:\n', "            return 'built-in module ' + thing.__name__\n", "        if hasattr(thing, '__path__'):\n", "            return 'package ' + thing.__name__\n", '        else:\n', "            return 'module ' + thing.__name__\n", '    if inspect.isbuiltin(thing):\n', "        return 'built-in function ' + thing.__name__\n", '    if inspect.isgetsetdescriptor(thing):\n', "        return 'getset descriptor %s.%s.%s' % (\n", '            thing.__objclass__.__module__, thing.__objclass__.__name__,\n', '            thing.__name__)\n', '    if inspect.ismemberdescriptor(thing):\n', "        return 'member descriptor %s.%s.%s' % (\n", '            thing.__objclass__.__module__, thing.__objclass__.__name__,\n', '            thing.__name__)\n', '    if inspect.isclass(thing):\n', "        return 'class ' + thing.__name__\n", '    if inspect.isfunction(thing):\n', "        return 'function ' + thing.__name__\n", '    if inspect.ismethod(thing):\n', "        return 'method ' + thing.__name__\n", '    return type(thing).__name__\n', '\n', 'def locate(path, forceload=0):\n', '    """Locate an object by name or dotted path, importing as necessary."""\n', "    parts = [part for part in path.split('.') if part]\n", '    module, n = None, 0\n', '    while n < len(parts):\n', "        nextmodule = safeimport('.'.join(parts[:n+1]), forceload)\n", '        if nextmodule: module, n = nextmodule, n + 1\n', '        else: break\n', '    if module:\n', '        object = module\n', '    else:\n', '        object = builtins\n', '    for part in parts[n:]:\n', '        try:\n', '            object = getattr(object, part)\n', '        except AttributeError:\n', '            return None\n', '    return object\n', '\n', '# --------------------------------------- interactive interpreter interface\n', '\n', 'text = TextDoc()\n', 'plaintext = _PlainTextDoc()\n', 'html = HTMLDoc()\n', '\n', 'def resolve(thing, forceload=0):\n', '    """Given an object or a path to an object, get the object and its name."""\n', '    if isinstance(thing, str):\n', '        object = locate(thing, forceload)\n', '        if object is None:\n', "            raise ImportError('''\\\n", 'No Python documentation found for %r.\n', 'Use help() to get the interactive help utility.\n', "Use help(str) for help on the str class.''' % thing)\n", '        return object, thing\n', '    else:\n', "        name = getattr(thing, '__name__', None)\n", '        return thing, name if isinstance(name, str) else None\n', '\n', "def render_doc(thing, title='Python Library Documentation: %s', forceload=0,\n", '        renderer=None):\n', '    """Render text documentation, given an object or a path to an object."""\n', '    if renderer is None:\n', '        renderer = text\n', '    object, name = resolve(thing, forceload)\n', '    desc = describe(object)\n', '    module = inspect.getmodule(object)\n', "    if name and '.' in name:\n", "        desc += ' in ' + name[:name.rfind('.')]\n", '    elif module and module is not object:\n', "        desc += ' in module ' + module.__name__\n", '\n', '    if not (inspect.ismodule(object) or\n', '              inspect.isclass(object) or\n', '              inspect.isroutine(object) or\n', '              inspect.isdatadescriptor(object) or\n', '              _getdoc(object)):\n', '        # If the passed object is a piece of data or an instance,\n', '        # document its available methods instead of its value.\n', "        if hasattr(object, '__origin__'):\n", '            object = object.__origin__\n', '        else:\n', '            object = type(object)\n', "            desc += ' object'\n", "    return title % desc + '\\n\\n' + renderer.document(object, name)\n", '\n', "def doc(thing, title='Python Library Documentation: %s', forceload=0,\n", '        output=None):\n', '    """Display text documentation, given an object or a path to an object."""\n', '    try:\n', '        if output is None:\n', '            pager(render_doc(thing, title, forceload))\n', '        else:\n', '            output.write(render_doc(thing, title, forceload, plaintext))\n', '    except (ImportError, ErrorDuringImport) as value:\n', '        print(value)\n', '\n', 'def writedoc(thing, forceload=0):\n', '    """Write HTML documentation to a file in the current directory."""\n', '    try:\n', '        object, name = resolve(thing, forceload)\n', '        page = html.page(describe(object), html.document(object, name))\n', "        with open(name + '.html', 'w', encoding='utf-8') as file:\n", '            file.write(page)\n', "        print('wrote', name + '.html')\n", '    except (ImportError, ErrorDuringImport) as value:\n', '        print(value)\n', '\n', "def writedocs(dir, pkgpath='', done=None):\n", '    """Write out HTML documentation for all modules in a directory tree."""\n', '    if done is None: done = {}\n', '    for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):\n', '        writedoc(modname)\n', '    return\n', '\n', 'class Helper:\n', '\n', '    # These dictionaries map a topic name to either an alias, or a tuple\n', '    # (label, seealso-items).  The "label" is the label of the corresponding\n', '    # section in the .rst file under Doc/ and an index into the dictionary\n', '    # in pydoc_data/topics.py.\n', '    #\n', '    # CAUTION: if you change one of these dictionaries, be sure to adapt the\n', '    #          list of needed labels in Doc/tools/extensions/pyspecific.py and\n', '    #          regenerate the pydoc_data/topics.py file by running\n', '    #              make pydoc-topics\n', '    #          in Doc/ and copying the output file into the Lib/ directory.\n', '\n', '    keywords = {\n', "        'False': '',\n", "        'None': '',\n", "        'True': '',\n", "        '__peg_parser__': '',\n", "        'and': 'BOOLEAN',\n", "        'as': 'with',\n", "        'assert': ('assert', ''),\n", "        'async': ('async', ''),\n", "        'await': ('await', ''),\n", "        'break': ('break', 'while for'),\n", "        'class': ('class', 'CLASSES SPECIALMETHODS'),\n", "        'continue': ('continue', 'while for'),\n", "        'def': ('function', ''),\n", "        'del': ('del', 'BASICMETHODS'),\n", "        'elif': 'if',\n", "        'else': ('else', 'while for'),\n", "        'except': 'try',\n", "        'finally': 'try',\n", "        'for': ('for', 'break continue while'),\n", "        'from': 'import',\n", "        'global': ('global', 'nonlocal NAMESPACES'),\n", "        'if': ('if', 'TRUTHVALUE'),\n", "        'import': ('import', 'MODULES'),\n", "        'in': ('in', 'SEQUENCEMETHODS'),\n", "        'is': 'COMPARISON',\n", "        'lambda': ('lambda', 'FUNCTIONS'),\n", "        'nonlocal': ('nonlocal', 'global NAMESPACES'),\n", "        'not': 'BOOLEAN',\n", "        'or': 'BOOLEAN',\n", "        'pass': ('pass', ''),\n", "        'raise': ('raise', 'EXCEPTIONS'),\n", "        'return': ('return', 'FUNCTIONS'),\n", "        'try': ('try', 'EXCEPTIONS'),\n", "        'while': ('while', 'break continue if TRUTHVALUE'),\n", "        'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),\n", "        'yield': ('yield', ''),\n", '    }\n', '    # Either add symbols to this dictionary or to the symbols dictionary\n', '    # directly: Whichever is easier. They are merged later.\n', '    _strprefixes = [p + q for p in (\'b\', \'f\', \'r\', \'u\') for q in ("\'", \'"\')]\n', '    _symbols_inverse = {\n', '        \'STRINGS\' : ("\'", "\'\'\'", \'"\', \'"""\', *_strprefixes),\n', "        'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',\n", "                       '|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),\n", "        'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),\n", "        'UNARY' : ('-', '~'),\n", "        'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',\n", "                                '^=', '<<=', '>>=', '**=', '//='),\n", "        'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),\n", "        'COMPLEX' : ('j', 'J')\n", '    }\n', '    symbols = {\n', "        '%': 'OPERATORS FORMATTING',\n", "        '**': 'POWER',\n", "        ',': 'TUPLES LISTS FUNCTIONS',\n", "        '.': 'ATTRIBUTES FLOAT MODULES OBJECTS',\n", "        '...': 'ELLIPSIS',\n", "        ':': 'SLICINGS DICTIONARYLITERALS',\n", "        '@': 'def class',\n", "        '\\\\': 'STRINGS',\n", "        '_': 'PRIVATENAMES',\n", "        '__': 'PRIVATENAMES SPECIALMETHODS',\n", "        '`': 'BACKQUOTES',\n", "        '(': 'TUPLES FUNCTIONS CALLS',\n", "        ')': 'TUPLES FUNCTIONS CALLS',\n", "        '[': 'LISTS SUBSCRIPTS SLICINGS',\n", "        ']': 'LISTS SUBSCRIPTS SLICINGS'\n", '    }\n', '    for topic, symbols_ in _symbols_inverse.items():\n', '        for symbol in symbols_:\n', '            topics = symbols.get(symbol, topic)\n', '            if topic not in topics:\n', "                topics = topics + ' ' + topic\n", '            symbols[symbol] = topics\n', '\n', '    topics = {\n', "        'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '\n", "                  'FUNCTIONS CLASSES MODULES FILES inspect'),\n", "        'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS '\n", "                    'FORMATTING TYPES'),\n", "        'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),\n", "        'FORMATTING': ('formatstrings', 'OPERATORS'),\n", "        'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '\n", "                    'FORMATTING TYPES'),\n", "        'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),\n", "        'INTEGER': ('integers', 'int range'),\n", "        'FLOAT': ('floating', 'float math'),\n", "        'COMPLEX': ('imaginary', 'complex cmath'),\n", "        'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'),\n", "        'MAPPINGS': 'DICTIONARIES',\n", "        'FUNCTIONS': ('typesfunctions', 'def TYPES'),\n", "        'METHODS': ('typesmethods', 'class def CLASSES TYPES'),\n", "        'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),\n", "        'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),\n", "        'FRAMEOBJECTS': 'TYPES',\n", "        'TRACEBACKS': 'TYPES',\n", "        'NONE': ('bltin-null-object', ''),\n", "        'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),\n", "        'SPECIALATTRIBUTES': ('specialattrs', ''),\n", "        'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),\n", "        'MODULES': ('typesmodules', 'import'),\n", "        'PACKAGES': 'import',\n", "        'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '\n", "                        'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '\n", "                        'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '\n", "                        'LISTS DICTIONARIES'),\n", "        'OPERATORS': 'EXPRESSIONS',\n", "        'PRECEDENCE': 'EXPRESSIONS',\n", "        'OBJECTS': ('objects', 'TYPES'),\n", "        'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '\n", "                           'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS '\n", "                           'NUMBERMETHODS CLASSES'),\n", "        'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'),\n", "        'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),\n", "        'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),\n", "        'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS '\n", "                             'SPECIALMETHODS'),\n", "        'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),\n", "        'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '\n", "                          'SPECIALMETHODS'),\n", "        'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),\n", "        'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT DELETION DYNAMICFEATURES'),\n", "        'DYNAMICFEATURES': ('dynamic-features', ''),\n", "        'SCOPING': 'NAMESPACES',\n", "        'FRAMES': 'NAMESPACES',\n", "        'EXCEPTIONS': ('exceptions', 'try except finally raise'),\n", "        'CONVERSIONS': ('conversions', ''),\n", "        'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),\n", "        'SPECIALIDENTIFIERS': ('id-classes', ''),\n", "        'PRIVATENAMES': ('atom-identifiers', ''),\n", "        'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS '\n", "                     'LISTLITERALS DICTIONARYLITERALS'),\n", "        'TUPLES': 'SEQUENCES',\n", "        'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),\n", "        'LISTS': ('typesseq-mutable', 'LISTLITERALS'),\n", "        'LISTLITERALS': ('lists', 'LISTS LITERALS'),\n", "        'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),\n", "        'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),\n", "        'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),\n", "        'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'),\n", "        'SLICINGS': ('slicings', 'SEQUENCEMETHODS'),\n", "        'CALLS': ('calls', 'EXPRESSIONS'),\n", "        'POWER': ('power', 'EXPRESSIONS'),\n", "        'UNARY': ('unary', 'EXPRESSIONS'),\n", "        'BINARY': ('binary', 'EXPRESSIONS'),\n", "        'SHIFTING': ('shifting', 'EXPRESSIONS'),\n", "        'BITWISE': ('bitwise', 'EXPRESSIONS'),\n", "        'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),\n", "        'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),\n", "        'ASSERTION': 'assert',\n", "        'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),\n", "        'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),\n", "        'DELETION': 'del',\n", "        'RETURNING': 'return',\n", "        'IMPORTING': 'import',\n", "        'CONDITIONAL': 'if',\n", "        'LOOPING': ('compound', 'for while break continue'),\n", "        'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),\n", "        'DEBUGGING': ('debugger', 'pdb'),\n", "        'CONTEXTMANAGERS': ('context-managers', 'with'),\n", '    }\n', '\n', '    def __init__(self, input=None, output=None):\n', '        self._input = input\n', '        self._output = output\n', '\n', '    @property\n', '    def input(self):\n', '        return self._input or sys.stdin\n', '\n', '    @property\n', '    def output(self):\n', '        return self._output or sys.stdout\n', '\n', '    def __repr__(self):\n', "        if inspect.stack()[1][3] == '?':\n", '            self()\n', "            return ''\n", "        return '<%s.%s instance>' % (self.__class__.__module__,\n", '                                     self.__class__.__qualname__)\n', '\n', '    _GoInteractive = object()\n', '    def __call__(self, request=_GoInteractive):\n', '        if request is not self._GoInteractive:\n', '            self.help(request)\n', '        else:\n', '            self.intro()\n', '            self.interact()\n', "            self.output.write('''\n", 'You are now leaving help and returning to the Python interpreter.\n', 'If you want to ask for help on a particular object directly from the\n', 'interpreter, you can type "help(object)".  Executing "help(\'string\')"\n', 'has the same effect as typing a particular string at the help> prompt.\n', "''')\n", '\n', '    def interact(self):\n', "        self.output.write('\\n')\n", '        while True:\n', '            try:\n', "                request = self.getline('help> ')\n", '                if not request: break\n', '            except (KeyboardInterrupt, EOFError):\n', '                break\n', '            request = request.strip()\n', '\n', "            # Make sure significant trailing quoting marks of literals don't\n", '            # get deleted while cleaning input\n', '            if (len(request) > 2 and request[0] == request[-1] in ("\'", \'"\')\n', '                    and request[0] not in request[1:-1]):\n', '                request = request[1:-1]\n', "            if request.lower() in ('q', 'quit'): break\n", "            if request == 'help':\n", '                self.intro()\n', '            else:\n', '                self.help(request)\n', '\n', '    def getline(self, prompt):\n', '        """Read one line, using input() when appropriate."""\n', '        if self.input is sys.stdin:\n', '            return input(prompt)\n', '        else:\n', '            self.output.write(prompt)\n', '            self.output.flush()\n', '            return self.input.readline()\n', '\n', '    def help(self, request):\n', "        if type(request) is type(''):\n", '            request = request.strip()\n', "            if request == 'keywords': self.listkeywords()\n", "            elif request == 'symbols': self.listsymbols()\n", "            elif request == 'topics': self.listtopics()\n", "            elif request == 'modules': self.listmodules()\n", "            elif request[:8] == 'modules ':\n", '                self.listmodules(request.split()[1])\n', '            elif request in self.symbols: self.showsymbol(request)\n', "            elif request in ['True', 'False', 'None']:\n", '                # special case these keywords since they are objects too\n', "                doc(eval(request), 'Help on %s:')\n", '            elif request in self.keywords: self.showtopic(request)\n', '            elif request in self.topics: self.showtopic(request)\n', "            elif request: doc(request, 'Help on %s:', output=self._output)\n", "            else: doc(str, 'Help on %s:', output=self._output)\n", '        elif isinstance(request, Helper): self()\n', "        else: doc(request, 'Help on %s:', output=self._output)\n", "        self.output.write('\\n')\n", '\n', '    def intro(self):\n', "        self.output.write('''\n", "Welcome to Python {0}'s help utility!\n", '\n', 'If this is your first time using Python, you should definitely check out\n', 'the tutorial on the Internet at https://docs.python.org/{0}/tutorial/.\n', '\n', 'Enter the name of any module, keyword, or topic to get help on writing\n', 'Python programs and using Python modules.  To quit this help utility and\n', 'return to the interpreter, just type "quit".\n', '\n', 'To get a list of available modules, keywords, symbols, or topics, type\n', '"modules", "keywords", "symbols", or "topics".  Each module also comes\n', 'with a one-line summary of what it does; to list the modules whose name\n', 'or summary contain a given string such as "spam", type "modules spam".\n', "'''.format('%d.%d' % sys.version_info[:2]))\n", '\n', '    def list(self, items, columns=4, width=80):\n', '        items = list(sorted(items))\n', '        colw = width // columns\n', '        rows = (len(items) + columns - 1) // columns\n', '        for row in range(rows):\n', '            for col in range(columns):\n', '                i = col * rows + row\n', '                if i < len(items):\n', '                    self.output.write(items[i])\n', '                    if col < columns - 1:\n', "                        self.output.write(' ' + ' ' * (colw - 1 - len(items[i])))\n", "            self.output.write('\\n')\n", '\n', '    def listkeywords(self):\n', "        self.output.write('''\n", 'Here is a list of the Python keywords.  Enter any keyword to get more help.\n', '\n', "''')\n", '        self.list(self.keywords.keys())\n', '\n', '    def listsymbols(self):\n', "        self.output.write('''\n", 'Here is a list of the punctuation symbols which Python assigns special meaning\n', 'to. Enter any symbol to get more help.\n', '\n', "''')\n", '        self.list(self.symbols.keys())\n', '\n', '    def listtopics(self):\n', "        self.output.write('''\n", 'Here is a list of available topics.  Enter any topic name to get more help.\n', '\n', "''')\n", '        self.list(self.topics.keys())\n', '\n', "    def showtopic(self, topic, more_xrefs=''):\n", '        try:\n', '            import pydoc_data.topics\n', '        except ImportError:\n', "            self.output.write('''\n", 'Sorry, topic and keyword documentation is not available because the\n', 'module "pydoc_data.topics" could not be found.\n', "''')\n", '            return\n', '        target = self.topics.get(topic, self.keywords.get(topic))\n', '        if not target:\n', "            self.output.write('no documentation found for %s\\n' % repr(topic))\n", '            return\n', "        if type(target) is type(''):\n", '            return self.showtopic(target, more_xrefs)\n', '\n', '        label, xrefs = target\n', '        try:\n', '            doc = pydoc_data.topics.topics[label]\n', '        except KeyError:\n', "            self.output.write('no documentation found for %s\\n' % repr(topic))\n", '            return\n', "        doc = doc.strip() + '\\n'\n", '        if more_xrefs:\n', "            xrefs = (xrefs or '') + ' ' + more_xrefs\n", '        if xrefs:\n', '            import textwrap\n', "            text = 'Related help topics: ' + ', '.join(xrefs.split()) + '\\n'\n", '            wrapped_text = textwrap.wrap(text, 72)\n', "            doc += '\\n%s\\n' % '\\n'.join(wrapped_text)\n", '        pager(doc)\n', '\n', "    def _gettopic(self, topic, more_xrefs=''):\n", '        """Return unbuffered tuple of (topic, xrefs).\n', '\n', '        If an error occurs here, the exception is caught and displayed by\n', '        the url handler.\n', '\n', '        This function duplicates the showtopic method but returns its\n', '        result directly so it can be formatted for display in an html page.\n', '        """\n', '        try:\n', '            import pydoc_data.topics\n', '        except ImportError:\n', "            return('''\n", 'Sorry, topic and keyword documentation is not available because the\n', 'module "pydoc_data.topics" could not be found.\n', "''' , '')\n", '        target = self.topics.get(topic, self.keywords.get(topic))\n', '        if not target:\n', "            raise ValueError('could not find topic')\n", '        if isinstance(target, str):\n', '            return self._gettopic(target, more_xrefs)\n', '        label, xrefs = target\n', '        doc = pydoc_data.topics.topics[label]\n', '        if more_xrefs:\n', "            xrefs = (xrefs or '') + ' ' + more_xrefs\n", '        return doc, xrefs\n', '\n', '    def showsymbol(self, symbol):\n', '        target = self.symbols[symbol]\n', "        topic, _, xrefs = target.partition(' ')\n", '        self.showtopic(topic, xrefs)\n', '\n', "    def listmodules(self, key=''):\n", '        if key:\n', "            self.output.write('''\n", "Here is a list of modules whose name or summary contains '{}'.\n", 'If there are any, enter a module name to get more help.\n', '\n', "'''.format(key))\n", '            apropos(key)\n', '        else:\n', "            self.output.write('''\n", 'Please wait a moment while I gather a list of all available modules...\n', '\n', "''')\n", '            modules = {}\n', '            def callback(path, modname, desc, modules=modules):\n', "                if modname and modname[-9:] == '.__init__':\n", "                    modname = modname[:-9] + ' (package)'\n", "                if modname.find('.') < 0:\n", '                    modules[modname] = 1\n', '            def onerror(modname):\n', '                callback(None, modname, None)\n', '            ModuleScanner().run(callback, onerror=onerror)\n', '            self.list(modules.keys())\n', "            self.output.write('''\n", 'Enter any module name to get more help.  Or, type "modules spam" to search\n', 'for modules whose name or summary contain the string "spam".\n', "''')\n", '\n', 'help = Helper()\n', '\n', 'class ModuleScanner:\n', '    """An interruptible scanner that searches module synopses."""\n', '\n', '    def run(self, callback, key=None, completer=None, onerror=None):\n', '        if key: key = key.lower()\n', '        self.quit = False\n', '        seen = {}\n', '\n', '        for modname in sys.builtin_module_names:\n', "            if modname != '__main__':\n", '                seen[modname] = 1\n', '                if key is None:\n', "                    callback(None, modname, '')\n", '                else:\n', "                    name = __import__(modname).__doc__ or ''\n", "                    desc = name.split('\\n')[0]\n", "                    name = modname + ' - ' + desc\n", '                    if name.lower().find(key) >= 0:\n', '                        callback(None, modname, desc)\n', '\n', '        for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):\n', '            if self.quit:\n', '                break\n', '\n', '            if key is None:\n', "                callback(None, modname, '')\n", '            else:\n', '                try:\n', '                    spec = pkgutil._get_spec(importer, modname)\n', '                except SyntaxError:\n', '                    # raised by tests for bad coding cookies or BOM\n', '                    continue\n', '                loader = spec.loader\n', "                if hasattr(loader, 'get_source'):\n", '                    try:\n', '                        source = loader.get_source(modname)\n', '                    except Exception:\n', '                        if onerror:\n', '                            onerror(modname)\n', '                        continue\n', "                    desc = source_synopsis(io.StringIO(source)) or ''\n", "                    if hasattr(loader, 'get_filename'):\n", '                        path = loader.get_filename(modname)\n', '                    else:\n', '                        path = None\n', '                else:\n', '                    try:\n', '                        module = importlib._bootstrap._load(spec)\n', '                    except ImportError:\n', '                        if onerror:\n', '                            onerror(modname)\n', '                        continue\n', "                    desc = module.__doc__.splitlines()[0] if module.__doc__ else ''\n", "                    path = getattr(module,'__file__',None)\n", "                name = modname + ' - ' + desc\n", '                if name.lower().find(key) >= 0:\n', '                    callback(path, modname, desc)\n', '\n', '        if completer:\n', '            completer()\n', '\n', 'def apropos(key):\n', '    """Print all the one-line module summaries that contain a substring."""\n', '    def callback(path, modname, desc):\n', "        if modname[-9:] == '.__init__':\n", "            modname = modname[:-9] + ' (package)'\n", "        print(modname, desc and '- ' + desc)\n", '    def onerror(modname):\n', '        pass\n', '    with warnings.catch_warnings():\n', "        warnings.filterwarnings('ignore') # ignore problems during import\n", '        ModuleScanner().run(callback, key, onerror=onerror)\n', '\n', '# --------------------------------------- enhanced Web browser interface\n', '\n', 'def _start_server(urlhandler, hostname, port):\n', '    """Start an HTTP server thread on a specific port.\n', '\n', '    Start an HTML/text server thread, so HTML or text documents can be\n', '    browsed dynamically and interactively with a Web browser.  Example use:\n', '\n', '        >>> import time\n', '        >>> import pydoc\n', '\n', '        Define a URL handler.  To determine what the client is asking\n', '        for, check the URL and content_type.\n', '\n', '        Then get or generate some text or HTML code and return it.\n', '\n', '        >>> def my_url_handler(url, content_type):\n', "        ...     text = 'the URL sent was: (%s, %s)' % (url, content_type)\n", '        ...     return text\n', '\n', '        Start server thread on port 0.\n', '        If you use port 0, the server will pick a random port number.\n', '        You can then use serverthread.port to get the port number.\n', '\n', '        >>> port = 0\n', '        >>> serverthread = pydoc._start_server(my_url_handler, port)\n', '\n', '        Check that the server is really started.  If it is, open browser\n', '        and get first page.  Use serverthread.url as the starting page.\n', '\n', '        >>> if serverthread.serving:\n', '        ...    import webbrowser\n', '\n', "        The next two lines are commented out so a browser doesn't open if\n", '        doctest is run on this module.\n', '\n', '        #...    webbrowser.open(serverthread.url)\n', '        #True\n', '\n', '        Let the server do its thing. We just need to monitor its status.\n', "        Use time.sleep so the loop doesn't hog the CPU.\n", '\n', '        >>> starttime = time.monotonic()\n', '        >>> timeout = 1                    #seconds\n', '\n', '        This is a short timeout for testing purposes.\n', '\n', '        >>> while serverthread.serving:\n', '        ...     time.sleep(.01)\n', '        ...     if serverthread.serving and time.monotonic() - starttime > timeout:\n', '        ...          serverthread.stop()\n', '        ...          break\n', '\n', '        Print any errors that may have occurred.\n', '\n', '        >>> print(serverthread.error)\n', '        None\n', '   """\n', '    import http.server\n', '    import email.message\n', '    import select\n', '    import threading\n', '\n', '    class DocHandler(http.server.BaseHTTPRequestHandler):\n', '\n', '        def do_GET(self):\n', '            """Process a request from an HTML browser.\n', '\n', '            The URL received is in self.path.\n', '            Get an HTML page from self.urlhandler and send it.\n', '            """\n', "            if self.path.endswith('.css'):\n", "                content_type = 'text/css'\n", '            else:\n', "                content_type = 'text/html'\n", '            self.send_response(200)\n', "            self.send_header('Content-Type', '%s; charset=UTF-8' % content_type)\n", '            self.end_headers()\n', '            self.wfile.write(self.urlhandler(\n', "                self.path, content_type).encode('utf-8'))\n", '\n', '        def log_message(self, *args):\n', "            # Don't log messages.\n", '            pass\n', '\n', '    class DocServer(http.server.HTTPServer):\n', '\n', '        def __init__(self, host, port, callback):\n', '            self.host = host\n', '            self.address = (self.host, port)\n', '            self.callback = callback\n', '            self.base.__init__(self, self.address, self.handler)\n', '            self.quit = False\n', '\n', '        def serve_until_quit(self):\n', '            while not self.quit:\n', '                rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)\n', '                if rd:\n', '                    self.handle_request()\n', '            self.server_close()\n', '\n', '        def server_activate(self):\n', '            self.base.server_activate(self)\n', '            if self.callback:\n', '                self.callback(self)\n', '\n', '    class ServerThread(threading.Thread):\n', '\n', '        def __init__(self, urlhandler, host, port):\n', '            self.urlhandler = urlhandler\n', '            self.host = host\n', '            self.port = int(port)\n', '            threading.Thread.__init__(self)\n', '            self.serving = False\n', '            self.error = None\n', '\n', '        def run(self):\n', '            """Start the server."""\n', '            try:\n', '                DocServer.base = http.server.HTTPServer\n', '                DocServer.handler = DocHandler\n', '                DocHandler.MessageClass = email.message.Message\n', '                DocHandler.urlhandler = staticmethod(self.urlhandler)\n', '                docsvr = DocServer(self.host, self.port, self.ready)\n', '                self.docserver = docsvr\n', '                docsvr.serve_until_quit()\n', '            except Exception as e:\n', '                self.error = e\n', '\n', '        def ready(self, server):\n', '            self.serving = True\n', '            self.host = server.host\n', '            self.port = server.server_port\n', "            self.url = 'http://%s:%d/' % (self.host, self.port)\n", '\n', '        def stop(self):\n', '            """Stop the server and this thread nicely"""\n', '            self.docserver.quit = True\n', '            self.join()\n', '            # explicitly break a reference cycle: DocServer.callback\n', '            # has indirectly a reference to ServerThread.\n', '            self.docserver = None\n', '            self.serving = False\n', '            self.url = None\n', '\n', '    thread = ServerThread(urlhandler, hostname, port)\n', '    thread.start()\n', '    # Wait until thread.serving is True to make sure we are\n', '    # really up before returning.\n', '    while not thread.error and not thread.serving:\n', '        time.sleep(.01)\n', '    return thread\n', '\n', '\n', 'def _url_handler(url, content_type="text/html"):\n', '    """The pydoc url handler for use with the pydoc server.\n', '\n', "    If the content_type is 'text/css', the _pydoc.css style\n", '    sheet is read and returned if it exits.\n', '\n', "    If the content_type is 'text/html', then the result of\n", '    get_html_page(url) is returned.\n', '    """\n', '    class _HTMLDoc(HTMLDoc):\n', '\n', '        def page(self, title, contents):\n', '            """Format an HTML page."""\n', '            css_path = "pydoc_data/_pydoc.css"\n', '            css_link = (\n', '                \'<link rel="stylesheet" type="text/css" href="%s">\' %\n', '                css_path)\n', "            return '''\\\n", '<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">\n', '<html><head><title>Pydoc: %s</title>\n', '<meta http-equiv="Content-Type" content="text/html; charset=utf-8">\n', '%s</head><body bgcolor="#f0f0f8">%s<div style="clear:both;padding-top:.5em;">%s</div>\n', "</body></html>''' % (title, css_link, html_navbar(), contents)\n", '\n', '\n', '    html = _HTMLDoc()\n', '\n', '    def html_navbar():\n', '        version = html.escape("%s [%s, %s]" % (platform.python_version(),\n', '                                               platform.python_build()[0],\n', '                                               platform.python_compiler()))\n', '        return """\n', "            <div style='float:left'>\n", '                Python %s<br>%s\n', '            </div>\n', "            <div style='float:right'>\n", "                <div style='text-align:center'>\n", '                  <a href="index.html">Module Index</a>\n', '                  : <a href="topics.html">Topics</a>\n', '                  : <a href="keywords.html">Keywords</a>\n', '                </div>\n', '                <div>\n', '                    <form action="get" style=\'display:inline;\'>\n', '                      <input type=text name=key size=15>\n', '                      <input type=submit value="Get">\n', '                    </form>&nbsp;\n', '                    <form action="search" style=\'display:inline;\'>\n', '                      <input type=text name=key size=15>\n', '                      <input type=submit value="Search">\n', '                    </form>\n', '                </div>\n', '            </div>\n', '            """ % (version, html.escape(platform.platform(terse=True)))\n', '\n', '    def html_index():\n', '        """Module Index page."""\n', '\n', '        def bltinlink(name):\n', '            return \'<a href="%s.html">%s</a>\' % (name, name)\n', '\n', '        heading = html.heading(\n', "            '<big><big><strong>Index of Modules</strong></big></big>',\n", "            '#ffffff', '#7799ee')\n", '        names = [name for name in sys.builtin_module_names\n', "                 if name != '__main__']\n", '        contents = html.multicolumn(names, bltinlink)\n', "        contents = [heading, '<p>' + html.bigsection(\n", "            'Built-in Modules', '#ffffff', '#ee77aa', contents)]\n", '\n', '        seen = {}\n', '        for dir in sys.path:\n', '            contents.append(html.index(dir, seen))\n', '\n', '        contents.append(\n', '            \'<p align=right><font color="#909090" face="helvetica,\'\n', '            \'arial"><strong>pydoc</strong> by Ka-Ping Yee\'\n', "            '&lt;ping@lfw.org&gt;</font>')\n", "        return 'Index of Modules', ''.join(contents)\n", '\n', '    def html_search(key):\n', '        """Search results page."""\n', '        # scan for modules\n', '        search_result = []\n', '\n', '        def callback(path, modname, desc):\n', "            if modname[-9:] == '.__init__':\n", "                modname = modname[:-9] + ' (package)'\n", "            search_result.append((modname, desc and '- ' + desc))\n", '\n', '        with warnings.catch_warnings():\n', "            warnings.filterwarnings('ignore') # ignore problems during import\n", '            def onerror(modname):\n', '                pass\n', '            ModuleScanner().run(callback, key, onerror=onerror)\n', '\n', '        # format page\n', '        def bltinlink(name):\n', '            return \'<a href="%s.html">%s</a>\' % (name, name)\n', '\n', '        results = []\n', '        heading = html.heading(\n', "            '<big><big><strong>Search Results</strong></big></big>',\n", "            '#ffffff', '#7799ee')\n", '        for name, desc in search_result:\n', '            results.append(bltinlink(name) + desc)\n', '        contents = heading + html.bigsection(\n', "            'key = %s' % key, '#ffffff', '#ee77aa', '<br>'.join(results))\n", "        return 'Search Results', contents\n", '\n', '    def html_topics():\n', '        """Index of topic texts available."""\n', '\n', '        def bltinlink(name):\n', '            return \'<a href="topic?key=%s">%s</a>\' % (name, name)\n', '\n', '        heading = html.heading(\n', "            '<big><big><strong>INDEX</strong></big></big>',\n", "            '#ffffff', '#7799ee')\n", '        names = sorted(Helper.topics.keys())\n', '\n', '        contents = html.multicolumn(names, bltinlink)\n', '        contents = heading + html.bigsection(\n', "            'Topics', '#ffffff', '#ee77aa', contents)\n", "        return 'Topics', contents\n", '\n', '    def html_keywords():\n', '        """Index of keywords."""\n', '        heading = html.heading(\n', "            '<big><big><strong>INDEX</strong></big></big>',\n", "            '#ffffff', '#7799ee')\n", '        names = sorted(Helper.keywords.keys())\n', '\n', '        def bltinlink(name):\n', '            return \'<a href="topic?key=%s">%s</a>\' % (name, name)\n', '\n', '        contents = html.multicolumn(names, bltinlink)\n', '        contents = heading + html.bigsection(\n', "            'Keywords', '#ffffff', '#ee77aa', contents)\n", "        return 'Keywords', contents\n", '\n', '    def html_topicpage(topic):\n', '        """Topic or keyword help page."""\n', '        buf = io.StringIO()\n', '        htmlhelp = Helper(buf, buf)\n', '        contents, xrefs = htmlhelp._gettopic(topic)\n', '        if topic in htmlhelp.keywords:\n', "            title = 'KEYWORD'\n", '        else:\n', "            title = 'TOPIC'\n", '        heading = html.heading(\n', "            '<big><big><strong>%s</strong></big></big>' % title,\n", "            '#ffffff', '#7799ee')\n", "        contents = '<pre>%s</pre>' % html.markup(contents)\n", "        contents = html.bigsection(topic , '#ffffff','#ee77aa', contents)\n", '        if xrefs:\n', '            xrefs = sorted(xrefs.split())\n', '\n', '            def bltinlink(name):\n', '                return \'<a href="topic?key=%s">%s</a>\' % (name, name)\n', '\n', '            xrefs = html.multicolumn(xrefs, bltinlink)\n', "            xrefs = html.section('Related help topics: ',\n", "                                 '#ffffff', '#ee77aa', xrefs)\n", "        return ('%s %s' % (title, topic),\n", "                ''.join((heading, contents, xrefs)))\n", '\n', '    def html_getobj(url):\n', '        obj = locate(url, forceload=1)\n', "        if obj is None and url != 'None':\n", "            raise ValueError('could not find object')\n", '        title = describe(obj)\n', '        content = html.document(obj, url)\n', '        return title, content\n', '\n', '    def html_error(url, exc):\n', '        heading = html.heading(\n', "            '<big><big><strong>Error</strong></big></big>',\n", "            '#ffffff', '#7799ee')\n", "        contents = '<br>'.join(html.escape(line) for line in\n", '                               format_exception_only(type(exc), exc))\n', "        contents = heading + html.bigsection(url, '#ffffff', '#bb0000',\n", '                                             contents)\n', '        return "Error - %s" % url, contents\n', '\n', '    def get_html_page(url):\n', '        """Generate an HTML page for url."""\n', '        complete_url = url\n', "        if url.endswith('.html'):\n", '            url = url[:-5]\n', '        try:\n', '            if url in ("", "index"):\n', '                title, content = html_index()\n', '            elif url == "topics":\n', '                title, content = html_topics()\n', '            elif url == "keywords":\n', '                title, content = html_keywords()\n', "            elif '=' in url:\n", "                op, _, url = url.partition('=')\n", '                if op == "search?key":\n', '                    title, content = html_search(url)\n', '                elif op == "topic?key":\n', '                    # try topics first, then objects.\n', '                    try:\n', '                        title, content = html_topicpage(url)\n', '                    except ValueError:\n', '                        title, content = html_getobj(url)\n', '                elif op == "get?key":\n', '                    # try objects first, then topics.\n', '                    if url in ("", "index"):\n', '                        title, content = html_index()\n', '                    else:\n', '                        try:\n', '                            title, content = html_getobj(url)\n', '                        except ValueError:\n', '                            title, content = html_topicpage(url)\n', '                else:\n', "                    raise ValueError('bad pydoc url')\n", '            else:\n', '                title, content = html_getobj(url)\n', '        except Exception as exc:\n', '            # Catch any errors and display them in an error page.\n', '            title, content = html_error(complete_url, exc)\n', '        return html.page(title, content)\n', '\n', "    if url.startswith('/'):\n", '        url = url[1:]\n', "    if content_type == 'text/css':\n", '        path_here = os.path.dirname(os.path.realpath(__file__))\n', '        css_path = os.path.join(path_here, url)\n', '        with open(css_path) as fp:\n', "            return ''.join(fp.readlines())\n", "    elif content_type == 'text/html':\n", '        return get_html_page(url)\n', '    # Errors outside the url handler are caught by the server.\n', "    raise TypeError('unknown content type %r for url %s' % (content_type, url))\n", '\n', '\n', "def browse(port=0, *, open_browser=True, hostname='localhost'):\n", '    """Start the enhanced pydoc Web server and open a Web browser.\n', '\n', "    Use port '0' to start the server on an arbitrary port.\n", '    Set open_browser to False to suppress opening a browser.\n', '    """\n', '    import webbrowser\n', '    serverthread = _start_server(_url_handler, hostname, port)\n', '    if serverthread.error:\n', '        print(serverthread.error)\n', '        return\n', '    if serverthread.serving:\n', "        server_help_msg = 'Server commands: [b]rowser, [q]uit'\n", '        if open_browser:\n', '            webbrowser.open(serverthread.url)\n', '        try:\n', "            print('Server ready at', serverthread.url)\n", '            print(server_help_msg)\n', '            while serverthread.serving:\n', "                cmd = input('server> ')\n", '                cmd = cmd.lower()\n', "                if cmd == 'q':\n", '                    break\n', "                elif cmd == 'b':\n", '                    webbrowser.open(serverthread.url)\n', '                else:\n', '                    print(server_help_msg)\n', '        except (KeyboardInterrupt, EOFError):\n', '            print()\n', '        finally:\n', '            if serverthread.serving:\n', '                serverthread.stop()\n', "                print('Server stopped')\n", '\n', '\n', '# -------------------------------------------------- command-line interface\n', '\n', 'def ispath(x):\n', '    return isinstance(x, str) and x.find(os.sep) >= 0\n', '\n', 'def _get_revised_path(given_path, argv0):\n', '    """Ensures current directory is on returned path, and argv0 directory is not\n', '\n', "    Exception: argv0 dir is left alone if it's also pydoc's directory.\n", '\n', '    Returns a new path entry list, or None if no adjustment is needed.\n', '    """\n', "    # Scripts may get the current directory in their path by default if they're\n", '    # run with the -m switch, or directly from the current directory.\n', '    # The interactive prompt also allows imports from the current directory.\n', '\n', "    # Accordingly, if the current directory is already present, don't make\n", '    # any changes to the given_path\n', "    if '' in given_path or os.curdir in given_path or os.getcwd() in given_path:\n", '        return None\n', '\n', '    # Otherwise, add the current directory to the given path, and remove the\n', "    # script directory (as long as the latter isn't also pydoc's directory.\n", '    stdlib_dir = os.path.dirname(__file__)\n', '    script_dir = os.path.dirname(argv0)\n', '    revised_path = given_path.copy()\n', '    if script_dir in given_path and not os.path.samefile(script_dir, stdlib_dir):\n', '        revised_path.remove(script_dir)\n', '    revised_path.insert(0, os.getcwd())\n', '    return revised_path\n', '\n', '\n', '# Note: the tests only cover _get_revised_path, not _adjust_cli_path itself\n', 'def _adjust_cli_sys_path():\n', '    """Ensures current directory is on sys.path, and __main__ directory is not.\n', '\n', "    Exception: __main__ dir is left alone if it's also pydoc's directory.\n", '    """\n', '    revised_path = _get_revised_path(sys.path, sys.argv[0])\n', '    if revised_path is not None:\n', '        sys.path[:] = revised_path\n', '\n', '\n', 'def cli():\n', '    """Command-line interface (looks at sys.argv to decide what to do)."""\n', '    import getopt\n', '    class BadUsage(Exception): pass\n', '\n', '    _adjust_cli_sys_path()\n', '\n', '    try:\n', "        opts, args = getopt.getopt(sys.argv[1:], 'bk:n:p:w')\n", '        writing = False\n', '        start_server = False\n', '        open_browser = False\n', '        port = 0\n', "        hostname = 'localhost'\n", '        for opt, val in opts:\n', "            if opt == '-b':\n", '                start_server = True\n', '                open_browser = True\n', "            if opt == '-k':\n", '                apropos(val)\n', '                return\n', "            if opt == '-p':\n", '                start_server = True\n', '                port = val\n', "            if opt == '-w':\n", '                writing = True\n', "            if opt == '-n':\n", '                start_server = True\n', '                hostname = val\n', '\n', '        if start_server:\n', '            browse(port, hostname=hostname, open_browser=open_browser)\n', '            return\n', '\n', '        if not args: raise BadUsage\n', '        for arg in args:\n', '            if ispath(arg) and not os.path.exists(arg):\n', "                print('file %r does not exist' % arg)\n", '                break\n', '            try:\n', '                if ispath(arg) and os.path.isfile(arg):\n', '                    arg = importfile(arg)\n', '                if writing:\n', '                    if ispath(arg) and os.path.isdir(arg):\n', '                        writedocs(arg)\n', '                    else:\n', '                        writedoc(arg)\n', '                else:\n', '                    help.help(arg)\n', '            except ErrorDuringImport as value:\n', '                print(value)\n', '\n', '    except (getopt.error, BadUsage):\n', '        cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]\n', '        print("""pydoc - the Python documentation tool\n', '\n', '{cmd} <name> ...\n', '    Show text documentation on something.  <name> may be the name of a\n', '    Python keyword, topic, function, module, or package, or a dotted\n', '    reference to a class or function within a module or module in a\n', "    package.  If <name> contains a '{sep}', it is used as the path to a\n", "    Python source file to document. If name is 'keywords', 'topics',\n", "    or 'modules', a listing of these things is displayed.\n", '\n', '{cmd} -k <keyword>\n', '    Search for a keyword in the synopsis lines of all available modules.\n', '\n', '{cmd} -n <hostname>\n', '    Start an HTTP server with the given hostname (default: localhost).\n', '\n', '{cmd} -p <port>\n', '    Start an HTTP server on the given port on the local machine.  Port\n', '    number 0 can be used to get an arbitrary unused port.\n', '\n', '{cmd} -b\n', '    Start an HTTP server on an arbitrary unused port and open a Web browser\n', '    to interactively browse documentation.  This option can be used in\n', '    combination with -n and/or -p.\n', '\n', '{cmd} -w <name> ...\n', '    Write out the HTML documentation for a module to a file in the current\n', "    directory.  If <name> contains a '{sep}', it is treated as a filename; if\n", '    it names a directory, documentation is written for all the contents.\n', '""".format(cmd=cmd, sep=os.sep))\n', '\n', "if __name__ == '__main__':\n", '    cli()\n'], '/nix/store/cgxc3jz7idrb1wnb2lard9rvcx6aw2si-python3-3.9.6/lib/python3.9/pydoc.py'), '/nix/store/cgxc3jz7idrb1wnb2lard9rvcx6aw2si-python3-3.9.6/lib/python3.9/asyncio/events.py': (26378, 1.0, ['"""Event loop and event loop policy."""\n', '\n', '__all__ = (\n', "    'AbstractEventLoopPolicy',\n", "    'AbstractEventLoop', 'AbstractServer',\n", "    'Handle', 'TimerHandle',\n", "    'get_event_loop_policy', 'set_event_loop_policy',\n", "    'get_event_loop', 'set_event_loop', 'new_event_loop',\n", "    'get_child_watcher', 'set_child_watcher',\n", "    '_set_running_loop', 'get_running_loop',\n", "    '_get_running_loop',\n", ')\n', '\n', 'import contextvars\n', 'import os\n', 'import socket\n', 'import subprocess\n', 'import sys\n', 'import threading\n', '\n', 'from . import format_helpers\n', '\n', '\n', 'class Handle:\n', '    """Object returned by callback registration methods."""\n', '\n', "    __slots__ = ('_callback', '_args', '_cancelled', '_loop',\n", "                 '_source_traceback', '_repr', '__weakref__',\n", "                 '_context')\n", '\n', '    def __init__(self, callback, args, loop, context=None):\n', '        if context is None:\n', '            context = contextvars.copy_context()\n', '        self._context = context\n', '        self._loop = loop\n', '        self._callback = callback\n', '        self._args = args\n', '        self._cancelled = False\n', '        self._repr = None\n', '        if self._loop.get_debug():\n', '            self._source_traceback = format_helpers.extract_stack(\n', '                sys._getframe(1))\n', '        else:\n', '            self._source_traceback = None\n', '\n', '    def _repr_info(self):\n', '        info = [self.__class__.__name__]\n', '        if self._cancelled:\n', "            info.append('cancelled')\n", '        if self._callback is not None:\n', '            info.append(format_helpers._format_callback_source(\n', '                self._callback, self._args))\n', '        if self._source_traceback:\n', '            frame = self._source_traceback[-1]\n', "            info.append(f'created at {frame[0]}:{frame[1]}')\n", '        return info\n', '\n', '    def __repr__(self):\n', '        if self._repr is not None:\n', '            return self._repr\n', '        info = self._repr_info()\n', "        return '<{}>'.format(' '.join(info))\n", '\n', '    def cancel(self):\n', '        if not self._cancelled:\n', '            self._cancelled = True\n', '            if self._loop.get_debug():\n', '                # Keep a representation in debug mode to keep callback and\n', '                # parameters. For example, to log the warning\n', '                # "Executing <Handle...> took 2.5 second"\n', '                self._repr = repr(self)\n', '            self._callback = None\n', '            self._args = None\n', '\n', '    def cancelled(self):\n', '        return self._cancelled\n', '\n', '    def _run(self):\n', '        try:\n', '            self._context.run(self._callback, *self._args)\n', '        except (SystemExit, KeyboardInterrupt):\n', '            raise\n', '        except BaseException as exc:\n', '            cb = format_helpers._format_callback_source(\n', '                self._callback, self._args)\n', "            msg = f'Exception in callback {cb}'\n", '            context = {\n', "                'message': msg,\n", "                'exception': exc,\n", "                'handle': self,\n", '            }\n', '            if self._source_traceback:\n', "                context['source_traceback'] = self._source_traceback\n", '            self._loop.call_exception_handler(context)\n', '        self = None  # Needed to break cycles when an exception occurs.\n', '\n', '\n', 'class TimerHandle(Handle):\n', '    """Object returned by timed callback registration methods."""\n', '\n', "    __slots__ = ['_scheduled', '_when']\n", '\n', '    def __init__(self, when, callback, args, loop, context=None):\n', '        assert when is not None\n', '        super().__init__(callback, args, loop, context)\n', '        if self._source_traceback:\n', '            del self._source_traceback[-1]\n', '        self._when = when\n', '        self._scheduled = False\n', '\n', '    def _repr_info(self):\n', '        info = super()._repr_info()\n', '        pos = 2 if self._cancelled else 1\n', "        info.insert(pos, f'when={self._when}')\n", '        return info\n', '\n', '    def __hash__(self):\n', '        return hash(self._when)\n', '\n', '    def __lt__(self, other):\n', '        if isinstance(other, TimerHandle):\n', '            return self._when < other._when\n', '        return NotImplemented\n', '\n', '    def __le__(self, other):\n', '        if isinstance(other, TimerHandle):\n', '            return self._when < other._when or self.__eq__(other)\n', '        return NotImplemented\n', '\n', '    def __gt__(self, other):\n', '        if isinstance(other, TimerHandle):\n', '            return self._when > other._when\n', '        return NotImplemented\n', '\n', '    def __ge__(self, other):\n', '        if isinstance(other, TimerHandle):\n', '            return self._when > other._when or self.__eq__(other)\n', '        return NotImplemented\n', '\n', '    def __eq__(self, other):\n', '        if isinstance(other, TimerHandle):\n', '            return (self._when == other._when and\n', '                    self._callback == other._callback and\n', '                    self._args == other._args and\n', '                    self._cancelled == other._cancelled)\n', '        return NotImplemented\n', '\n', '    def cancel(self):\n', '        if not self._cancelled:\n', '            self._loop._timer_handle_cancelled(self)\n', '        super().cancel()\n', '\n', '    def when(self):\n', '        """Return a scheduled callback time.\n', '\n', '        The time is an absolute timestamp, using the same time\n', '        reference as loop.time().\n', '        """\n', '        return self._when\n', '\n', '\n', 'class AbstractServer:\n', '    """Abstract server returned by create_server()."""\n', '\n', '    def close(self):\n', '        """Stop serving.  This leaves existing connections open."""\n', '        raise NotImplementedError\n', '\n', '    def get_loop(self):\n', '        """Get the event loop the Server object is attached to."""\n', '        raise NotImplementedError\n', '\n', '    def is_serving(self):\n', '        """Return True if the server is accepting connections."""\n', '        raise NotImplementedError\n', '\n', '    async def start_serving(self):\n', '        """Start accepting connections.\n', '\n', '        This method is idempotent, so it can be called when\n', '        the server is already being serving.\n', '        """\n', '        raise NotImplementedError\n', '\n', '    async def serve_forever(self):\n', '        """Start accepting connections until the coroutine is cancelled.\n', '\n', '        The server is closed when the coroutine is cancelled.\n', '        """\n', '        raise NotImplementedError\n', '\n', '    async def wait_closed(self):\n', '        """Coroutine to wait until service is closed."""\n', '        raise NotImplementedError\n', '\n', '    async def __aenter__(self):\n', '        return self\n', '\n', '    async def __aexit__(self, *exc):\n', '        self.close()\n', '        await self.wait_closed()\n', '\n', '\n', 'class AbstractEventLoop:\n', '    """Abstract event loop."""\n', '\n', '    # Running and stopping the event loop.\n', '\n', '    def run_forever(self):\n', '        """Run the event loop until stop() is called."""\n', '        raise NotImplementedError\n', '\n', '    def run_until_complete(self, future):\n', '        """Run the event loop until a Future is done.\n', '\n', "        Return the Future's result, or raise its exception.\n", '        """\n', '        raise NotImplementedError\n', '\n', '    def stop(self):\n', '        """Stop the event loop as soon as reasonable.\n', '\n', '        Exactly how soon that is may depend on the implementation, but\n', '        no more I/O callbacks should be scheduled.\n', '        """\n', '        raise NotImplementedError\n', '\n', '    def is_running(self):\n', '        """Return whether the event loop is currently running."""\n', '        raise NotImplementedError\n', '\n', '    def is_closed(self):\n', '        """Returns True if the event loop was closed."""\n', '        raise NotImplementedError\n', '\n', '    def close(self):\n', '        """Close the loop.\n', '\n', '        The loop should not be running.\n', '\n', '        This is idempotent and irreversible.\n', '\n', '        No other methods should be called after this one.\n', '        """\n', '        raise NotImplementedError\n', '\n', '    async def shutdown_asyncgens(self):\n', '        """Shutdown all active asynchronous generators."""\n', '        raise NotImplementedError\n', '\n', '    async def shutdown_default_executor(self):\n', '        """Schedule the shutdown of the default executor."""\n', '        raise NotImplementedError\n', '\n', '    # Methods scheduling callbacks.  All these return Handles.\n', '\n', '    def _timer_handle_cancelled(self, handle):\n', '        """Notification that a TimerHandle has been cancelled."""\n', '        raise NotImplementedError\n', '\n', '    def call_soon(self, callback, *args):\n', '        return self.call_later(0, callback, *args)\n', '\n', '    def call_later(self, delay, callback, *args):\n', '        raise NotImplementedError\n', '\n', '    def call_at(self, when, callback, *args):\n', '        raise NotImplementedError\n', '\n', '    def time(self):\n', '        raise NotImplementedError\n', '\n', '    def create_future(self):\n', '        raise NotImplementedError\n', '\n', '    # Method scheduling a coroutine object: create a task.\n', '\n', '    def create_task(self, coro, *, name=None):\n', '        raise NotImplementedError\n', '\n', '    # Methods for interacting with threads.\n', '\n', '    def call_soon_threadsafe(self, callback, *args):\n', '        raise NotImplementedError\n', '\n', '    def run_in_executor(self, executor, func, *args):\n', '        raise NotImplementedError\n', '\n', '    def set_default_executor(self, executor):\n', '        raise NotImplementedError\n', '\n', '    # Network I/O methods returning Futures.\n', '\n', '    async def getaddrinfo(self, host, port, *,\n', '                          family=0, type=0, proto=0, flags=0):\n', '        raise NotImplementedError\n', '\n', '    async def getnameinfo(self, sockaddr, flags=0):\n', '        raise NotImplementedError\n', '\n', '    async def create_connection(\n', '            self, protocol_factory, host=None, port=None,\n', '            *, ssl=None, family=0, proto=0,\n', '            flags=0, sock=None, local_addr=None,\n', '            server_hostname=None,\n', '            ssl_handshake_timeout=None,\n', '            happy_eyeballs_delay=None, interleave=None):\n', '        raise NotImplementedError\n', '\n', '    async def create_server(\n', '            self, protocol_factory, host=None, port=None,\n', '            *, family=socket.AF_UNSPEC,\n', '            flags=socket.AI_PASSIVE, sock=None, backlog=100,\n', '            ssl=None, reuse_address=None, reuse_port=None,\n', '            ssl_handshake_timeout=None,\n', '            start_serving=True):\n', '        """A coroutine which creates a TCP server bound to host and port.\n', '\n', '        The return value is a Server object which can be used to stop\n', '        the service.\n', '\n', '        If host is an empty string or None all interfaces are assumed\n', '        and a list of multiple sockets will be returned (most likely\n', '        one for IPv4 and another one for IPv6). The host parameter can also be\n', '        a sequence (e.g. list) of hosts to bind to.\n', '\n', '        family can be set to either AF_INET or AF_INET6 to force the\n', '        socket to use IPv4 or IPv6. If not set it will be determined\n', '        from host (defaults to AF_UNSPEC).\n', '\n', '        flags is a bitmask for getaddrinfo().\n', '\n', '        sock can optionally be specified in order to use a preexisting\n', '        socket object.\n', '\n', '        backlog is the maximum number of queued connections passed to\n', '        listen() (defaults to 100).\n', '\n', '        ssl can be set to an SSLContext to enable SSL over the\n', '        accepted connections.\n', '\n', '        reuse_address tells the kernel to reuse a local socket in\n', '        TIME_WAIT state, without waiting for its natural timeout to\n', '        expire. If not specified will automatically be set to True on\n', '        UNIX.\n', '\n', '        reuse_port tells the kernel to allow this endpoint to be bound to\n', '        the same port as other existing endpoints are bound to, so long as\n', '        they all set this flag when being created. This option is not\n', '        supported on Windows.\n', '\n', '        ssl_handshake_timeout is the time in seconds that an SSL server\n', '        will wait for completion of the SSL handshake before aborting the\n', '        connection. Default is 60s.\n', '\n', '        start_serving set to True (default) causes the created server\n', '        to start accepting connections immediately.  When set to False,\n', '        the user should await Server.start_serving() or Server.serve_forever()\n', '        to make the server to start accepting connections.\n', '        """\n', '        raise NotImplementedError\n', '\n', '    async def sendfile(self, transport, file, offset=0, count=None,\n', '                       *, fallback=True):\n', '        """Send a file through a transport.\n', '\n', '        Return an amount of sent bytes.\n', '        """\n', '        raise NotImplementedError\n', '\n', '    async def start_tls(self, transport, protocol, sslcontext, *,\n', '                        server_side=False,\n', '                        server_hostname=None,\n', '                        ssl_handshake_timeout=None):\n', '        """Upgrade a transport to TLS.\n', '\n', '        Return a new transport that *protocol* should start using\n', '        immediately.\n', '        """\n', '        raise NotImplementedError\n', '\n', '    async def create_unix_connection(\n', '            self, protocol_factory, path=None, *,\n', '            ssl=None, sock=None,\n', '            server_hostname=None,\n', '            ssl_handshake_timeout=None):\n', '        raise NotImplementedError\n', '\n', '    async def create_unix_server(\n', '            self, protocol_factory, path=None, *,\n', '            sock=None, backlog=100, ssl=None,\n', '            ssl_handshake_timeout=None,\n', '            start_serving=True):\n', '        """A coroutine which creates a UNIX Domain Socket server.\n', '\n', '        The return value is a Server object, which can be used to stop\n', '        the service.\n', '\n', '        path is a str, representing a file system path to bind the\n', '        server socket to.\n', '\n', '        sock can optionally be specified in order to use a preexisting\n', '        socket object.\n', '\n', '        backlog is the maximum number of queued connections passed to\n', '        listen() (defaults to 100).\n', '\n', '        ssl can be set to an SSLContext to enable SSL over the\n', '        accepted connections.\n', '\n', '        ssl_handshake_timeout is the time in seconds that an SSL server\n', '        will wait for the SSL handshake to complete (defaults to 60s).\n', '\n', '        start_serving set to True (default) causes the created server\n', '        to start accepting connections immediately.  When set to False,\n', '        the user should await Server.start_serving() or Server.serve_forever()\n', '        to make the server to start accepting connections.\n', '        """\n', '        raise NotImplementedError\n', '\n', '    async def create_datagram_endpoint(self, protocol_factory,\n', '                                       local_addr=None, remote_addr=None, *,\n', '                                       family=0, proto=0, flags=0,\n', '                                       reuse_address=None, reuse_port=None,\n', '                                       allow_broadcast=None, sock=None):\n', '        """A coroutine which creates a datagram endpoint.\n', '\n', '        This method will try to establish the endpoint in the background.\n', '        When successful, the coroutine returns a (transport, protocol) pair.\n', '\n', '        protocol_factory must be a callable returning a protocol instance.\n', '\n', '        socket family AF_INET, socket.AF_INET6 or socket.AF_UNIX depending on\n', '        host (or family if specified), socket type SOCK_DGRAM.\n', '\n', '        reuse_address tells the kernel to reuse a local socket in\n', '        TIME_WAIT state, without waiting for its natural timeout to\n', '        expire. If not specified it will automatically be set to True on\n', '        UNIX.\n', '\n', '        reuse_port tells the kernel to allow this endpoint to be bound to\n', '        the same port as other existing endpoints are bound to, so long as\n', '        they all set this flag when being created. This option is not\n', "        supported on Windows and some UNIX's. If the\n", '        :py:data:`~socket.SO_REUSEPORT` constant is not defined then this\n', '        capability is unsupported.\n', '\n', '        allow_broadcast tells the kernel to allow this endpoint to send\n', '        messages to the broadcast address.\n', '\n', '        sock can optionally be specified in order to use a preexisting\n', '        socket object.\n', '        """\n', '        raise NotImplementedError\n', '\n', '    # Pipes and subprocesses.\n', '\n', '    async def connect_read_pipe(self, protocol_factory, pipe):\n', '        """Register read pipe in event loop. Set the pipe to non-blocking mode.\n', '\n', '        protocol_factory should instantiate object with Protocol interface.\n', '        pipe is a file-like object.\n', '        Return pair (transport, protocol), where transport supports the\n', '        ReadTransport interface."""\n', '        # The reason to accept file-like object instead of just file descriptor\n', '        # is: we need to own pipe and close it at transport finishing\n', '        # Can got complicated errors if pass f.fileno(),\n', '        # close fd in pipe transport then close f and vise versa.\n', '        raise NotImplementedError\n', '\n', '    async def connect_write_pipe(self, protocol_factory, pipe):\n', '        """Register write pipe in event loop.\n', '\n', '        protocol_factory should instantiate object with BaseProtocol interface.\n', '        Pipe is file-like object already switched to nonblocking.\n', '        Return pair (transport, protocol), where transport support\n', '        WriteTransport interface."""\n', '        # The reason to accept file-like object instead of just file descriptor\n', '        # is: we need to own pipe and close it at transport finishing\n', '        # Can got complicated errors if pass f.fileno(),\n', '        # close fd in pipe transport then close f and vise versa.\n', '        raise NotImplementedError\n', '\n', '    async def subprocess_shell(self, protocol_factory, cmd, *,\n', '                               stdin=subprocess.PIPE,\n', '                               stdout=subprocess.PIPE,\n', '                               stderr=subprocess.PIPE,\n', '                               **kwargs):\n', '        raise NotImplementedError\n', '\n', '    async def subprocess_exec(self, protocol_factory, *args,\n', '                              stdin=subprocess.PIPE,\n', '                              stdout=subprocess.PIPE,\n', '                              stderr=subprocess.PIPE,\n', '                              **kwargs):\n', '        raise NotImplementedError\n', '\n', '    # Ready-based callback registration methods.\n', '    # The add_*() methods return None.\n', '    # The remove_*() methods return True if something was removed,\n', '    # False if there was nothing to delete.\n', '\n', '    def add_reader(self, fd, callback, *args):\n', '        raise NotImplementedError\n', '\n', '    def remove_reader(self, fd):\n', '        raise NotImplementedError\n', '\n', '    def add_writer(self, fd, callback, *args):\n', '        raise NotImplementedError\n', '\n', '    def remove_writer(self, fd):\n', '        raise NotImplementedError\n', '\n', '    # Completion based I/O methods returning Futures.\n', '\n', '    async def sock_recv(self, sock, nbytes):\n', '        raise NotImplementedError\n', '\n', '    async def sock_recv_into(self, sock, buf):\n', '        raise NotImplementedError\n', '\n', '    async def sock_sendall(self, sock, data):\n', '        raise NotImplementedError\n', '\n', '    async def sock_connect(self, sock, address):\n', '        raise NotImplementedError\n', '\n', '    async def sock_accept(self, sock):\n', '        raise NotImplementedError\n', '\n', '    async def sock_sendfile(self, sock, file, offset=0, count=None,\n', '                            *, fallback=None):\n', '        raise NotImplementedError\n', '\n', '    # Signal handling.\n', '\n', '    def add_signal_handler(self, sig, callback, *args):\n', '        raise NotImplementedError\n', '\n', '    def remove_signal_handler(self, sig):\n', '        raise NotImplementedError\n', '\n', '    # Task factory.\n', '\n', '    def set_task_factory(self, factory):\n', '        raise NotImplementedError\n', '\n', '    def get_task_factory(self):\n', '        raise NotImplementedError\n', '\n', '    # Error handlers.\n', '\n', '    def get_exception_handler(self):\n', '        raise NotImplementedError\n', '\n', '    def set_exception_handler(self, handler):\n', '        raise NotImplementedError\n', '\n', '    def default_exception_handler(self, context):\n', '        raise NotImplementedError\n', '\n', '    def call_exception_handler(self, context):\n', '        raise NotImplementedError\n', '\n', '    # Debug flag management.\n', '\n', '    def get_debug(self):\n', '        raise NotImplementedError\n', '\n', '    def set_debug(self, enabled):\n', '        raise NotImplementedError\n', '\n', '\n', 'class AbstractEventLoopPolicy:\n', '    """Abstract policy for accessing the event loop."""\n', '\n', '    def get_event_loop(self):\n', '        """Get the event loop for the current context.\n', '\n', '        Returns an event loop object implementing the BaseEventLoop interface,\n', '        or raises an exception in case no event loop has been set for the\n', '        current context and the current policy does not specify to create one.\n', '\n', '        It should never return None."""\n', '        raise NotImplementedError\n', '\n', '    def set_event_loop(self, loop):\n', '        """Set the event loop for the current context to loop."""\n', '        raise NotImplementedError\n', '\n', '    def new_event_loop(self):\n', '        """Create and return a new event loop object according to this\n', "        policy's rules. If there's need to set this loop as the event loop for\n", '        the current context, set_event_loop must be called explicitly."""\n', '        raise NotImplementedError\n', '\n', '    # Child processes handling (Unix only).\n', '\n', '    def get_child_watcher(self):\n', '        "Get the watcher for child processes."\n', '        raise NotImplementedError\n', '\n', '    def set_child_watcher(self, watcher):\n', '        """Set the watcher for child processes."""\n', '        raise NotImplementedError\n', '\n', '\n', 'class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):\n', '    """Default policy implementation for accessing the event loop.\n', '\n', '    In this policy, each thread has its own event loop.  However, we\n', '    only automatically create an event loop by default for the main\n', '    thread; other threads by default have no event loop.\n', '\n', '    Other policies may have different rules (e.g. a single global\n', '    event loop, or automatically creating an event loop per thread, or\n', '    using some other notion of context to which an event loop is\n', '    associated).\n', '    """\n', '\n', '    _loop_factory = None\n', '\n', '    class _Local(threading.local):\n', '        _loop = None\n', '        _set_called = False\n', '\n', '    def __init__(self):\n', '        self._local = self._Local()\n', '\n', '    def get_event_loop(self):\n', '        """Get the event loop for the current context.\n', '\n', '        Returns an instance of EventLoop or raises an exception.\n', '        """\n', '        if (self._local._loop is None and\n', '                not self._local._set_called and\n', '                threading.current_thread() is threading.main_thread()):\n', '            self.set_event_loop(self.new_event_loop())\n', '\n', '        if self._local._loop is None:\n', "            raise RuntimeError('There is no current event loop in thread %r.'\n", '                               % threading.current_thread().name)\n', '\n', '        return self._local._loop\n', '\n', '    def set_event_loop(self, loop):\n', '        """Set the event loop."""\n', '        self._local._set_called = True\n', '        assert loop is None or isinstance(loop, AbstractEventLoop)\n', '        self._local._loop = loop\n', '\n', '    def new_event_loop(self):\n', '        """Create a new event loop.\n', '\n', '        You must call set_event_loop() to make this the current event\n', '        loop.\n', '        """\n', '        return self._loop_factory()\n', '\n', '\n', '# Event loop policy.  The policy itself is always global, even if the\n', "# policy's rules say that there is an event loop per thread (or other\n", '# notion of context).  The default policy is installed by the first\n', '# call to get_event_loop_policy().\n', '_event_loop_policy = None\n', '\n', '# Lock for protecting the on-the-fly creation of the event loop policy.\n', '_lock = threading.Lock()\n', '\n', '\n', '# A TLS for the running event loop, used by _get_running_loop.\n', 'class _RunningLoop(threading.local):\n', '    loop_pid = (None, None)\n', '\n', '\n', '_running_loop = _RunningLoop()\n', '\n', '\n', 'def get_running_loop():\n', '    """Return the running event loop.  Raise a RuntimeError if there is none.\n', '\n', '    This function is thread-specific.\n', '    """\n', '    # NOTE: this function is implemented in C (see _asynciomodule.c)\n', '    loop = _get_running_loop()\n', '    if loop is None:\n', "        raise RuntimeError('no running event loop')\n", '    return loop\n', '\n', '\n', 'def _get_running_loop():\n', '    """Return the running event loop or None.\n', '\n', '    This is a low-level function intended to be used by event loops.\n', '    This function is thread-specific.\n', '    """\n', '    # NOTE: this function is implemented in C (see _asynciomodule.c)\n', '    running_loop, pid = _running_loop.loop_pid\n', '    if running_loop is not None and pid == os.getpid():\n', '        return running_loop\n', '\n', '\n', 'def _set_running_loop(loop):\n', '    """Set the running event loop.\n', '\n', '    This is a low-level function intended to be used by event loops.\n', '    This function is thread-specific.\n', '    """\n', '    # NOTE: this function is implemented in C (see _asynciomodule.c)\n', '    _running_loop.loop_pid = (loop, os.getpid())\n', '\n', '\n', 'def _init_event_loop_policy():\n', '    global _event_loop_policy\n', '    with _lock:\n', '        if _event_loop_policy is None:  # pragma: no branch\n', '            from . import DefaultEventLoopPolicy\n', '            _event_loop_policy = DefaultEventLoopPolicy()\n', '\n', '\n', 'def get_event_loop_policy():\n', '    """Get the current event loop policy."""\n', '    if _event_loop_policy is None:\n', '        _init_event_loop_policy()\n', '    return _event_loop_policy\n', '\n', '\n', 'def set_event_loop_policy(policy):\n', '    """Set the current event loop policy.\n', '\n', '    If policy is None, the default policy is restored."""\n', '    global _event_loop_policy\n', '    assert policy is None or isinstance(policy, AbstractEventLoopPolicy)\n', '    _event_loop_policy = policy\n', '\n', '\n', 'def get_event_loop():\n', '    """Return an asyncio event loop.\n', '\n', '    When called from a coroutine or a callback (e.g. scheduled with call_soon\n', '    or similar API), this function will always return the running event loop.\n', '\n', '    If there is no running event loop set, the function will return\n', '    the result of `get_event_loop_policy().get_event_loop()` call.\n', '    """\n', '    # NOTE: this function is implemented in C (see _asynciomodule.c)\n', '    current_loop = _get_running_loop()\n', '    if current_loop is not None:\n', '        return current_loop\n', '    return get_event_loop_policy().get_event_loop()\n', '\n', '\n', 'def set_event_loop(loop):\n', '    """Equivalent to calling get_event_loop_policy().set_event_loop(loop)."""\n', '    get_event_loop_policy().set_event_loop(loop)\n', '\n', '\n', 'def new_event_loop():\n', '    """Equivalent to calling get_event_loop_policy().new_event_loop()."""\n', '    return get_event_loop_policy().new_event_loop()\n', '\n', '\n', 'def get_child_watcher():\n', '    """Equivalent to calling get_event_loop_policy().get_child_watcher()."""\n', '    return get_event_loop_policy().get_child_watcher()\n', '\n', '\n', 'def set_child_watcher(watcher):\n', '    """Equivalent to calling\n', '    get_event_loop_policy().set_child_watcher(watcher)."""\n', '    return get_event_loop_policy().set_child_watcher(watcher)\n', '\n', '\n', '# Alias pure-Python implementations for testing purposes.\n', '_py__get_running_loop = _get_running_loop\n', '_py__set_running_loop = _set_running_loop\n', '_py_get_running_loop = get_running_loop\n', '_py_get_event_loop = get_event_loop\n', '\n', '\n', 'try:\n', '    # get_event_loop() is one of the most frequently called\n', '    # functions in asyncio.  Pure Python implementation is\n', '    # about 4 times slower than C-accelerated.\n', '    from _asyncio import (_get_running_loop, _set_running_loop,\n', '                          get_running_loop, get_event_loop)\n', 'except ImportError:\n', '    pass\n', 'else:\n', '    # Alias C implementations for testing purposes.\n', '    _c__get_running_loop = _get_running_loop\n', '    _c__set_running_loop = _set_running_loop\n', '    _c_get_running_loop = get_running_loop\n', '    _c_get_event_loop = get_event_loop\n'], '/nix/store/cgxc3jz7idrb1wnb2lard9rvcx6aw2si-python3-3.9.6/lib/python3.9/asyncio/events.py'), '/nix/store/cgxc3jz7idrb1wnb2lard9rvcx6aw2si-python3-3.9.6/lib/python3.9/asyncio/base_events.py': (73809, 1.0, ['"""Base implementation of event loop.\n', '\n', 'The event loop can be broken up into a multiplexer (the part\n', 'responsible for notifying us of I/O events) and the event loop proper,\n', 'which wraps a multiplexer with functionality for scheduling callbacks,\n', 'immediately or at a given time in the future.\n', '\n', 'Whenever a public API takes a callback, subsequent positional\n', 'arguments will be passed to the callback if/when it is called.  This\n', 'avoids the proliferation of trivial lambdas implementing closures.\n', 'Keyword arguments for the callback are not supported; this is a\n', 'conscious design decision, leaving the door open for keyword arguments\n', 'to modify the meaning of the API call itself.\n', '"""\n', '\n', 'import collections\n', 'import collections.abc\n', 'import concurrent.futures\n', 'import functools\n', 'import heapq\n', 'import itertools\n', 'import os\n', 'import socket\n', 'import stat\n', 'import subprocess\n', 'import threading\n', 'import time\n', 'import traceback\n', 'import sys\n', 'import warnings\n', 'import weakref\n', '\n', 'try:\n', '    import ssl\n', 'except ImportError:  # pragma: no cover\n', '    ssl = None\n', '\n', 'from . import constants\n', 'from . import coroutines\n', 'from . import events\n', 'from . import exceptions\n', 'from . import futures\n', 'from . import protocols\n', 'from . import sslproto\n', 'from . import staggered\n', 'from . import tasks\n', 'from . import transports\n', 'from . import trsock\n', 'from .log import logger\n', '\n', '\n', "__all__ = 'BaseEventLoop',\n", '\n', '\n', '# Minimum number of _scheduled timer handles before cleanup of\n', '# cancelled handles is performed.\n', '_MIN_SCHEDULED_TIMER_HANDLES = 100\n', '\n', '# Minimum fraction of _scheduled timer handles that are cancelled\n', '# before cleanup of cancelled handles is performed.\n', '_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5\n', '\n', '\n', "_HAS_IPv6 = hasattr(socket, 'AF_INET6')\n", '\n', '# Maximum timeout passed to select to avoid OS limitations\n', 'MAXIMUM_SELECT_TIMEOUT = 24 * 3600\n', '\n', "# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s\n", '# *reuse_address* parameter\n', '_unset = object()\n', '\n', '\n', 'def _format_handle(handle):\n', '    cb = handle._callback\n', "    if isinstance(getattr(cb, '__self__', None), tasks.Task):\n", '        # format the task\n', '        return repr(cb.__self__)\n', '    else:\n', '        return str(handle)\n', '\n', '\n', 'def _format_pipe(fd):\n', '    if fd == subprocess.PIPE:\n', "        return '<pipe>'\n", '    elif fd == subprocess.STDOUT:\n', "        return '<stdout>'\n", '    else:\n', '        return repr(fd)\n', '\n', '\n', 'def _set_reuseport(sock):\n', "    if not hasattr(socket, 'SO_REUSEPORT'):\n", "        raise ValueError('reuse_port not supported by socket module')\n", '    else:\n', '        try:\n', '            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n', '        except OSError:\n', "            raise ValueError('reuse_port not supported by socket module, '\n", "                             'SO_REUSEPORT defined but not implemented.')\n", '\n', '\n', 'def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):\n', '    # Try to skip getaddrinfo if "host" is already an IP. Users might have\n', '    # handled name resolution in their own code and pass in resolved IPs.\n', "    if not hasattr(socket, 'inet_pton'):\n", '        return\n', '\n', '    if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \\\n', '            host is None:\n', '        return None\n', '\n', '    if type == socket.SOCK_STREAM:\n', '        proto = socket.IPPROTO_TCP\n', '    elif type == socket.SOCK_DGRAM:\n', '        proto = socket.IPPROTO_UDP\n', '    else:\n', '        return None\n', '\n', '    if port is None:\n', '        port = 0\n', "    elif isinstance(port, bytes) and port == b'':\n", '        port = 0\n', "    elif isinstance(port, str) and port == '':\n", '        port = 0\n', '    else:\n', '        # If port\'s a service name like "http", don\'t skip getaddrinfo.\n', '        try:\n', '            port = int(port)\n', '        except (TypeError, ValueError):\n', '            return None\n', '\n', '    if family == socket.AF_UNSPEC:\n', '        afs = [socket.AF_INET]\n', '        if _HAS_IPv6:\n', '            afs.append(socket.AF_INET6)\n', '    else:\n', '        afs = [family]\n', '\n', '    if isinstance(host, bytes):\n', "        host = host.decode('idna')\n", "    if '%' in host:\n", "        # Linux's inet_pton doesn't accept an IPv6 zone index after host,\n", "        # like '::1%lo0'.\n", '        return None\n', '\n', '    for af in afs:\n', '        try:\n', '            socket.inet_pton(af, host)\n', '            # The host has already been resolved.\n', '            if _HAS_IPv6 and af == socket.AF_INET6:\n', "                return af, type, proto, '', (host, port, flowinfo, scopeid)\n", '            else:\n', "                return af, type, proto, '', (host, port)\n", '        except OSError:\n', '            pass\n', '\n', '    # "host" is not an IP address.\n', '    return None\n', '\n', '\n', 'def _interleave_addrinfos(addrinfos, first_address_family_count=1):\n', '    """Interleave list of addrinfo tuples by family."""\n', '    # Group addresses by family\n', '    addrinfos_by_family = collections.OrderedDict()\n', '    for addr in addrinfos:\n', '        family = addr[0]\n', '        if family not in addrinfos_by_family:\n', '            addrinfos_by_family[family] = []\n', '        addrinfos_by_family[family].append(addr)\n', '    addrinfos_lists = list(addrinfos_by_family.values())\n', '\n', '    reordered = []\n', '    if first_address_family_count > 1:\n', '        reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])\n', '        del addrinfos_lists[0][:first_address_family_count - 1]\n', '    reordered.extend(\n', '        a for a in itertools.chain.from_iterable(\n', '            itertools.zip_longest(*addrinfos_lists)\n', '        ) if a is not None)\n', '    return reordered\n', '\n', '\n', 'def _run_until_complete_cb(fut):\n', '    if not fut.cancelled():\n', '        exc = fut.exception()\n', '        if isinstance(exc, (SystemExit, KeyboardInterrupt)):\n', '            # Issue #22429: run_forever() already finished, no need to\n', '            # stop it.\n', '            return\n', '    futures._get_loop(fut).stop()\n', '\n', '\n', "if hasattr(socket, 'TCP_NODELAY'):\n", '    def _set_nodelay(sock):\n', '        if (sock.family in {socket.AF_INET, socket.AF_INET6} and\n', '                sock.type == socket.SOCK_STREAM and\n', '                sock.proto == socket.IPPROTO_TCP):\n', '            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n', 'else:\n', '    def _set_nodelay(sock):\n', '        pass\n', '\n', '\n', 'class _SendfileFallbackProtocol(protocols.Protocol):\n', '    def __init__(self, transp):\n', '        if not isinstance(transp, transports._FlowControlMixin):\n', '            raise TypeError("transport should be _FlowControlMixin instance")\n', '        self._transport = transp\n', '        self._proto = transp.get_protocol()\n', '        self._should_resume_reading = transp.is_reading()\n', '        self._should_resume_writing = transp._protocol_paused\n', '        transp.pause_reading()\n', '        transp.set_protocol(self)\n', '        if self._should_resume_writing:\n', '            self._write_ready_fut = self._transport._loop.create_future()\n', '        else:\n', '            self._write_ready_fut = None\n', '\n', '    async def drain(self):\n', '        if self._transport.is_closing():\n', '            raise ConnectionError("Connection closed by peer")\n', '        fut = self._write_ready_fut\n', '        if fut is None:\n', '            return\n', '        await fut\n', '\n', '    def connection_made(self, transport):\n', '        raise RuntimeError("Invalid state: "\n', '                           "connection should have been established already.")\n', '\n', '    def connection_lost(self, exc):\n', '        if self._write_ready_fut is not None:\n', '            # Never happens if peer disconnects after sending the whole content\n', '            # Thus disconnection is always an exception from user perspective\n', '            if exc is None:\n', '                self._write_ready_fut.set_exception(\n', '                    ConnectionError("Connection is closed by peer"))\n', '            else:\n', '                self._write_ready_fut.set_exception(exc)\n', '        self._proto.connection_lost(exc)\n', '\n', '    def pause_writing(self):\n', '        if self._write_ready_fut is not None:\n', '            return\n', '        self._write_ready_fut = self._transport._loop.create_future()\n', '\n', '    def resume_writing(self):\n', '        if self._write_ready_fut is None:\n', '            return\n', '        self._write_ready_fut.set_result(False)\n', '        self._write_ready_fut = None\n', '\n', '    def data_received(self, data):\n', '        raise RuntimeError("Invalid state: reading should be paused")\n', '\n', '    def eof_received(self):\n', '        raise RuntimeError("Invalid state: reading should be paused")\n', '\n', '    async def restore(self):\n', '        self._transport.set_protocol(self._proto)\n', '        if self._should_resume_reading:\n', '            self._transport.resume_reading()\n', '        if self._write_ready_fut is not None:\n', '            # Cancel the future.\n', '            # Basically it has no effect because protocol is switched back,\n', '            # no code should wait for it anymore.\n', '            self._write_ready_fut.cancel()\n', '        if self._should_resume_writing:\n', '            self._proto.resume_writing()\n', '\n', '\n', 'class Server(events.AbstractServer):\n', '\n', '    def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,\n', '                 ssl_handshake_timeout):\n', '        self._loop = loop\n', '        self._sockets = sockets\n', '        self._active_count = 0\n', '        self._waiters = []\n', '        self._protocol_factory = protocol_factory\n', '        self._backlog = backlog\n', '        self._ssl_context = ssl_context\n', '        self._ssl_handshake_timeout = ssl_handshake_timeout\n', '        self._serving = False\n', '        self._serving_forever_fut = None\n', '\n', '    def __repr__(self):\n', "        return f'<{self.__class__.__name__} sockets={self.sockets!r}>'\n", '\n', '    def _attach(self):\n', '        assert self._sockets is not None\n', '        self._active_count += 1\n', '\n', '    def _detach(self):\n', '        assert self._active_count > 0\n', '        self._active_count -= 1\n', '        if self._active_count == 0 and self._sockets is None:\n', '            self._wakeup()\n', '\n', '    def _wakeup(self):\n', '        waiters = self._waiters\n', '        self._waiters = None\n', '        for waiter in waiters:\n', '            if not waiter.done():\n', '                waiter.set_result(waiter)\n', '\n', '    def _start_serving(self):\n', '        if self._serving:\n', '            return\n', '        self._serving = True\n', '        for sock in self._sockets:\n', '            sock.listen(self._backlog)\n', '            self._loop._start_serving(\n', '                self._protocol_factory, sock, self._ssl_context,\n', '                self, self._backlog, self._ssl_handshake_timeout)\n', '\n', '    def get_loop(self):\n', '        return self._loop\n', '\n', '    def is_serving(self):\n', '        return self._serving\n', '\n', '    @property\n', '    def sockets(self):\n', '        if self._sockets is None:\n', '            return ()\n', '        return tuple(trsock.TransportSocket(s) for s in self._sockets)\n', '\n', '    def close(self):\n', '        sockets = self._sockets\n', '        if sockets is None:\n', '            return\n', '        self._sockets = None\n', '\n', '        for sock in sockets:\n', '            self._loop._stop_serving(sock)\n', '\n', '        self._serving = False\n', '\n', '        if (self._serving_forever_fut is not None and\n', '                not self._serving_forever_fut.done()):\n', '            self._serving_forever_fut.cancel()\n', '            self._serving_forever_fut = None\n', '\n', '        if self._active_count == 0:\n', '            self._wakeup()\n', '\n', '    async def start_serving(self):\n', '        self._start_serving()\n', "        # Skip one loop iteration so that all 'loop.add_reader'\n", '        # go through.\n', '        await tasks.sleep(0, loop=self._loop)\n', '\n', '    async def serve_forever(self):\n', '        if self._serving_forever_fut is not None:\n', '            raise RuntimeError(\n', "                f'server {self!r} is already being awaited on serve_forever()')\n", '        if self._sockets is None:\n', "            raise RuntimeError(f'server {self!r} is closed')\n", '\n', '        self._start_serving()\n', '        self._serving_forever_fut = self._loop.create_future()\n', '\n', '        try:\n', '            await self._serving_forever_fut\n', '        except exceptions.CancelledError:\n', '            try:\n', '                self.close()\n', '                await self.wait_closed()\n', '            finally:\n', '                raise\n', '        finally:\n', '            self._serving_forever_fut = None\n', '\n', '    async def wait_closed(self):\n', '        if self._sockets is None or self._waiters is None:\n', '            return\n', '        waiter = self._loop.create_future()\n', '        self._waiters.append(waiter)\n', '        await waiter\n', '\n', '\n', 'class BaseEventLoop(events.AbstractEventLoop):\n', '\n', '    def __init__(self):\n', '        self._timer_cancelled_count = 0\n', '        self._closed = False\n', '        self._stopping = False\n', '        self._ready = collections.deque()\n', '        self._scheduled = []\n', '        self._default_executor = None\n', '        self._internal_fds = 0\n', '        # Identifier of the thread running the event loop, or None if the\n', '        # event loop is not running\n', '        self._thread_id = None\n', "        self._clock_resolution = time.get_clock_info('monotonic').resolution\n", '        self._exception_handler = None\n', '        self.set_debug(coroutines._is_debug_mode())\n', '        # In debug mode, if the execution of a callback or a step of a task\n', '        # exceed this duration in seconds, the slow callback/task is logged.\n', '        self.slow_callback_duration = 0.1\n', '        self._current_handle = None\n', '        self._task_factory = None\n', '        self._coroutine_origin_tracking_enabled = False\n', '        self._coroutine_origin_tracking_saved_depth = None\n', '\n', '        # A weak set of all asynchronous generators that are\n', '        # being iterated by the loop.\n', '        self._asyncgens = weakref.WeakSet()\n', '        # Set to True when `loop.shutdown_asyncgens` is called.\n', '        self._asyncgens_shutdown_called = False\n', '        # Set to True when `loop.shutdown_default_executor` is called.\n', '        self._executor_shutdown_called = False\n', '\n', '    def __repr__(self):\n', '        return (\n', "            f'<{self.__class__.__name__} running={self.is_running()} '\n", "            f'closed={self.is_closed()} debug={self.get_debug()}>'\n", '        )\n', '\n', '    def create_future(self):\n', '        """Create a Future object attached to the loop."""\n', '        return futures.Future(loop=self)\n', '\n', '    def create_task(self, coro, *, name=None):\n', '        """Schedule a coroutine object.\n', '\n', '        Return a task object.\n', '        """\n', '        self._check_closed()\n', '        if self._task_factory is None:\n', '            task = tasks.Task(coro, loop=self, name=name)\n', '            if task._source_traceback:\n', '                del task._source_traceback[-1]\n', '        else:\n', '            task = self._task_factory(self, coro)\n', '            tasks._set_task_name(task, name)\n', '\n', '        return task\n', '\n', '    def set_task_factory(self, factory):\n', '        """Set a task factory that will be used by loop.create_task().\n', '\n', '        If factory is None the default task factory will be set.\n', '\n', '        If factory is a callable, it should have a signature matching\n', "        '(loop, coro)', where 'loop' will be a reference to the active\n", "        event loop, 'coro' will be a coroutine object.  The callable\n", '        must return a Future.\n', '        """\n', '        if factory is not None and not callable(factory):\n', "            raise TypeError('task factory must be a callable or None')\n", '        self._task_factory = factory\n', '\n', '    def get_task_factory(self):\n', '        """Return a task factory, or None if the default one is in use."""\n', '        return self._task_factory\n', '\n', '    def _make_socket_transport(self, sock, protocol, waiter=None, *,\n', '                               extra=None, server=None):\n', '        """Create socket transport."""\n', '        raise NotImplementedError\n', '\n', '    def _make_ssl_transport(\n', '            self, rawsock, protocol, sslcontext, waiter=None,\n', '            *, server_side=False, server_hostname=None,\n', '            extra=None, server=None,\n', '            ssl_handshake_timeout=None,\n', '            call_connection_made=True):\n', '        """Create SSL transport."""\n', '        raise NotImplementedError\n', '\n', '    def _make_datagram_transport(self, sock, protocol,\n', '                                 address=None, waiter=None, extra=None):\n', '        """Create datagram transport."""\n', '        raise NotImplementedError\n', '\n', '    def _make_read_pipe_transport(self, pipe, protocol, waiter=None,\n', '                                  extra=None):\n', '        """Create read pipe transport."""\n', '        raise NotImplementedError\n', '\n', '    def _make_write_pipe_transport(self, pipe, protocol, waiter=None,\n', '                                   extra=None):\n', '        """Create write pipe transport."""\n', '        raise NotImplementedError\n', '\n', '    async def _make_subprocess_transport(self, protocol, args, shell,\n', '                                         stdin, stdout, stderr, bufsize,\n', '                                         extra=None, **kwargs):\n', '        """Create subprocess transport."""\n', '        raise NotImplementedError\n', '\n', '    def _write_to_self(self):\n', '        """Write a byte to self-pipe, to wake up the event loop.\n', '\n', '        This may be called from a different thread.\n', '\n', '        The subclass is responsible for implementing the self-pipe.\n', '        """\n', '        raise NotImplementedError\n', '\n', '    def _process_events(self, event_list):\n', '        """Process selector events."""\n', '        raise NotImplementedError\n', '\n', '    def _check_closed(self):\n', '        if self._closed:\n', "            raise RuntimeError('Event loop is closed')\n", '\n', '    def _check_default_executor(self):\n', '        if self._executor_shutdown_called:\n', "            raise RuntimeError('Executor shutdown has been called')\n", '\n', '    def _asyncgen_finalizer_hook(self, agen):\n', '        self._asyncgens.discard(agen)\n', '        if not self.is_closed():\n', '            self.call_soon_threadsafe(self.create_task, agen.aclose())\n', '\n', '    def _asyncgen_firstiter_hook(self, agen):\n', '        if self._asyncgens_shutdown_called:\n', '            warnings.warn(\n', '                f"asynchronous generator {agen!r} was scheduled after "\n', '                f"loop.shutdown_asyncgens() call",\n', '                ResourceWarning, source=self)\n', '\n', '        self._asyncgens.add(agen)\n', '\n', '    async def shutdown_asyncgens(self):\n', '        """Shutdown all active asynchronous generators."""\n', '        self._asyncgens_shutdown_called = True\n', '\n', '        if not len(self._asyncgens):\n', "            # If Python version is <3.6 or we don't have any asynchronous\n", '            # generators alive.\n', '            return\n', '\n', '        closing_agens = list(self._asyncgens)\n', '        self._asyncgens.clear()\n', '\n', '        results = await tasks.gather(\n', '            *[ag.aclose() for ag in closing_agens],\n', '            return_exceptions=True,\n', '            loop=self)\n', '\n', '        for result, agen in zip(results, closing_agens):\n', '            if isinstance(result, Exception):\n', '                self.call_exception_handler({\n', "                    'message': f'an error occurred during closing of '\n", "                               f'asynchronous generator {agen!r}',\n", "                    'exception': result,\n", "                    'asyncgen': agen\n", '                })\n', '\n', '    async def shutdown_default_executor(self):\n', '        """Schedule the shutdown of the default executor."""\n', '        self._executor_shutdown_called = True\n', '        if self._default_executor is None:\n', '            return\n', '        future = self.create_future()\n', '        thread = threading.Thread(target=self._do_shutdown, args=(future,))\n', '        thread.start()\n', '        try:\n', '            await future\n', '        finally:\n', '            thread.join()\n', '\n', '    def _do_shutdown(self, future):\n', '        try:\n', '            self._default_executor.shutdown(wait=True)\n', '            self.call_soon_threadsafe(future.set_result, None)\n', '        except Exception as ex:\n', '            self.call_soon_threadsafe(future.set_exception, ex)\n', '\n', '    def _check_running(self):\n', '        if self.is_running():\n', "            raise RuntimeError('This event loop is already running')\n", '        if events._get_running_loop() is not None:\n', '            raise RuntimeError(\n', "                'Cannot run the event loop while another loop is running')\n", '\n', '    def run_forever(self):\n', '        """Run until stop() is called."""\n', '        self._check_closed()\n', '        self._check_running()\n', '        self._set_coroutine_origin_tracking(self._debug)\n', '        self._thread_id = threading.get_ident()\n', '\n', '        old_agen_hooks = sys.get_asyncgen_hooks()\n', '        sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,\n', '                               finalizer=self._asyncgen_finalizer_hook)\n', '        try:\n', '            events._set_running_loop(self)\n', '            while True:\n', '                self._run_once()\n', '                if self._stopping:\n', '                    break\n', '        finally:\n', '            self._stopping = False\n', '            self._thread_id = None\n', '            events._set_running_loop(None)\n', '            self._set_coroutine_origin_tracking(False)\n', '            sys.set_asyncgen_hooks(*old_agen_hooks)\n', '\n', '    def run_until_complete(self, future):\n', '        """Run until the Future is done.\n', '\n', '        If the argument is a coroutine, it is wrapped in a Task.\n', '\n', '        WARNING: It would be disastrous to call run_until_complete()\n', '        with the same coroutine twice -- it would wrap it in two\n', "        different Tasks and that can't be good.\n", '\n', "        Return the Future's result, or raise its exception.\n", '        """\n', '        self._check_closed()\n', '        self._check_running()\n', '\n', '        new_task = not futures.isfuture(future)\n', '        future = tasks.ensure_future(future, loop=self)\n', '        if new_task:\n', "            # An exception is raised if the future didn't complete, so there\n", '            # is no need to log the "destroy pending task" message\n', '            future._log_destroy_pending = False\n', '\n', '        future.add_done_callback(_run_until_complete_cb)\n', '        try:\n', '            self.run_forever()\n', '        except:\n', '            if new_task and future.done() and not future.cancelled():\n', '                # The coroutine raised a BaseException. Consume the exception\n', "                # to not log a warning, the caller doesn't have access to the\n", '                # local task.\n', '                future.exception()\n', '            raise\n', '        finally:\n', '            future.remove_done_callback(_run_until_complete_cb)\n', '        if not future.done():\n', "            raise RuntimeError('Event loop stopped before Future completed.')\n", '\n', '        return future.result()\n', '\n', '    def stop(self):\n', '        """Stop running the event loop.\n', '\n', '        Every callback already scheduled will still run.  This simply informs\n', '        run_forever to stop looping after a complete iteration.\n', '        """\n', '        self._stopping = True\n', '\n', '    def close(self):\n', '        """Close the event loop.\n', '\n', '        This clears the queues and shuts down the executor,\n', '        but does not wait for the executor to finish.\n', '\n', '        The event loop must not be running.\n', '        """\n', '        if self.is_running():\n', '            raise RuntimeError("Cannot close a running event loop")\n', '        if self._closed:\n', '            return\n', '        if self._debug:\n', '            logger.debug("Close %r", self)\n', '        self._closed = True\n', '        self._ready.clear()\n', '        self._scheduled.clear()\n', '        self._executor_shutdown_called = True\n', '        executor = self._default_executor\n', '        if executor is not None:\n', '            self._default_executor = None\n', '            executor.shutdown(wait=False)\n', '\n', '    def is_closed(self):\n', '        """Returns True if the event loop was closed."""\n', '        return self._closed\n', '\n', '    def __del__(self, _warn=warnings.warn):\n', '        if not self.is_closed():\n', '            _warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)\n', '            if not self.is_running():\n', '                self.close()\n', '\n', '    def is_running(self):\n', '        """Returns True if the event loop is running."""\n', '        return (self._thread_id is not None)\n', '\n', '    def time(self):\n', '        """Return the time according to the event loop\'s clock.\n', '\n', '        This is a float expressed in seconds since an epoch, but the\n', '        epoch, precision, accuracy and drift are unspecified and may\n', '        differ per event loop.\n', '        """\n', '        return time.monotonic()\n', '\n', '    def call_later(self, delay, callback, *args, context=None):\n', '        """Arrange for a callback to be called at a given time.\n', '\n', '        Return a Handle: an opaque object with a cancel() method that\n', '        can be used to cancel the call.\n', '\n', '        The delay can be an int or float, expressed in seconds.  It is\n', '        always relative to the current time.\n', '\n', '        Each callback will be called exactly once.  If two callbacks\n', '        are scheduled for exactly the same time, it undefined which\n', '        will be called first.\n', '\n', '        Any positional arguments after the callback will be passed to\n', '        the callback when it is called.\n', '        """\n', '        timer = self.call_at(self.time() + delay, callback, *args,\n', '                             context=context)\n', '        if timer._source_traceback:\n', '            del timer._source_traceback[-1]\n', '        return timer\n', '\n', '    def call_at(self, when, callback, *args, context=None):\n', '        """Like call_later(), but uses an absolute time.\n', '\n', "        Absolute time corresponds to the event loop's time() method.\n", '        """\n', '        self._check_closed()\n', '        if self._debug:\n', '            self._check_thread()\n', "            self._check_callback(callback, 'call_at')\n", '        timer = events.TimerHandle(when, callback, args, self, context)\n', '        if timer._source_traceback:\n', '            del timer._source_traceback[-1]\n', '        heapq.heappush(self._scheduled, timer)\n', '        timer._scheduled = True\n', '        return timer\n', '\n', '    def call_soon(self, callback, *args, context=None):\n', '        """Arrange for a callback to be called as soon as possible.\n', '\n', '        This operates as a FIFO queue: callbacks are called in the\n', '        order in which they are registered.  Each callback will be\n', '        called exactly once.\n', '\n', '        Any positional arguments after the callback will be passed to\n', '        the callback when it is called.\n', '        """\n', '        self._check_closed()\n', '        if self._debug:\n', '            self._check_thread()\n', "            self._check_callback(callback, 'call_soon')\n", '        handle = self._call_soon(callback, args, context)\n', '        if handle._source_traceback:\n', '            del handle._source_traceback[-1]\n', '        return handle\n', '\n', '    def _check_callback(self, callback, method):\n', '        if (coroutines.iscoroutine(callback) or\n', '                coroutines.iscoroutinefunction(callback)):\n', '            raise TypeError(\n', '                f"coroutines cannot be used with {method}()")\n', '        if not callable(callback):\n', '            raise TypeError(\n', "                f'a callable object was expected by {method}(), '\n", "                f'got {callback!r}')\n", '\n', '    def _call_soon(self, callback, args, context):\n', '        handle = events.Handle(callback, args, self, context)\n', '        if handle._source_traceback:\n', '            del handle._source_traceback[-1]\n', '        self._ready.append(handle)\n', '        return handle\n', '\n', '    def _check_thread(self):\n', '        """Check that the current thread is the thread running the event loop.\n', '\n', '        Non-thread-safe methods of this class make this assumption and will\n', '        likely behave incorrectly when the assumption is violated.\n', '\n', '        Should only be called when (self._debug == True).  The caller is\n', '        responsible for checking this condition for performance reasons.\n', '        """\n', '        if self._thread_id is None:\n', '            return\n', '        thread_id = threading.get_ident()\n', '        if thread_id != self._thread_id:\n', '            raise RuntimeError(\n', '                "Non-thread-safe operation invoked on an event loop other "\n', '                "than the current one")\n', '\n', '    def call_soon_threadsafe(self, callback, *args, context=None):\n', '        """Like call_soon(), but thread-safe."""\n', '        self._check_closed()\n', '        if self._debug:\n', "            self._check_callback(callback, 'call_soon_threadsafe')\n", '        handle = self._call_soon(callback, args, context)\n', '        if handle._source_traceback:\n', '            del handle._source_traceback[-1]\n', '        self._write_to_self()\n', '        return handle\n', '\n', '    def run_in_executor(self, executor, func, *args):\n', '        self._check_closed()\n', '        if self._debug:\n', "            self._check_callback(func, 'run_in_executor')\n", '        if executor is None:\n', '            executor = self._default_executor\n', '            # Only check when the default executor is being used\n', '            self._check_default_executor()\n', '            if executor is None:\n', '                executor = concurrent.futures.ThreadPoolExecutor(\n', "                    thread_name_prefix='asyncio'\n", '                )\n', '                self._default_executor = executor\n', '        return futures.wrap_future(\n', '            executor.submit(func, *args), loop=self)\n', '\n', '    def set_default_executor(self, executor):\n', '        if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):\n', '            warnings.warn(\n', "                'Using the default executor that is not an instance of '\n", "                'ThreadPoolExecutor is deprecated and will be prohibited '\n", "                'in Python 3.9',\n", '                DeprecationWarning, 2)\n', '        self._default_executor = executor\n', '\n', '    def _getaddrinfo_debug(self, host, port, family, type, proto, flags):\n', '        msg = [f"{host}:{port!r}"]\n', '        if family:\n', "            msg.append(f'family={family!r}')\n", '        if type:\n', "            msg.append(f'type={type!r}')\n", '        if proto:\n', "            msg.append(f'proto={proto!r}')\n", '        if flags:\n', "            msg.append(f'flags={flags!r}')\n", "        msg = ', '.join(msg)\n", "        logger.debug('Get address info %s', msg)\n", '\n', '        t0 = self.time()\n', '        addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)\n', '        dt = self.time() - t0\n', '\n', "        msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'\n", '        if dt >= self.slow_callback_duration:\n', '            logger.info(msg)\n', '        else:\n', '            logger.debug(msg)\n', '        return addrinfo\n', '\n', '    async def getaddrinfo(self, host, port, *,\n', '                          family=0, type=0, proto=0, flags=0):\n', '        if self._debug:\n', '            getaddr_func = self._getaddrinfo_debug\n', '        else:\n', '            getaddr_func = socket.getaddrinfo\n', '\n', '        return await self.run_in_executor(\n', '            None, getaddr_func, host, port, family, type, proto, flags)\n', '\n', '    async def getnameinfo(self, sockaddr, flags=0):\n', '        return await self.run_in_executor(\n', '            None, socket.getnameinfo, sockaddr, flags)\n', '\n', '    async def sock_sendfile(self, sock, file, offset=0, count=None,\n', '                            *, fallback=True):\n', '        if self._debug and sock.gettimeout() != 0:\n', '            raise ValueError("the socket must be non-blocking")\n', '        self._check_sendfile_params(sock, file, offset, count)\n', '        try:\n', '            return await self._sock_sendfile_native(sock, file,\n', '                                                    offset, count)\n', '        except exceptions.SendfileNotAvailableError as exc:\n', '            if not fallback:\n', '                raise\n', '        return await self._sock_sendfile_fallback(sock, file,\n', '                                                  offset, count)\n', '\n', '    async def _sock_sendfile_native(self, sock, file, offset, count):\n', '        # NB: sendfile syscall is not supported for SSL sockets and\n', '        # non-mmap files even if sendfile is supported by OS\n', '        raise exceptions.SendfileNotAvailableError(\n', '            f"syscall sendfile is not available for socket {sock!r} "\n', '            "and file {file!r} combination")\n', '\n', '    async def _sock_sendfile_fallback(self, sock, file, offset, count):\n', '        if offset:\n', '            file.seek(offset)\n', '        blocksize = (\n', '            min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)\n', '            if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE\n', '        )\n', '        buf = bytearray(blocksize)\n', '        total_sent = 0\n', '        try:\n', '            while True:\n', '                if count:\n', '                    blocksize = min(count - total_sent, blocksize)\n', '                    if blocksize <= 0:\n', '                        break\n', '                view = memoryview(buf)[:blocksize]\n', '                read = await self.run_in_executor(None, file.readinto, view)\n', '                if not read:\n', '                    break  # EOF\n', '                await self.sock_sendall(sock, view[:read])\n', '                total_sent += read\n', '            return total_sent\n', '        finally:\n', "            if total_sent > 0 and hasattr(file, 'seek'):\n", '                file.seek(offset + total_sent)\n', '\n', '    def _check_sendfile_params(self, sock, file, offset, count):\n', "        if 'b' not in getattr(file, 'mode', 'b'):\n", '            raise ValueError("file should be opened in binary mode")\n', '        if not sock.type == socket.SOCK_STREAM:\n', '            raise ValueError("only SOCK_STREAM type sockets are supported")\n', '        if count is not None:\n', '            if not isinstance(count, int):\n', '                raise TypeError(\n', '                    "count must be a positive integer (got {!r})".format(count))\n', '            if count <= 0:\n', '                raise ValueError(\n', '                    "count must be a positive integer (got {!r})".format(count))\n', '        if not isinstance(offset, int):\n', '            raise TypeError(\n', '                "offset must be a non-negative integer (got {!r})".format(\n', '                    offset))\n', '        if offset < 0:\n', '            raise ValueError(\n', '                "offset must be a non-negative integer (got {!r})".format(\n', '                    offset))\n', '\n', '    async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):\n', '        """Create, bind and connect one socket."""\n', '        my_exceptions = []\n', '        exceptions.append(my_exceptions)\n', '        family, type_, proto, _, address = addr_info\n', '        sock = None\n', '        try:\n', '            sock = socket.socket(family=family, type=type_, proto=proto)\n', '            sock.setblocking(False)\n', '            if local_addr_infos is not None:\n', '                for _, _, _, _, laddr in local_addr_infos:\n', '                    try:\n', '                        sock.bind(laddr)\n', '                        break\n', '                    except OSError as exc:\n', '                        msg = (\n', "                            f'error while attempting to bind on '\n", "                            f'address {laddr!r}: '\n", "                            f'{exc.strerror.lower()}'\n", '                        )\n', '                        exc = OSError(exc.errno, msg)\n', '                        my_exceptions.append(exc)\n', '                else:  # all bind attempts failed\n', '                    raise my_exceptions.pop()\n', '            await self.sock_connect(sock, address)\n', '            return sock\n', '        except OSError as exc:\n', '            my_exceptions.append(exc)\n', '            if sock is not None:\n', '                sock.close()\n', '            raise\n', '        except:\n', '            if sock is not None:\n', '                sock.close()\n', '            raise\n', '\n', '    async def create_connection(\n', '            self, protocol_factory, host=None, port=None,\n', '            *, ssl=None, family=0,\n', '            proto=0, flags=0, sock=None,\n', '            local_addr=None, server_hostname=None,\n', '            ssl_handshake_timeout=None,\n', '            happy_eyeballs_delay=None, interleave=None):\n', '        """Connect to a TCP server.\n', '\n', '        Create a streaming transport connection to a given Internet host and\n', '        port: socket family AF_INET or socket.AF_INET6 depending on host (or\n', '        family if specified), socket type SOCK_STREAM. protocol_factory must be\n', '        a callable returning a protocol instance.\n', '\n', '        This method is a coroutine which will try to establish the connection\n', '        in the background.  When successful, the coroutine returns a\n', '        (transport, protocol) pair.\n', '        """\n', '        if server_hostname is not None and not ssl:\n', "            raise ValueError('server_hostname is only meaningful with ssl')\n", '\n', '        if server_hostname is None and ssl:\n', '            # Use host as default for server_hostname.  It is an error\n', '            # if host is empty or not set, e.g. when an\n', '            # already-connected socket was passed or when only a port\n', '            # is given.  To avoid this error, you can pass\n', "            # server_hostname='' -- this will bypass the hostname\n", '            # check.  (This also means that if host is a numeric\n', '            # IP/IPv6 address, we will attempt to verify that exact\n', '            # address; this will probably fail, but it is possible to\n', '            # create a certificate for a specific IP address, so we\n', "            # don't judge it here.)\n", '            if not host:\n', "                raise ValueError('You must set server_hostname '\n", "                                 'when using ssl without a host')\n", '            server_hostname = host\n', '\n', '        if ssl_handshake_timeout is not None and not ssl:\n', '            raise ValueError(\n', "                'ssl_handshake_timeout is only meaningful with ssl')\n", '\n', '        if happy_eyeballs_delay is not None and interleave is None:\n', '            # If using happy eyeballs, default to interleave addresses by family\n', '            interleave = 1\n', '\n', '        if host is not None or port is not None:\n', '            if sock is not None:\n', '                raise ValueError(\n', "                    'host/port and sock can not be specified at the same time')\n", '\n', '            infos = await self._ensure_resolved(\n', '                (host, port), family=family,\n', '                type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)\n', '            if not infos:\n', "                raise OSError('getaddrinfo() returned empty list')\n", '\n', '            if local_addr is not None:\n', '                laddr_infos = await self._ensure_resolved(\n', '                    local_addr, family=family,\n', '                    type=socket.SOCK_STREAM, proto=proto,\n', '                    flags=flags, loop=self)\n', '                if not laddr_infos:\n', "                    raise OSError('getaddrinfo() returned empty list')\n", '            else:\n', '                laddr_infos = None\n', '\n', '            if interleave:\n', '                infos = _interleave_addrinfos(infos, interleave)\n', '\n', '            exceptions = []\n', '            if happy_eyeballs_delay is None:\n', '                # not using happy eyeballs\n', '                for addrinfo in infos:\n', '                    try:\n', '                        sock = await self._connect_sock(\n', '                            exceptions, addrinfo, laddr_infos)\n', '                        break\n', '                    except OSError:\n', '                        continue\n', '            else:  # using happy eyeballs\n', '                sock, _, _ = await staggered.staggered_race(\n', '                    (functools.partial(self._connect_sock,\n', '                                       exceptions, addrinfo, laddr_infos)\n', '                     for addrinfo in infos),\n', '                    happy_eyeballs_delay, loop=self)\n', '\n', '            if sock is None:\n', '                exceptions = [exc for sub in exceptions for exc in sub]\n', '                if len(exceptions) == 1:\n', '                    raise exceptions[0]\n', '                else:\n', '                    # If they all have the same str(), raise one.\n', '                    model = str(exceptions[0])\n', '                    if all(str(exc) == model for exc in exceptions):\n', '                        raise exceptions[0]\n', '                    # Raise a combined exception so the user can see all\n', '                    # the various error messages.\n', "                    raise OSError('Multiple exceptions: {}'.format(\n", "                        ', '.join(str(exc) for exc in exceptions)))\n", '\n', '        else:\n', '            if sock is None:\n', '                raise ValueError(\n', "                    'host and port was not specified and no sock specified')\n", '            if sock.type != socket.SOCK_STREAM:\n', '                # We allow AF_INET, AF_INET6, AF_UNIX as long as they\n', '                # are SOCK_STREAM.\n', '                # We support passing AF_UNIX sockets even though we have\n', '                # a dedicated API for that: create_unix_connection.\n', '                # Disallowing AF_UNIX in this method, breaks backwards\n', '                # compatibility.\n', '                raise ValueError(\n', "                    f'A Stream Socket was expected, got {sock!r}')\n", '\n', '        transport, protocol = await self._create_connection_transport(\n', '            sock, protocol_factory, ssl, server_hostname,\n', '            ssl_handshake_timeout=ssl_handshake_timeout)\n', '        if self._debug:\n', '            # Get the socket from the transport because SSL transport closes\n', '            # the old socket and creates a new SSL socket\n', "            sock = transport.get_extra_info('socket')\n", '            logger.debug("%r connected to %s:%r: (%r, %r)",\n', '                         sock, host, port, transport, protocol)\n', '        return transport, protocol\n', '\n', '    async def _create_connection_transport(\n', '            self, sock, protocol_factory, ssl,\n', '            server_hostname, server_side=False,\n', '            ssl_handshake_timeout=None):\n', '\n', '        sock.setblocking(False)\n', '\n', '        protocol = protocol_factory()\n', '        waiter = self.create_future()\n', '        if ssl:\n', '            sslcontext = None if isinstance(ssl, bool) else ssl\n', '            transport = self._make_ssl_transport(\n', '                sock, protocol, sslcontext, waiter,\n', '                server_side=server_side, server_hostname=server_hostname,\n', '                ssl_handshake_timeout=ssl_handshake_timeout)\n', '        else:\n', '            transport = self._make_socket_transport(sock, protocol, waiter)\n', '\n', '        try:\n', '            await waiter\n', '        except:\n', '            transport.close()\n', '            raise\n', '\n', '        return transport, protocol\n', '\n', '    async def sendfile(self, transport, file, offset=0, count=None,\n', '                       *, fallback=True):\n', '        """Send a file to transport.\n', '\n', '        Return the total number of bytes which were sent.\n', '\n', '        The method uses high-performance os.sendfile if available.\n', '\n', '        file must be a regular file object opened in binary mode.\n', '\n', '        offset tells from where to start reading the file. If specified,\n', '        count is the total number of bytes to transmit as opposed to\n', '        sending the file until EOF is reached. File position is updated on\n', '        return or also in case of error in which case file.tell()\n', '        can be used to figure out the number of bytes\n', '        which were sent.\n', '\n', '        fallback set to True makes asyncio to manually read and send\n', '        the file when the platform does not support the sendfile syscall\n', '        (e.g. Windows or SSL socket on Unix).\n', '\n', '        Raise SendfileNotAvailableError if the system does not support\n', '        sendfile syscall and fallback is False.\n', '        """\n', '        if transport.is_closing():\n', '            raise RuntimeError("Transport is closing")\n', "        mode = getattr(transport, '_sendfile_compatible',\n", '                       constants._SendfileMode.UNSUPPORTED)\n', '        if mode is constants._SendfileMode.UNSUPPORTED:\n', '            raise RuntimeError(\n', '                f"sendfile is not supported for transport {transport!r}")\n', '        if mode is constants._SendfileMode.TRY_NATIVE:\n', '            try:\n', '                return await self._sendfile_native(transport, file,\n', '                                                   offset, count)\n', '            except exceptions.SendfileNotAvailableError as exc:\n', '                if not fallback:\n', '                    raise\n', '\n', '        if not fallback:\n', '            raise RuntimeError(\n', '                f"fallback is disabled and native sendfile is not "\n', '                f"supported for transport {transport!r}")\n', '\n', '        return await self._sendfile_fallback(transport, file,\n', '                                             offset, count)\n', '\n', '    async def _sendfile_native(self, transp, file, offset, count):\n', '        raise exceptions.SendfileNotAvailableError(\n', '            "sendfile syscall is not supported")\n', '\n', '    async def _sendfile_fallback(self, transp, file, offset, count):\n', '        if offset:\n', '            file.seek(offset)\n', '        blocksize = min(count, 16384) if count else 16384\n', '        buf = bytearray(blocksize)\n', '        total_sent = 0\n', '        proto = _SendfileFallbackProtocol(transp)\n', '        try:\n', '            while True:\n', '                if count:\n', '                    blocksize = min(count - total_sent, blocksize)\n', '                    if blocksize <= 0:\n', '                        return total_sent\n', '                view = memoryview(buf)[:blocksize]\n', '                read = await self.run_in_executor(None, file.readinto, view)\n', '                if not read:\n', '                    return total_sent  # EOF\n', '                await proto.drain()\n', '                transp.write(view[:read])\n', '                total_sent += read\n', '        finally:\n', "            if total_sent > 0 and hasattr(file, 'seek'):\n", '                file.seek(offset + total_sent)\n', '            await proto.restore()\n', '\n', '    async def start_tls(self, transport, protocol, sslcontext, *,\n', '                        server_side=False,\n', '                        server_hostname=None,\n', '                        ssl_handshake_timeout=None):\n', '        """Upgrade transport to TLS.\n', '\n', '        Return a new transport that *protocol* should start using\n', '        immediately.\n', '        """\n', '        if ssl is None:\n', "            raise RuntimeError('Python ssl module is not available')\n", '\n', '        if not isinstance(sslcontext, ssl.SSLContext):\n', '            raise TypeError(\n', "                f'sslcontext is expected to be an instance of ssl.SSLContext, '\n", "                f'got {sslcontext!r}')\n", '\n', "        if not getattr(transport, '_start_tls_compatible', False):\n", '            raise TypeError(\n', "                f'transport {transport!r} is not supported by start_tls()')\n", '\n', '        waiter = self.create_future()\n', '        ssl_protocol = sslproto.SSLProtocol(\n', '            self, protocol, sslcontext, waiter,\n', '            server_side, server_hostname,\n', '            ssl_handshake_timeout=ssl_handshake_timeout,\n', '            call_connection_made=False)\n', '\n', '        # Pause early so that "ssl_protocol.data_received()" doesn\'t\n', '        # have a chance to get called before "ssl_protocol.connection_made()".\n', '        transport.pause_reading()\n', '\n', '        transport.set_protocol(ssl_protocol)\n', '        conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)\n', '        resume_cb = self.call_soon(transport.resume_reading)\n', '\n', '        try:\n', '            await waiter\n', '        except BaseException:\n', '            transport.close()\n', '            conmade_cb.cancel()\n', '            resume_cb.cancel()\n', '            raise\n', '\n', '        return ssl_protocol._app_transport\n', '\n', '    async def create_datagram_endpoint(self, protocol_factory,\n', '                                       local_addr=None, remote_addr=None, *,\n', '                                       family=0, proto=0, flags=0,\n', '                                       reuse_address=_unset, reuse_port=None,\n', '                                       allow_broadcast=None, sock=None):\n', '        """Create datagram connection."""\n', '        if sock is not None:\n', '            if sock.type != socket.SOCK_DGRAM:\n', '                raise ValueError(\n', "                    f'A UDP Socket was expected, got {sock!r}')\n", '            if (local_addr or remote_addr or\n', '                    family or proto or flags or\n', '                    reuse_port or allow_broadcast):\n', '                # show the problematic kwargs in exception msg\n', '                opts = dict(local_addr=local_addr, remote_addr=remote_addr,\n', '                            family=family, proto=proto, flags=flags,\n', '                            reuse_address=reuse_address, reuse_port=reuse_port,\n', '                            allow_broadcast=allow_broadcast)\n', "                problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)\n", '                raise ValueError(\n', "                    f'socket modifier keyword arguments can not be used '\n", "                    f'when sock is specified. ({problems})')\n", '            sock.setblocking(False)\n', '            r_addr = None\n', '        else:\n', '            if not (local_addr or remote_addr):\n', '                if family == 0:\n', "                    raise ValueError('unexpected address family')\n", '                addr_pairs_info = (((family, proto), (None, None)),)\n', "            elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:\n", '                for addr in (local_addr, remote_addr):\n', '                    if addr is not None and not isinstance(addr, str):\n', "                        raise TypeError('string is expected')\n", '\n', "                if local_addr and local_addr[0] not in (0, '\\x00'):\n", '                    try:\n', '                        if stat.S_ISSOCK(os.stat(local_addr).st_mode):\n', '                            os.remove(local_addr)\n', '                    except FileNotFoundError:\n', '                        pass\n', '                    except OSError as err:\n', '                        # Directory may have permissions only to create socket.\n', "                        logger.error('Unable to check or remove stale UNIX '\n", "                                     'socket %r: %r',\n", '                                     local_addr, err)\n', '\n', '                addr_pairs_info = (((family, proto),\n', '                                    (local_addr, remote_addr)), )\n', '            else:\n', '                # join address by (family, protocol)\n', '                addr_infos = {}  # Using order preserving dict\n', '                for idx, addr in ((0, local_addr), (1, remote_addr)):\n', '                    if addr is not None:\n', '                        assert isinstance(addr, tuple) and len(addr) == 2, (\n', "                            '2-tuple is expected')\n", '\n', '                        infos = await self._ensure_resolved(\n', '                            addr, family=family, type=socket.SOCK_DGRAM,\n', '                            proto=proto, flags=flags, loop=self)\n', '                        if not infos:\n', "                            raise OSError('getaddrinfo() returned empty list')\n", '\n', '                        for fam, _, pro, _, address in infos:\n', '                            key = (fam, pro)\n', '                            if key not in addr_infos:\n', '                                addr_infos[key] = [None, None]\n', '                            addr_infos[key][idx] = address\n', '\n', '                # each addr has to have info for each (family, proto) pair\n', '                addr_pairs_info = [\n', '                    (key, addr_pair) for key, addr_pair in addr_infos.items()\n', '                    if not ((local_addr and addr_pair[0] is None) or\n', '                            (remote_addr and addr_pair[1] is None))]\n', '\n', '                if not addr_pairs_info:\n', "                    raise ValueError('can not get address information')\n", '\n', '            exceptions = []\n', '\n', '            # bpo-37228\n', '            if reuse_address is not _unset:\n', '                if reuse_address:\n', '                    raise ValueError("Passing `reuse_address=True` is no "\n', '                                     "longer supported, as the usage of "\n', '                                     "SO_REUSEPORT in UDP poses a significant "\n', '                                     "security concern.")\n', '                else:\n', '                    warnings.warn("The *reuse_address* parameter has been "\n', '                                  "deprecated as of 3.5.10 and is scheduled "\n', '                                  "for removal in 3.11.", DeprecationWarning,\n', '                                  stacklevel=2)\n', '\n', '            for ((family, proto),\n', '                 (local_address, remote_address)) in addr_pairs_info:\n', '                sock = None\n', '                r_addr = None\n', '                try:\n', '                    sock = socket.socket(\n', '                        family=family, type=socket.SOCK_DGRAM, proto=proto)\n', '                    if reuse_port:\n', '                        _set_reuseport(sock)\n', '                    if allow_broadcast:\n', '                        sock.setsockopt(\n', '                            socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n', '                    sock.setblocking(False)\n', '\n', '                    if local_addr:\n', '                        sock.bind(local_address)\n', '                    if remote_addr:\n', '                        if not allow_broadcast:\n', '                            await self.sock_connect(sock, remote_address)\n', '                        r_addr = remote_address\n', '                except OSError as exc:\n', '                    if sock is not None:\n', '                        sock.close()\n', '                    exceptions.append(exc)\n', '                except:\n', '                    if sock is not None:\n', '                        sock.close()\n', '                    raise\n', '                else:\n', '                    break\n', '            else:\n', '                raise exceptions[0]\n', '\n', '        protocol = protocol_factory()\n', '        waiter = self.create_future()\n', '        transport = self._make_datagram_transport(\n', '            sock, protocol, r_addr, waiter)\n', '        if self._debug:\n', '            if local_addr:\n', '                logger.info("Datagram endpoint local_addr=%r remote_addr=%r "\n', '                            "created: (%r, %r)",\n', '                            local_addr, remote_addr, transport, protocol)\n', '            else:\n', '                logger.debug("Datagram endpoint remote_addr=%r created: "\n', '                             "(%r, %r)",\n', '                             remote_addr, transport, protocol)\n', '\n', '        try:\n', '            await waiter\n', '        except:\n', '            transport.close()\n', '            raise\n', '\n', '        return transport, protocol\n', '\n', '    async def _ensure_resolved(self, address, *,\n', '                               family=0, type=socket.SOCK_STREAM,\n', '                               proto=0, flags=0, loop):\n', '        host, port = address[:2]\n', '        info = _ipaddr_info(host, port, family, type, proto, *address[2:])\n', '        if info is not None:\n', '            # "host" is already a resolved IP.\n', '            return [info]\n', '        else:\n', '            return await loop.getaddrinfo(host, port, family=family, type=type,\n', '                                          proto=proto, flags=flags)\n', '\n', '    async def _create_server_getaddrinfo(self, host, port, family, flags):\n', '        infos = await self._ensure_resolved((host, port), family=family,\n', '                                            type=socket.SOCK_STREAM,\n', '                                            flags=flags, loop=self)\n', '        if not infos:\n', "            raise OSError(f'getaddrinfo({host!r}) returned empty list')\n", '        return infos\n', '\n', '    async def create_server(\n', '            self, protocol_factory, host=None, port=None,\n', '            *,\n', '            family=socket.AF_UNSPEC,\n', '            flags=socket.AI_PASSIVE,\n', '            sock=None,\n', '            backlog=100,\n', '            ssl=None,\n', '            reuse_address=None,\n', '            reuse_port=None,\n', '            ssl_handshake_timeout=None,\n', '            start_serving=True):\n', '        """Create a TCP server.\n', '\n', '        The host parameter can be a string, in that case the TCP server is\n', '        bound to host and port.\n', '\n', '        The host parameter can also be a sequence of strings and in that case\n', '        the TCP server is bound to all hosts of the sequence. If a host\n', '        appears multiple times (possibly indirectly e.g. when hostnames\n', '        resolve to the same IP address), the server is only bound once to that\n', '        host.\n', '\n', '        Return a Server object which can be used to stop the service.\n', '\n', '        This method is a coroutine.\n', '        """\n', '        if isinstance(ssl, bool):\n', "            raise TypeError('ssl argument must be an SSLContext or None')\n", '\n', '        if ssl_handshake_timeout is not None and ssl is None:\n', '            raise ValueError(\n', "                'ssl_handshake_timeout is only meaningful with ssl')\n", '\n', '        if host is not None or port is not None:\n', '            if sock is not None:\n', '                raise ValueError(\n', "                    'host/port and sock can not be specified at the same time')\n", '\n', '            if reuse_address is None:\n', "                reuse_address = os.name == 'posix' and sys.platform != 'cygwin'\n", '            sockets = []\n', "            if host == '':\n", '                hosts = [None]\n', '            elif (isinstance(host, str) or\n', '                  not isinstance(host, collections.abc.Iterable)):\n', '                hosts = [host]\n', '            else:\n', '                hosts = host\n', '\n', '            fs = [self._create_server_getaddrinfo(host, port, family=family,\n', '                                                  flags=flags)\n', '                  for host in hosts]\n', '            infos = await tasks.gather(*fs, loop=self)\n', '            infos = set(itertools.chain.from_iterable(infos))\n', '\n', '            completed = False\n', '            try:\n', '                for res in infos:\n', '                    af, socktype, proto, canonname, sa = res\n', '                    try:\n', '                        sock = socket.socket(af, socktype, proto)\n', '                    except socket.error:\n', "                        # Assume it's a bad family/type/protocol combination.\n", '                        if self._debug:\n', "                            logger.warning('create_server() failed to create '\n", "                                           'socket.socket(%r, %r, %r)',\n", '                                           af, socktype, proto, exc_info=True)\n', '                        continue\n', '                    sockets.append(sock)\n', '                    if reuse_address:\n', '                        sock.setsockopt(\n', '                            socket.SOL_SOCKET, socket.SO_REUSEADDR, True)\n', '                    if reuse_port:\n', '                        _set_reuseport(sock)\n', '                    # Disable IPv4/IPv6 dual stack support (enabled by\n', '                    # default on Linux) which makes a single socket\n', '                    # listen on both address families.\n', '                    if (_HAS_IPv6 and\n', '                            af == socket.AF_INET6 and\n', "                            hasattr(socket, 'IPPROTO_IPV6')):\n", '                        sock.setsockopt(socket.IPPROTO_IPV6,\n', '                                        socket.IPV6_V6ONLY,\n', '                                        True)\n', '                    try:\n', '                        sock.bind(sa)\n', '                    except OSError as err:\n', "                        raise OSError(err.errno, 'error while attempting '\n", "                                      'to bind on address %r: %s'\n", '                                      % (sa, err.strerror.lower())) from None\n', '                completed = True\n', '            finally:\n', '                if not completed:\n', '                    for sock in sockets:\n', '                        sock.close()\n', '        else:\n', '            if sock is None:\n', "                raise ValueError('Neither host/port nor sock were specified')\n", '            if sock.type != socket.SOCK_STREAM:\n', "                raise ValueError(f'A Stream Socket was expected, got {sock!r}')\n", '            sockets = [sock]\n', '\n', '        for sock in sockets:\n', '            sock.setblocking(False)\n', '\n', '        server = Server(self, sockets, protocol_factory,\n', '                        ssl, backlog, ssl_handshake_timeout)\n', '        if start_serving:\n', '            server._start_serving()\n', "            # Skip one loop iteration so that all 'loop.add_reader'\n", '            # go through.\n', '            await tasks.sleep(0, loop=self)\n', '\n', '        if self._debug:\n', '            logger.info("%r is serving", server)\n', '        return server\n', '\n', '    async def connect_accepted_socket(\n', '            self, protocol_factory, sock,\n', '            *, ssl=None,\n', '            ssl_handshake_timeout=None):\n', '        """Handle an accepted connection.\n', '\n', '        This is used by servers that accept connections outside of\n', '        asyncio but that use asyncio to handle connections.\n', '\n', '        This method is a coroutine.  When completed, the coroutine\n', '        returns a (transport, protocol) pair.\n', '        """\n', '        if sock.type != socket.SOCK_STREAM:\n', "            raise ValueError(f'A Stream Socket was expected, got {sock!r}')\n", '\n', '        if ssl_handshake_timeout is not None and not ssl:\n', '            raise ValueError(\n', "                'ssl_handshake_timeout is only meaningful with ssl')\n", '\n', '        transport, protocol = await self._create_connection_transport(\n', "            sock, protocol_factory, ssl, '', server_side=True,\n", '            ssl_handshake_timeout=ssl_handshake_timeout)\n', '        if self._debug:\n', '            # Get the socket from the transport because SSL transport closes\n', '            # the old socket and creates a new SSL socket\n', "            sock = transport.get_extra_info('socket')\n", '            logger.debug("%r handled: (%r, %r)", sock, transport, protocol)\n', '        return transport, protocol\n', '\n', '    async def connect_read_pipe(self, protocol_factory, pipe):\n', '        protocol = protocol_factory()\n', '        waiter = self.create_future()\n', '        transport = self._make_read_pipe_transport(pipe, protocol, waiter)\n', '\n', '        try:\n', '            await waiter\n', '        except:\n', '            transport.close()\n', '            raise\n', '\n', '        if self._debug:\n', "            logger.debug('Read pipe %r connected: (%r, %r)',\n", '                         pipe.fileno(), transport, protocol)\n', '        return transport, protocol\n', '\n', '    async def connect_write_pipe(self, protocol_factory, pipe):\n', '        protocol = protocol_factory()\n', '        waiter = self.create_future()\n', '        transport = self._make_write_pipe_transport(pipe, protocol, waiter)\n', '\n', '        try:\n', '            await waiter\n', '        except:\n', '            transport.close()\n', '            raise\n', '\n', '        if self._debug:\n', "            logger.debug('Write pipe %r connected: (%r, %r)',\n", '                         pipe.fileno(), transport, protocol)\n', '        return transport, protocol\n', '\n', '    def _log_subprocess(self, msg, stdin, stdout, stderr):\n', '        info = [msg]\n', '        if stdin is not None:\n', "            info.append(f'stdin={_format_pipe(stdin)}')\n", '        if stdout is not None and stderr == subprocess.STDOUT:\n', "            info.append(f'stdout=stderr={_format_pipe(stdout)}')\n", '        else:\n', '            if stdout is not None:\n', "                info.append(f'stdout={_format_pipe(stdout)}')\n", '            if stderr is not None:\n', "                info.append(f'stderr={_format_pipe(stderr)}')\n", "        logger.debug(' '.join(info))\n", '\n', '    async def subprocess_shell(self, protocol_factory, cmd, *,\n', '                               stdin=subprocess.PIPE,\n', '                               stdout=subprocess.PIPE,\n', '                               stderr=subprocess.PIPE,\n', '                               universal_newlines=False,\n', '                               shell=True, bufsize=0,\n', '                               encoding=None, errors=None, text=None,\n', '                               **kwargs):\n', '        if not isinstance(cmd, (bytes, str)):\n', '            raise ValueError("cmd must be a string")\n', '        if universal_newlines:\n', '            raise ValueError("universal_newlines must be False")\n', '        if not shell:\n', '            raise ValueError("shell must be True")\n', '        if bufsize != 0:\n', '            raise ValueError("bufsize must be 0")\n', '        if text:\n', '            raise ValueError("text must be False")\n', '        if encoding is not None:\n', '            raise ValueError("encoding must be None")\n', '        if errors is not None:\n', '            raise ValueError("errors must be None")\n', '\n', '        protocol = protocol_factory()\n', '        debug_log = None\n', '        if self._debug:\n', "            # don't log parameters: they may contain sensitive information\n", '            # (password) and may be too long\n', "            debug_log = 'run shell command %r' % cmd\n", '            self._log_subprocess(debug_log, stdin, stdout, stderr)\n', '        transport = await self._make_subprocess_transport(\n', '            protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)\n', '        if self._debug and debug_log is not None:\n', "            logger.info('%s: %r', debug_log, transport)\n", '        return transport, protocol\n', '\n', '    async def subprocess_exec(self, protocol_factory, program, *args,\n', '                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n', '                              stderr=subprocess.PIPE, universal_newlines=False,\n', '                              shell=False, bufsize=0,\n', '                              encoding=None, errors=None, text=None,\n', '                              **kwargs):\n', '        if universal_newlines:\n', '            raise ValueError("universal_newlines must be False")\n', '        if shell:\n', '            raise ValueError("shell must be False")\n', '        if bufsize != 0:\n', '            raise ValueError("bufsize must be 0")\n', '        if text:\n', '            raise ValueError("text must be False")\n', '        if encoding is not None:\n', '            raise ValueError("encoding must be None")\n', '        if errors is not None:\n', '            raise ValueError("errors must be None")\n', '\n', '        popen_args = (program,) + args\n', '        protocol = protocol_factory()\n', '        debug_log = None\n', '        if self._debug:\n', "            # don't log parameters: they may contain sensitive information\n", '            # (password) and may be too long\n', "            debug_log = f'execute program {program!r}'\n", '            self._log_subprocess(debug_log, stdin, stdout, stderr)\n', '        transport = await self._make_subprocess_transport(\n', '            protocol, popen_args, False, stdin, stdout, stderr,\n', '            bufsize, **kwargs)\n', '        if self._debug and debug_log is not None:\n', "            logger.info('%s: %r', debug_log, transport)\n", '        return transport, protocol\n', '\n', '    def get_exception_handler(self):\n', '        """Return an exception handler, or None if the default one is in use.\n', '        """\n', '        return self._exception_handler\n', '\n', '    def set_exception_handler(self, handler):\n', '        """Set handler as the new event loop exception handler.\n', '\n', '        If handler is None, the default exception handler will\n', '        be set.\n', '\n', '        If handler is a callable object, it should have a\n', "        signature matching '(loop, context)', where 'loop'\n", "        will be a reference to the active event loop, 'context'\n", '        will be a dict object (see `call_exception_handler()`\n', '        documentation for details about context).\n', '        """\n', '        if handler is not None and not callable(handler):\n', "            raise TypeError(f'A callable object or None is expected, '\n", "                            f'got {handler!r}')\n", '        self._exception_handler = handler\n', '\n', '    def default_exception_handler(self, context):\n', '        """Default exception handler.\n', '\n', '        This is called when an exception occurs and no exception\n', '        handler is set, and can be called by a custom exception\n', '        handler that wants to defer to the default behavior.\n', '\n', '        This default handler logs the error message and other\n', '        context-dependent information.  In debug mode, a truncated\n', '        stack trace is also appended showing where the given object\n', '        (e.g. a handle or future or task) was created, if any.\n', '\n', '        The context parameter has the same meaning as in\n', '        `call_exception_handler()`.\n', '        """\n', "        message = context.get('message')\n", '        if not message:\n', "            message = 'Unhandled exception in event loop'\n", '\n', "        exception = context.get('exception')\n", '        if exception is not None:\n', '            exc_info = (type(exception), exception, exception.__traceback__)\n', '        else:\n', '            exc_info = False\n', '\n', "        if ('source_traceback' not in context and\n", '                self._current_handle is not None and\n', '                self._current_handle._source_traceback):\n', "            context['handle_traceback'] = \\\n", '                self._current_handle._source_traceback\n', '\n', '        log_lines = [message]\n', '        for key in sorted(context):\n', "            if key in {'message', 'exception'}:\n", '                continue\n', '            value = context[key]\n', "            if key == 'source_traceback':\n", "                tb = ''.join(traceback.format_list(value))\n", "                value = 'Object created at (most recent call last):\\n'\n", '                value += tb.rstrip()\n', "            elif key == 'handle_traceback':\n", "                tb = ''.join(traceback.format_list(value))\n", "                value = 'Handle created at (most recent call last):\\n'\n", '                value += tb.rstrip()\n', '            else:\n', '                value = repr(value)\n', "            log_lines.append(f'{key}: {value}')\n", '\n', "        logger.error('\\n'.join(log_lines), exc_info=exc_info)\n", '\n', '    def call_exception_handler(self, context):\n', '        """Call the current event loop\'s exception handler.\n', '\n', '        The context argument is a dict containing the following keys:\n', '\n', "        - 'message': Error message;\n", "        - 'exception' (optional): Exception object;\n", "        - 'future' (optional): Future instance;\n", "        - 'task' (optional): Task instance;\n", "        - 'handle' (optional): Handle instance;\n", "        - 'protocol' (optional): Protocol instance;\n", "        - 'transport' (optional): Transport instance;\n", "        - 'socket' (optional): Socket instance;\n", "        - 'asyncgen' (optional): Asynchronous generator that caused\n", '                                 the exception.\n', '\n', '        New keys maybe introduced in the future.\n', '\n', '        Note: do not overload this method in an event loop subclass.\n', '        For custom exception handling, use the\n', '        `set_exception_handler()` method.\n', '        """\n', '        if self._exception_handler is None:\n', '            try:\n', '                self.default_exception_handler(context)\n', '            except (SystemExit, KeyboardInterrupt):\n', '                raise\n', '            except BaseException:\n', '                # Second protection layer for unexpected errors\n', '                # in the default implementation, as well as for subclassed\n', '                # event loops with overloaded "default_exception_handler".\n', "                logger.error('Exception in default exception handler',\n", '                             exc_info=True)\n', '        else:\n', '            try:\n', '                self._exception_handler(self, context)\n', '            except (SystemExit, KeyboardInterrupt):\n', '                raise\n', '            except BaseException as exc:\n', '                # Exception in the user set custom exception handler.\n', '                try:\n', "                    # Let's try default handler.\n", '                    self.default_exception_handler({\n', "                        'message': 'Unhandled error in exception handler',\n", "                        'exception': exc,\n", "                        'context': context,\n", '                    })\n', '                except (SystemExit, KeyboardInterrupt):\n', '                    raise\n', '                except BaseException:\n', "                    # Guard 'default_exception_handler' in case it is\n", '                    # overloaded.\n', "                    logger.error('Exception in default exception handler '\n", "                                 'while handling an unexpected error '\n", "                                 'in custom exception handler',\n", '                                 exc_info=True)\n', '\n', '    def _add_callback(self, handle):\n', '        """Add a Handle to _scheduled (TimerHandle) or _ready."""\n', "        assert isinstance(handle, events.Handle), 'A Handle is required here'\n", '        if handle._cancelled:\n', '            return\n', '        assert not isinstance(handle, events.TimerHandle)\n', '        self._ready.append(handle)\n', '\n', '    def _add_callback_signalsafe(self, handle):\n', '        """Like _add_callback() but called from a signal handler."""\n', '        self._add_callback(handle)\n', '        self._write_to_self()\n', '\n', '    def _timer_handle_cancelled(self, handle):\n', '        """Notification that a TimerHandle has been cancelled."""\n', '        if handle._scheduled:\n', '            self._timer_cancelled_count += 1\n', '\n', '    def _run_once(self):\n', '        """Run one full iteration of the event loop.\n', '\n', '        This calls all currently ready callbacks, polls for I/O,\n', '        schedules the resulting callbacks, and finally schedules\n', "        'call_later' callbacks.\n", '        """\n', '\n', '        sched_count = len(self._scheduled)\n', '        if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and\n', '            self._timer_cancelled_count / sched_count >\n', '                _MIN_CANCELLED_TIMER_HANDLES_FRACTION):\n', '            # Remove delayed calls that were cancelled if their number\n', '            # is too high\n', '            new_scheduled = []\n', '            for handle in self._scheduled:\n', '                if handle._cancelled:\n', '                    handle._scheduled = False\n', '                else:\n', '                    new_scheduled.append(handle)\n', '\n', '            heapq.heapify(new_scheduled)\n', '            self._scheduled = new_scheduled\n', '            self._timer_cancelled_count = 0\n', '        else:\n', '            # Remove delayed calls that were cancelled from head of queue.\n', '            while self._scheduled and self._scheduled[0]._cancelled:\n', '                self._timer_cancelled_count -= 1\n', '                handle = heapq.heappop(self._scheduled)\n', '                handle._scheduled = False\n', '\n', '        timeout = None\n', '        if self._ready or self._stopping:\n', '            timeout = 0\n', '        elif self._scheduled:\n', '            # Compute the desired timeout.\n', '            when = self._scheduled[0]._when\n', '            timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)\n', '\n', '        event_list = self._selector.select(timeout)\n', '        self._process_events(event_list)\n', '\n', "        # Handle 'later' callbacks that are ready.\n", '        end_time = self.time() + self._clock_resolution\n', '        while self._scheduled:\n', '            handle = self._scheduled[0]\n', '            if handle._when >= end_time:\n', '                break\n', '            handle = heapq.heappop(self._scheduled)\n', '            handle._scheduled = False\n', '            self._ready.append(handle)\n', '\n', '        # This is the only place where callbacks are actually *called*.\n', '        # All other places just add them to ready.\n', '        # Note: We run all currently scheduled callbacks, but not any\n', '        # callbacks scheduled by callbacks run this time around --\n', '        # they will be run the next time (after another I/O poll).\n', '        # Use an idiom that is thread-safe without using locks.\n', '        ntodo = len(self._ready)\n', '        for i in range(ntodo):\n', '            handle = self._ready.popleft()\n', '            if handle._cancelled:\n', '                continue\n', '            if self._debug:\n', '                try:\n', '                    self._current_handle = handle\n', '                    t0 = self.time()\n', '                    handle._run()\n', '                    dt = self.time() - t0\n', '                    if dt >= self.slow_callback_duration:\n', "                        logger.warning('Executing %s took %.3f seconds',\n", '                                       _format_handle(handle), dt)\n', '                finally:\n', '                    self._current_handle = None\n', '            else:\n', '                handle._run()\n', '        handle = None  # Needed to break cycles when an exception occurs.\n', '\n', '    def _set_coroutine_origin_tracking(self, enabled):\n', '        if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):\n', '            return\n', '\n', '        if enabled:\n', '            self._coroutine_origin_tracking_saved_depth = (\n', '                sys.get_coroutine_origin_tracking_depth())\n', '            sys.set_coroutine_origin_tracking_depth(\n', '                constants.DEBUG_STACK_DEPTH)\n', '        else:\n', '            sys.set_coroutine_origin_tracking_depth(\n', '                self._coroutine_origin_tracking_saved_depth)\n', '\n', '        self._coroutine_origin_tracking_enabled = enabled\n', '\n', '    def get_debug(self):\n', '        return self._debug\n', '\n', '    def set_debug(self, enabled):\n', '        self._debug = enabled\n', '\n', '        if self.is_running():\n', '            self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)\n'], '/nix/store/cgxc3jz7idrb1wnb2lard9rvcx6aw2si-python3-3.9.6/lib/python3.9/asyncio/base_events.py'), '/nix/store/42a8c7fk04zjmk0ckvf6ljiggn0hmf4f-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/kindergarten.py': (1100, 1.0, ['from .config import Config\n', 'from .server import Server\n', '\n', 'import asyncio\n', 'import json\n', 'import logging\n', 'import signal\n', 'import sys\n', '\n', 'log = logging.getLogger("amethyst.kindergarten")\n', '\n', '\n', 'class ServerManager():\n', '    def __init__(self, config_path):\n', '        self.config_path = config_path\n', '        self.config = Config.from_config(self._get_config())\n', '        self.server = Server(self.config)\n', '\n', '    def _get_config(self):\n', '        with open(self.config_path) as f:\n', '            return json.load(f)\n', '\n', '    def reconfigure(self):\n', '        log.info("Received HUP; reloading configuration.")\n', '\n', '        self.config.load(self._get_config())\n', '\n', '        for host in self.config.hosts:\n', '            host.tls.clear_context_cache()\n', '\n', '    def start(self):\n', '        loop = asyncio.get_event_loop()\n', '        loop.add_signal_handler(signal.SIGHUP, self.reconfigure)\n', '\n', '        log.info(f"Starting server on port {self.config.port}")\n', '\n', '        loop.run_until_complete(self.server.server)\n', '        loop.run_forever()\n', '\n', '\n', 'def cli():\n', '    logging.basicConfig(level=logging.INFO)\n', '    ServerManager(sys.argv[1]).start()\n', '\n', '\n', 'if __name__ == "__main__":\n', '    cli()\n'], '/nix/store/42a8c7fk04zjmk0ckvf6ljiggn0hmf4f-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/kindergarten.py'), '/nix/store/42a8c7fk04zjmk0ckvf6ljiggn0hmf4f-amethyst-0.0.1/bin/.amethyst-wrapped': (1272, 1.0, ['#!/nix/store/cgxc3jz7idrb1wnb2lard9rvcx6aw2si-python3-3.9.6/bin/python3.9\n', '# -*- coding: utf-8 -*-\n', "import sys;import site;import functools;sys.argv[0] = '/nix/store/42a8c7fk04zjmk0ckvf6ljiggn0hmf4f-amethyst-0.0.1/bin/amethyst';functools.reduce(lambda k, p: site.addsitedir(p, k), ['/nix/store/42a8c7fk04zjmk0ckvf6ljiggn0hmf4f-amethyst-0.0.1/lib/python3.9/site-packages','/nix/store/09a9xkhnfgpn6sfq2ryb3g81i0vikkkp-python3.9-packaging-20.9/lib/python3.9/site-packages','/nix/store/l466cxk1dkpn3jbx3wirraf8abfl183l-python3.9-pyparsing-2.4.7/lib/python3.9/site-packages','/nix/store/ag9b795lsvi7n5lys6zjpr03i5wy6cgq-python3.9-six-1.16.0/lib/python3.9/site-packages','/nix/store/0hgwshx1mb9v68rm1jrxclvww6hlqcrw-python3.9-pycparser-2.20/lib/python3.9/site-packages','/nix/store/py7c0mw8dgz9m00nsliglsbw5ns0f0pf-python3.9-cffi-1.14.6/lib/python3.9/site-packages','/nix/store/xf8caqay0z8rzqjb634nflk6n8qg53fg-python3.9-cryptography-3.4.8/lib/python3.9/site-packages','/nix/store/inim7ysrgjydvfwzdlh373mr5yg5993m-python3.9-amethyst_extensions-0.0.1/lib/python3.9/site-packages'], site._init_pathinfo());\n", 'import re\n', 'import sys\n', 'from amethyst.kindergarten import cli\n', "if __name__ == '__main__':\n", "    sys.argv[0] = re.sub(r'(-script\\.pyw|\\.exe)?


, '', sys.argv[0])\n", '    sys.exit(cli())\n'], '/nix/store/42a8c7fk04zjmk0ckvf6ljiggn0hmf4f-amethyst-0.0.1/bin/.amethyst-wrapped'), '/nix/store/cgxc3jz7idrb1wnb2lard9rvcx6aw2si-python3-3.9.6/lib/python3.9/asyncio/streams.py': (26656, 1.0, ['__all__ = (\n', "    'StreamReader', 'StreamWriter', 'StreamReaderProtocol',\n", "    'open_connection', 'start_server')\n", '\n', 'import socket\n', 'import sys\n', 'import warnings\n', 'import weakref\n', '\n', "if hasattr(socket, 'AF_UNIX'):\n", "    __all__ += ('open_unix_connection', 'start_unix_server')\n", '\n', 'from . import coroutines\n', 'from . import events\n', 'from . import exceptions\n', 'from . import format_helpers\n', 'from . import protocols\n', 'from .log import logger\n', 'from .tasks import sleep\n', '\n', '\n', '_DEFAULT_LIMIT = 2 ** 16  # 64 KiB\n', '\n', '\n', 'async def open_connection(host=None, port=None, *,\n', '                          loop=None, limit=_DEFAULT_LIMIT, **kwds):\n', '    """A wrapper for create_connection() returning a (reader, writer) pair.\n', '\n', '    The reader returned is a StreamReader instance; the writer is a\n', '    StreamWriter instance.\n', '\n', '    The arguments are all the usual arguments to create_connection()\n', '    except protocol_factory; most common are positional host and port,\n', '    with various optional keyword arguments following.\n', '\n', '    Additional optional keyword arguments are loop (to set the event loop\n', '    instance to use) and limit (to set the buffer limit passed to the\n', '    StreamReader).\n', '\n', '    (If you want to customize the StreamReader and/or\n', "    StreamReaderProtocol classes, just copy the code -- there's\n", '    really nothing special here except some convenience.)\n', '    """\n', '    if loop is None:\n', '        loop = events.get_event_loop()\n', '    else:\n', '        warnings.warn("The loop argument is deprecated since Python 3.8, "\n', '                      "and scheduled for removal in Python 3.10.",\n', '                      DeprecationWarning, stacklevel=2)\n', '    reader = StreamReader(limit=limit, loop=loop)\n', '    protocol = StreamReaderProtocol(reader, loop=loop)\n', '    transport, _ = await loop.create_connection(\n', '        lambda: protocol, host, port, **kwds)\n', '    writer = StreamWriter(transport, protocol, reader, loop)\n', '    return reader, writer\n', '\n', '\n', 'async def start_server(client_connected_cb, host=None, port=None, *,\n', '                       loop=None, limit=_DEFAULT_LIMIT, **kwds):\n', '    """Start a socket server, call back for each client connected.\n', '\n', '    The first parameter, `client_connected_cb`, takes two parameters:\n', '    client_reader, client_writer.  client_reader is a StreamReader\n', '    object, while client_writer is a StreamWriter object.  This\n', '    parameter can either be a plain callback function or a coroutine;\n', '    if it is a coroutine, it will be automatically converted into a\n', '    Task.\n', '\n', '    The rest of the arguments are all the usual arguments to\n', '    loop.create_server() except protocol_factory; most common are\n', '    positional host and port, with various optional keyword arguments\n', '    following.  The return value is the same as loop.create_server().\n', '\n', '    Additional optional keyword arguments are loop (to set the event loop\n', '    instance to use) and limit (to set the buffer limit passed to the\n', '    StreamReader).\n', '\n', '    The return value is the same as loop.create_server(), i.e. a\n', '    Server object which can be used to stop the service.\n', '    """\n', '    if loop is None:\n', '        loop = events.get_event_loop()\n', '    else:\n', '        warnings.warn("The loop argument is deprecated since Python 3.8, "\n', '                      "and scheduled for removal in Python 3.10.",\n', '                      DeprecationWarning, stacklevel=2)\n', '\n', '    def factory():\n', '        reader = StreamReader(limit=limit, loop=loop)\n', '        protocol = StreamReaderProtocol(reader, client_connected_cb,\n', '                                        loop=loop)\n', '        return protocol\n', '\n', '    return await loop.create_server(factory, host, port, **kwds)\n', '\n', '\n', "if hasattr(socket, 'AF_UNIX'):\n", '    # UNIX Domain Sockets are supported on this platform\n', '\n', '    async def open_unix_connection(path=None, *,\n', '                                   loop=None, limit=_DEFAULT_LIMIT, **kwds):\n', '        """Similar to `open_connection` but works with UNIX Domain Sockets."""\n', '        if loop is None:\n', '            loop = events.get_event_loop()\n', '        else:\n', '            warnings.warn("The loop argument is deprecated since Python 3.8, "\n', '                          "and scheduled for removal in Python 3.10.",\n', '                          DeprecationWarning, stacklevel=2)\n', '        reader = StreamReader(limit=limit, loop=loop)\n', '        protocol = StreamReaderProtocol(reader, loop=loop)\n', '        transport, _ = await loop.create_unix_connection(\n', '            lambda: protocol, path, **kwds)\n', '        writer = StreamWriter(transport, protocol, reader, loop)\n', '        return reader, writer\n', '\n', '    async def start_unix_server(client_connected_cb, path=None, *,\n', '                                loop=None, limit=_DEFAULT_LIMIT, **kwds):\n', '        """Similar to `start_server` but works with UNIX Domain Sockets."""\n', '        if loop is None:\n', '            loop = events.get_event_loop()\n', '        else:\n', '            warnings.warn("The loop argument is deprecated since Python 3.8, "\n', '                          "and scheduled for removal in Python 3.10.",\n', '                          DeprecationWarning, stacklevel=2)\n', '\n', '        def factory():\n', '            reader = StreamReader(limit=limit, loop=loop)\n', '            protocol = StreamReaderProtocol(reader, client_connected_cb,\n', '                                            loop=loop)\n', '            return protocol\n', '\n', '        return await loop.create_unix_server(factory, path, **kwds)\n', '\n', '\n', 'class FlowControlMixin(protocols.Protocol):\n', '    """Reusable flow control logic for StreamWriter.drain().\n', '\n', '    This implements the protocol methods pause_writing(),\n', '    resume_writing() and connection_lost().  If the subclass overrides\n', '    these it must call the super methods.\n', '\n', '    StreamWriter.drain() must wait for _drain_helper() coroutine.\n', '    """\n', '\n', '    def __init__(self, loop=None):\n', '        if loop is None:\n', '            self._loop = events.get_event_loop()\n', '        else:\n', '            self._loop = loop\n', '        self._paused = False\n', '        self._drain_waiter = None\n', '        self._connection_lost = False\n', '\n', '    def pause_writing(self):\n', '        assert not self._paused\n', '        self._paused = True\n', '        if self._loop.get_debug():\n', '            logger.debug("%r pauses writing", self)\n', '\n', '    def resume_writing(self):\n', '        assert self._paused\n', '        self._paused = False\n', '        if self._loop.get_debug():\n', '            logger.debug("%r resumes writing", self)\n', '\n', '        waiter = self._drain_waiter\n', '        if waiter is not None:\n', '            self._drain_waiter = None\n', '            if not waiter.done():\n', '                waiter.set_result(None)\n', '\n', '    def connection_lost(self, exc):\n', '        self._connection_lost = True\n', '        # Wake up the writer if currently paused.\n', '        if not self._paused:\n', '            return\n', '        waiter = self._drain_waiter\n', '        if waiter is None:\n', '            return\n', '        self._drain_waiter = None\n', '        if waiter.done():\n', '            return\n', '        if exc is None:\n', '            waiter.set_result(None)\n', '        else:\n', '            waiter.set_exception(exc)\n', '\n', '    async def _drain_helper(self):\n', '        if self._connection_lost:\n', "            raise ConnectionResetError('Connection lost')\n", '        if not self._paused:\n', '            return\n', '        waiter = self._drain_waiter\n', '        assert waiter is None or waiter.cancelled()\n', '        waiter = self._loop.create_future()\n', '        self._drain_waiter = waiter\n', '        await waiter\n', '\n', '    def _get_close_waiter(self, stream):\n', '        raise NotImplementedError\n', '\n', '\n', 'class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):\n', '    """Helper class to adapt between Protocol and StreamReader.\n', '\n', '    (This is a helper class instead of making StreamReader itself a\n', '    Protocol subclass, because the StreamReader has other potential\n', '    uses, and to prevent the user of the StreamReader to accidentally\n', '    call inappropriate methods of the protocol.)\n', '    """\n', '\n', '    _source_traceback = None\n', '\n', '    def __init__(self, stream_reader, client_connected_cb=None, loop=None):\n', '        super().__init__(loop=loop)\n', '        if stream_reader is not None:\n', '            self._stream_reader_wr = weakref.ref(stream_reader)\n', '            self._source_traceback = stream_reader._source_traceback\n', '        else:\n', '            self._stream_reader_wr = None\n', '        if client_connected_cb is not None:\n', '            # This is a stream created by the `create_server()` function.\n', '            # Keep a strong reference to the reader until a connection\n', '            # is established.\n', '            self._strong_reader = stream_reader\n', '        self._reject_connection = False\n', '        self._stream_writer = None\n', '        self._transport = None\n', '        self._client_connected_cb = client_connected_cb\n', '        self._over_ssl = False\n', '        self._closed = self._loop.create_future()\n', '\n', '    @property\n', '    def _stream_reader(self):\n', '        if self._stream_reader_wr is None:\n', '            return None\n', '        return self._stream_reader_wr()\n', '\n', '    def connection_made(self, transport):\n', '        if self._reject_connection:\n', '            context = {\n', "                'message': ('An open stream was garbage collected prior to '\n", "                            'establishing network connection; '\n", '                            \'call "stream.close()" explicitly.\')\n', '            }\n', '            if self._source_traceback:\n', "                context['source_traceback'] = self._source_traceback\n", '            self._loop.call_exception_handler(context)\n', '            transport.abort()\n', '            return\n', '        self._transport = transport\n', '        reader = self._stream_reader\n', '        if reader is not None:\n', '            reader.set_transport(transport)\n', "        self._over_ssl = transport.get_extra_info('sslcontext') is not None\n", '        if self._client_connected_cb is not None:\n', '            self._stream_writer = StreamWriter(transport, self,\n', '                                               reader,\n', '                                               self._loop)\n', '            res = self._client_connected_cb(reader,\n', '                                            self._stream_writer)\n', '            if coroutines.iscoroutine(res):\n', '                self._loop.create_task(res)\n', '            self._strong_reader = None\n', '\n', '    def connection_lost(self, exc):\n', '        reader = self._stream_reader\n', '        if reader is not None:\n', '            if exc is None:\n', '                reader.feed_eof()\n', '            else:\n', '                reader.set_exception(exc)\n', '        if not self._closed.done():\n', '            if exc is None:\n', '                self._closed.set_result(None)\n', '            else:\n', '                self._closed.set_exception(exc)\n', '        super().connection_lost(exc)\n', '        self._stream_reader_wr = None\n', '        self._stream_writer = None\n', '        self._transport = None\n', '\n', '    def data_received(self, data):\n', '        reader = self._stream_reader\n', '        if reader is not None:\n', '            reader.feed_data(data)\n', '\n', '    def eof_received(self):\n', '        reader = self._stream_reader\n', '        if reader is not None:\n', '            reader.feed_eof()\n', '        if self._over_ssl:\n', '            # Prevent a warning in SSLProtocol.eof_received:\n', '            # "returning true from eof_received()\n', '            # has no effect when using ssl"\n', '            return False\n', '        return True\n', '\n', '    def _get_close_waiter(self, stream):\n', '        return self._closed\n', '\n', '    def __del__(self):\n', '        # Prevent reports about unhandled exceptions.\n', '        # Better than self._closed._log_traceback = False hack\n', '        closed = self._closed\n', '        if closed.done() and not closed.cancelled():\n', '            closed.exception()\n', '\n', '\n', 'class StreamWriter:\n', '    """Wraps a Transport.\n', '\n', '    This exposes write(), writelines(), [can_]write_eof(),\n', '    get_extra_info() and close().  It adds drain() which returns an\n', '    optional Future on which you can wait for flow control.  It also\n', '    adds a transport property which references the Transport\n', '    directly.\n', '    """\n', '\n', '    def __init__(self, transport, protocol, reader, loop):\n', '        self._transport = transport\n', '        self._protocol = protocol\n', '        # drain() expects that the reader has an exception() method\n', '        assert reader is None or isinstance(reader, StreamReader)\n', '        self._reader = reader\n', '        self._loop = loop\n', '        self._complete_fut = self._loop.create_future()\n', '        self._complete_fut.set_result(None)\n', '\n', '    def __repr__(self):\n', "        info = [self.__class__.__name__, f'transport={self._transport!r}']\n", '        if self._reader is not None:\n', "            info.append(f'reader={self._reader!r}')\n", "        return '<{}>'.format(' '.join(info))\n", '\n', '    @property\n', '    def transport(self):\n', '        return self._transport\n', '\n', '    def write(self, data):\n', '        self._transport.write(data)\n', '\n', '    def writelines(self, data):\n', '        self._transport.writelines(data)\n', '\n', '    def write_eof(self):\n', '        return self._transport.write_eof()\n', '\n', '    def can_write_eof(self):\n', '        return self._transport.can_write_eof()\n', '\n', '    def close(self):\n', '        return self._transport.close()\n', '\n', '    def is_closing(self):\n', '        return self._transport.is_closing()\n', '\n', '    async def wait_closed(self):\n', '        await self._protocol._get_close_waiter(self)\n', '\n', '    def get_extra_info(self, name, default=None):\n', '        return self._transport.get_extra_info(name, default)\n', '\n', '    async def drain(self):\n', '        """Flush the write buffer.\n', '\n', '        The intended use is to write\n', '\n', '          w.write(data)\n', '          await w.drain()\n', '        """\n', '        if self._reader is not None:\n', '            exc = self._reader.exception()\n', '            if exc is not None:\n', '                raise exc\n', '        if self._transport.is_closing():\n', '            # Wait for protocol.connection_lost() call\n', '            # Raise connection closing error if any,\n', '            # ConnectionResetError otherwise\n', '            # Yield to the event loop so connection_lost() may be\n', '            # called.  Without this, _drain_helper() would return\n', '            # immediately, and code that calls\n', '            #     write(...); await drain()\n', '            # in a loop would never call connection_lost(), so it\n', '            # would not see an error when the socket is closed.\n', '            await sleep(0)\n', '        await self._protocol._drain_helper()\n', '\n', '\n', 'class StreamReader:\n', '\n', '    _source_traceback = None\n', '\n', '    def __init__(self, limit=_DEFAULT_LIMIT, loop=None):\n', '        # The line length limit is  a security feature;\n', '        # it also doubles as half the buffer limit.\n', '\n', '        if limit <= 0:\n', "            raise ValueError('Limit cannot be <= 0')\n", '\n', '        self._limit = limit\n', '        if loop is None:\n', '            self._loop = events.get_event_loop()\n', '        else:\n', '            self._loop = loop\n', '        self._buffer = bytearray()\n', "        self._eof = False    # Whether we're done.\n", '        self._waiter = None  # A future used by _wait_for_data()\n', '        self._exception = None\n', '        self._transport = None\n', '        self._paused = False\n', '        if self._loop.get_debug():\n', '            self._source_traceback = format_helpers.extract_stack(\n', '                sys._getframe(1))\n', '\n', '    def __repr__(self):\n', "        info = ['StreamReader']\n", '        if self._buffer:\n', "            info.append(f'{len(self._buffer)} bytes')\n", '        if self._eof:\n', "            info.append('eof')\n", '        if self._limit != _DEFAULT_LIMIT:\n', "            info.append(f'limit={self._limit}')\n", '        if self._waiter:\n', "            info.append(f'waiter={self._waiter!r}')\n", '        if self._exception:\n', "            info.append(f'exception={self._exception!r}')\n", '        if self._transport:\n', "            info.append(f'transport={self._transport!r}')\n", '        if self._paused:\n', "            info.append('paused')\n", "        return '<{}>'.format(' '.join(info))\n", '\n', '    def exception(self):\n', '        return self._exception\n', '\n', '    def set_exception(self, exc):\n', '        self._exception = exc\n', '\n', '        waiter = self._waiter\n', '        if waiter is not None:\n', '            self._waiter = None\n', '            if not waiter.cancelled():\n', '                waiter.set_exception(exc)\n', '\n', '    def _wakeup_waiter(self):\n', '        """Wakeup read*() functions waiting for data or EOF."""\n', '        waiter = self._waiter\n', '        if waiter is not None:\n', '            self._waiter = None\n', '            if not waiter.cancelled():\n', '                waiter.set_result(None)\n', '\n', '    def set_transport(self, transport):\n', "        assert self._transport is None, 'Transport already set'\n", '        self._transport = transport\n', '\n', '    def _maybe_resume_transport(self):\n', '        if self._paused and len(self._buffer) <= self._limit:\n', '            self._paused = False\n', '            self._transport.resume_reading()\n', '\n', '    def feed_eof(self):\n', '        self._eof = True\n', '        self._wakeup_waiter()\n', '\n', '    def at_eof(self):\n', '        """Return True if the buffer is empty and \'feed_eof\' was called."""\n', '        return self._eof and not self._buffer\n', '\n', '    def feed_data(self, data):\n', "        assert not self._eof, 'feed_data after feed_eof'\n", '\n', '        if not data:\n', '            return\n', '\n', '        self._buffer.extend(data)\n', '        self._wakeup_waiter()\n', '\n', '        if (self._transport is not None and\n', '                not self._paused and\n', '                len(self._buffer) > 2 * self._limit):\n', '            try:\n', '                self._transport.pause_reading()\n', '            except NotImplementedError:\n', "                # The transport can't be paused.\n", "                # We'll just have to buffer all data.\n", "                # Forget the transport so we don't keep trying.\n", '                self._transport = None\n', '            else:\n', '                self._paused = True\n', '\n', '    async def _wait_for_data(self, func_name):\n', '        """Wait until feed_data() or feed_eof() is called.\n', '\n', '        If stream was paused, automatically resume it.\n', '        """\n', '        # StreamReader uses a future to link the protocol feed_data() method\n', '        # to a read coroutine. Running two read coroutines at the same time\n', '        # would have an unexpected behaviour. It would not possible to know\n', '        # which coroutine would get the next data.\n', '        if self._waiter is not None:\n', '            raise RuntimeError(\n', "                f'{func_name}() called while another coroutine is '\n", "                f'already waiting for incoming data')\n", '\n', "        assert not self._eof, '_wait_for_data after EOF'\n", '\n', '        # Waiting for data while paused will make deadlock, so prevent it.\n', '        # This is essential for readexactly(n) for case when n > self._limit.\n', '        if self._paused:\n', '            self._paused = False\n', '            self._transport.resume_reading()\n', '\n', '        self._waiter = self._loop.create_future()\n', '        try:\n', '            await self._waiter\n', '        finally:\n', '            self._waiter = None\n', '\n', '    async def readline(self):\n', '        """Read chunk of data from the stream until newline (b\'\\n\') is found.\n', '\n', '        On success, return chunk that ends with newline. If only partial\n', '        line can be read due to EOF, return incomplete line without\n', '        terminating newline. When EOF was reached while no bytes read, empty\n', '        bytes object is returned.\n', '\n', '        If limit is reached, ValueError will be raised. In that case, if\n', '        newline was found, complete line including newline will be removed\n', '        from internal buffer. Else, internal buffer will be cleared. Limit is\n', '        compared against part of the line without newline.\n', '\n', '        If stream was paused, this function will automatically resume it if\n', '        needed.\n', '        """\n', "        sep = b'\\n'\n", '        seplen = len(sep)\n', '        try:\n', '            line = await self.readuntil(sep)\n', '        except exceptions.IncompleteReadError as e:\n', '            return e.partial\n', '        except exceptions.LimitOverrunError as e:\n', '            if self._buffer.startswith(sep, e.consumed):\n', '                del self._buffer[:e.consumed + seplen]\n', '            else:\n', '                self._buffer.clear()\n', '            self._maybe_resume_transport()\n', '            raise ValueError(e.args[0])\n', '        return line\n', '\n', "    async def readuntil(self, separator=b'\\n'):\n", '        """Read data from the stream until ``separator`` is found.\n', '\n', '        On success, the data and separator will be removed from the\n', '        internal buffer (consumed). Returned data will include the\n', '        separator at the end.\n', '\n', '        Configured stream limit is used to check result. Limit sets the\n', '        maximal length of data that can be returned, not counting the\n', '        separator.\n', '\n', '        If an EOF occurs and the complete separator is still not found,\n', '        an IncompleteReadError exception will be raised, and the internal\n', '        buffer will be reset.  The IncompleteReadError.partial attribute\n', '        may contain the separator partially.\n', '\n', '        If the data cannot be read because of over limit, a\n', '        LimitOverrunError exception  will be raised, and the data\n', '        will be left in the internal buffer, so it can be read again.\n', '        """\n', '        seplen = len(separator)\n', '        if seplen == 0:\n', "            raise ValueError('Separator should be at least one-byte string')\n", '\n', '        if self._exception is not None:\n', '            raise self._exception\n', '\n', '        # Consume whole buffer except last bytes, which length is\n', "        # one less than seplen. Let's check corner cases with\n", "        # separator='SEPARATOR':\n", '        # * we have received almost complete separator (without last\n', "        #   byte). i.e buffer='some textSEPARATO'. In this case we\n", '        #   can safely consume len(separator) - 1 bytes.\n', '        # * last byte of buffer is first byte of separator, i.e.\n', "        #   buffer='abcdefghijklmnopqrS'. We may safely consume\n", '        #   everything except that last byte, but this require to\n', '        #   analyze bytes of buffer that match partial separator.\n', '        #   This is slow and/or require FSM. For this case our\n', '        #   implementation is not optimal, since require rescanning\n', '        #   of data that is known to not belong to separator. In\n', '        #   real world, separator will not be so long to notice\n', '        #   performance problems. Even when reading MIME-encoded\n', '        #   messages :)\n', '\n', '        # `offset` is the number of bytes from the beginning of the buffer\n', '        # where there is no occurrence of `separator`.\n', '        offset = 0\n', '\n', '        # Loop until we find `separator` in the buffer, exceed the buffer size,\n', '        # or an EOF has happened.\n', '        while True:\n', '            buflen = len(self._buffer)\n', '\n', '            # Check if we now have enough data in the buffer for `separator` to\n', '            # fit.\n', '            if buflen - offset >= seplen:\n', '                isep = self._buffer.find(separator, offset)\n', '\n', '                if isep != -1:\n', '                    # `separator` is in the buffer. `isep` will be used later\n', '                    # to retrieve the data.\n', '                    break\n', '\n', '                # see upper comment for explanation.\n', '                offset = buflen + 1 - seplen\n', '                if offset > self._limit:\n', '                    raise exceptions.LimitOverrunError(\n', "                        'Separator is not found, and chunk exceed the limit',\n", '                        offset)\n', '\n', '            # Complete message (with full separator) may be present in buffer\n', '            # even when EOF flag is set. This may happen when the last chunk\n', "            # adds data which makes separator be found. That's why we check for\n", '            # EOF *ater* inspecting the buffer.\n', '            if self._eof:\n', '                chunk = bytes(self._buffer)\n', '                self._buffer.clear()\n', '                raise exceptions.IncompleteReadError(chunk, None)\n', '\n', '            # _wait_for_data() will resume reading if stream was paused.\n', "            await self._wait_for_data('readuntil')\n", '\n', '        if isep > self._limit:\n', '            raise exceptions.LimitOverrunError(\n', "                'Separator is found, but chunk is longer than limit', isep)\n", '\n', '        chunk = self._buffer[:isep + seplen]\n', '        del self._buffer[:isep + seplen]\n', '        self._maybe_resume_transport()\n', '        return bytes(chunk)\n', '\n', '    async def read(self, n=-1):\n', '        """Read up to `n` bytes from the stream.\n', '\n', '        If n is not provided, or set to -1, read until EOF and return all read\n', '        bytes. If the EOF was received and the internal buffer is empty, return\n', '        an empty bytes object.\n', '\n', '        If n is zero, return empty bytes object immediately.\n', '\n', '        If n is positive, this function try to read `n` bytes, and may return\n', '        less or equal bytes than requested, but at least one byte. If EOF was\n', '        received before any byte is read, this function returns empty byte\n', '        object.\n', '\n', '        Returned value is not limited with limit, configured at stream\n', '        creation.\n', '\n', '        If stream was paused, this function will automatically resume it if\n', '        needed.\n', '        """\n', '\n', '        if self._exception is not None:\n', '            raise self._exception\n', '\n', '        if n == 0:\n', "            return b''\n", '\n', '        if n < 0:\n', '            # This used to just loop creating a new waiter hoping to\n', '            # collect everything in self._buffer, but that would\n', '            # deadlock if the subprocess sends more than self.limit\n', '            # bytes.  So just call self.read(self._limit) until EOF.\n', '            blocks = []\n', '            while True:\n', '                block = await self.read(self._limit)\n', '                if not block:\n', '                    break\n', '                blocks.append(block)\n', "            return b''.join(blocks)\n", '\n', '        if not self._buffer and not self._eof:\n', "            await self._wait_for_data('read')\n", '\n', '        # This will work right even if buffer is less than n bytes\n', '        data = bytes(self._buffer[:n])\n', '        del self._buffer[:n]\n', '\n', '        self._maybe_resume_transport()\n', '        return data\n', '\n', '    async def readexactly(self, n):\n', '        """Read exactly `n` bytes.\n', '\n', '        Raise an IncompleteReadError if EOF is reached before `n` bytes can be\n', '        read. The IncompleteReadError.partial attribute of the exception will\n', '        contain the partial read bytes.\n', '\n', '        if n is zero, return empty bytes object.\n', '\n', '        Returned value is not limited with limit, configured at stream\n', '        creation.\n', '\n', '        If stream was paused, this function will automatically resume it if\n', '        needed.\n', '        """\n', '        if n < 0:\n', "            raise ValueError('readexactly size can not be less than zero')\n", '\n', '        if self._exception is not None:\n', '            raise self._exception\n', '\n', '        if n == 0:\n', "            return b''\n", '\n', '        while len(self._buffer) < n:\n', '            if self._eof:\n', '                incomplete = bytes(self._buffer)\n', '                self._buffer.clear()\n', '                raise exceptions.IncompleteReadError(incomplete, n)\n', '\n', "            await self._wait_for_data('readexactly')\n", '\n', '        if len(self._buffer) == n:\n', '            data = bytes(self._buffer)\n', '            self._buffer.clear()\n', '        else:\n', '            data = bytes(self._buffer[:n])\n', '            del self._buffer[:n]\n', '        self._maybe_resume_transport()\n', '        return data\n', '\n', '    def __aiter__(self):\n', '        return self\n', '\n', '    async def __anext__(self):\n', '        val = await self.readline()\n', "        if val == b'':\n", '            raise StopAsyncIteration\n', '        return val\n'], '/nix/store/cgxc3jz7idrb1wnb2lard9rvcx6aw2si-python3-3.9.6/lib/python3.9/asyncio/streams.py')}

Modules

functools

os

sys

tokenize