💾 Archived View for tris.fyi › pydoc › linecache captured on 2022-04-28 at 17:31:40. Gemini links have been rewritten to link to archived content
-=-=-=-=-=-=-
Cache lines from Python source files. This is intended to read lines from imported modules -- hence, if a filename is not found, it will look down the module search path for a file by that name.
checkcache(filename=None) Discard cache entries that are out of date. (This is not checked upon each call!)
clearcache() Clear the cache entirely.
getline(filename, lineno, module_globals=None) Get a line for a Python source file from the cache. Update the cache if it doesn't contain an entry for this file already.
getlines(filename, module_globals=None) Get the lines for a Python source file from the cache. Update the cache if it doesn't contain an entry for this file already.
lazycache(filename, module_globals) Seed the cache for filename with module_globals. The module loader will be asked for the source only when getlines is called, not immediately. If there is an entry in the cache already, it is not altered. :return: True if a lazy load is registered in the cache, otherwise False. To register such a load a module loader with a get_source method must be found, the filename must be a cachable filename, and the filename must not be already cached.
updatecache(filename, module_globals=None) Update a cache entry and return its list of lines. If something's wrong, print a message, discard the cache entry, and return an empty list.
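Taken together, these functions form the module's whole public surface. A minimal usage sketch (using linecache's own source file as the target purely for illustration; any readable Python source file works the same way):

```python
import linecache

# First access reads the file and populates the module-level cache;
# line numbers are 1-based and a missing line or file yields ''.
first = linecache.getline(linecache.__file__, 1)
print(repr(first))

# The whole file as a list of lines (each keeps its trailing '\n').
lines = linecache.getlines(linecache.__file__)
print(len(lines), "lines cached")

# If files may have changed on disk, drop stale entries; they are
# re-read lazily on the next getline()/getlines() call.
linecache.checkcache()

# Or throw away everything cached so far.
linecache.clearcache()
```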
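lazycache() is the only call that needs module_globals up front: it stashes the module's loader so the source can be fetched through get_source() later, which is what makes line lookup work for modules that are not plain files on disk. A sketch of that handshake, assuming a freshly started interpreter (so the file is not already cached) and using the arbitrary stdlib module "email.message" as the example:

```python
import importlib.util
import linecache

spec = importlib.util.find_spec("email.message")
module_globals = {"__name__": spec.name, "__loader__": spec.loader}

# Registers a lazy entry: nothing is read yet; the loader's
# get_source() runs only when the lines are first needed.
registered = linecache.lazycache(spec.origin, module_globals)
print(registered)   # True if the lazy load was registered

# This call triggers the actual load through the loader.
print(linecache.getline(spec.origin, 1, module_globals))
```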
cache = {'/nix/store/r2cl0bzi7p4pi83iyc9i3bak32ph5bkd-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/tls.py': (3130, 1.0, ['import datetime\n', 'import os.path\n', 'import logging\n', 'import ssl\n', 'import traceback\n', '\n', 'from cryptography import x509\n', 'from cryptography.x509.oid import NameOID\n', 'from cryptography.hazmat.primitives import hashes, serialization\n', 'from cryptography.hazmat.primitives.asymmetric import rsa\n', '\n', 'from typing import List, TYPE_CHECKING\n', '\n', 'if TYPE_CHECKING:\n', ' from .config import Config\n', '\n', '\n', 'log = logging.getLogger("amethyst.tls")\n', '\n', '\n', 'def make_partial_context():\n', ' c = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)\n', ' c.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1\n', ' c.options |= ssl.OP_SINGLE_DH_USE | ssl.OP_SINGLE_ECDH_USE\n', ' c.check_hostname = False\n', ' c.verify_mode = ssl.VerifyMode.CERT_OPTIONAL\n', ' return c\n', '\n', '\n', 'def make_context(cert_path: str, key_path: str):\n', ' c = make_partial_context()\n', ' c.load_cert_chain(cert_path, keyfile=key_path)\n', ' return c\n', '\n', '\n', 'def make_sni_context(config: "Config"):\n', ' def sni_callback(sock, host, _original_ctx):\n', ' for host_cfg in config.hosts:\n', ' if host_cfg.host == host:\n', ' break\n', ' else:\n', ' return ssl.ALERT_DESCRIPTION_HANDSHAKE_FAILURE\n', '\n', ' try:\n', ' sock.context = host_cfg.tls.get_ssl_context()\n', ' except Exception:\n', ' log.warn(f"When setting context after SNI; {traceback.format_exc()}")\n', '\n', ' c = make_partial_context()\n', ' c.sni_callback = sni_callback\n', ' return c\n', '\n', '\n', 'def update_certificate(cert_path: str, key_path: str, hosts: List[str]):\n', ' if os.path.exists(cert_path):\n', ' with open(cert_path, "rb") as f:\n', ' cert = x509.load_pem_x509_certificate(f.read())\n', '\n', ' if cert.not_valid_after > datetime.datetime.now():\n', ' log.info("Certificate exists and is unexpired; skipping regeneration.")\n', ' return cert.not_valid_after\n', '\n', ' else:\n', ' log.info("Certificate expired; regenerating.")\n', '\n', ' else:\n', ' log.info("Certificate does not exist yet, generating one now.")\n', '\n', ' key = rsa.generate_private_key(\n', ' public_exponent=65537,\n', ' key_size=4096,\n', ' )\n', '\n', ' with open(key_path, "wb") as f:\n', ' f.write(\n', ' key.private_bytes(\n', ' encoding=serialization.Encoding.PEM,\n', ' format=serialization.PrivateFormat.TraditionalOpenSSL,\n', ' encryption_algorithm=serialization.NoEncryption(),\n', ' )\n', ' )\n', '\n', ' subject = issuer = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, hosts[0])])\n', '\n', ' cert = (\n', ' x509.CertificateBuilder()\n', ' .subject_name(subject)\n', ' .issuer_name(issuer)\n', ' .public_key(key.public_key())\n', ' .serial_number(x509.random_serial_number())\n', ' .not_valid_before(datetime.datetime.utcnow() - datetime.timedelta(days=1))\n', ' .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=30))\n', ' .add_extension(\n', ' x509.SubjectAlternativeName([x509.DNSName(host) for host in hosts]),\n', ' critical=False,\n', ' )\n', ' .sign(key, hashes.SHA256())\n', ' )\n', '\n', ' with open(cert_path, "wb") as f:\n', ' f.write(cert.public_bytes(serialization.Encoding.PEM))\n', '\n', ' log.info("Success! 
Certificate generated and saved.")\n', ' return cert.not_valid_after\n'], '/nix/store/r2cl0bzi7p4pi83iyc9i3bak32ph5bkd-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/tls.py'), '/nix/store/r2cl0bzi7p4pi83iyc9i3bak32ph5bkd-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/config.py': (2800, 1.0, ['import datetime\n', 'import ssl\n', '\n', 'from dataclasses import dataclass\n', 'from typing import Dict, List, Optional, Tuple\n', '\n', 'from .handler import GenericHandler, Handler\n', 'from .resource import Resource\n', 'from .resource_registry import registry\n', '\n', 'import os\n', '\n', '\n', '@dataclass\n', 'class TLSConfig:\n', ' host: str\n', ' auto: bool = False\n', ' cert_path: Optional[str] = None\n', ' key_path: Optional[str] = None\n', '\n', ' _context_cache: Optional[Tuple[datetime.datetime, ssl.SSLContext]] = None\n', '\n', ' @classmethod\n', ' def from_config(cls, host, cfg):\n', ' o = cls(host)\n', '\n', ' state = os.getenv("STATE_DIRECTORY", ".")\n', '\n', ' o.auto = cfg.get("auto", True)\n', '\n', ' o.cert_path = cfg.get("cert_path", None)\n', ' if o.cert_path is None:\n', ' o.cert_path = os.path.join(state, f"{host}.cert.pem")\n', '\n', ' o.key_path = cfg.get("key_path", None)\n', ' if o.key_path is None:\n', ' o.key_path = os.path.join(state, f"{host}.key.pem")\n', '\n', ' return o\n', '\n', ' def clear_context_cache(self):\n', ' self._context_cache = None\n', '\n', ' def get_ssl_context(self):\n', ' from . import tls\n', '\n', ' if self._context_cache is not None:\n', ' expires, context = self._context_cache\n', '\n', ' if expires is None or expires > datetime.datetime.now():\n', ' return context\n', '\n', ' if self.auto:\n', ' expires = tls.update_certificate(self.cert_path, self.key_path, [self.host])\n', '\n', ' else:\n', ' # We want to keep using a manually-specified certificate forever\n', ' # or at least until the server is restarted / HUPed.\n', ' expires = None\n', '\n', ' context = tls.make_context(self.cert_path, self.key_path)\n', '\n', ' self._context_cache = expires, context\n', ' return context\n', '\n', '\n', '@dataclass\n', 'class HostConfig:\n', ' host: str\n', ' tls: TLSConfig\n', ' path_map: Dict[str, Resource]\n', '\n', ' @classmethod\n', ' def _construct_resource(cls, cfg) -> Resource:\n', ' resource_type = cfg.pop("type", "filesystem")\n', ' return registry[resource_type](**cfg)\n', '\n', ' @classmethod\n', ' def from_config(cls, cfg):\n', ' host = cfg["name"]\n', ' tls = TLSConfig.from_config(host, cfg.get("tls", {}))\n', ' path_map = {\n', ' path: cls._construct_resource(config)\n', ' for path, config in cfg["paths"].items()\n', ' }\n', '\n', ' return cls(host, tls, path_map)\n', '\n', '\n', '@dataclass\n', 'class Config:\n', ' hosts: List[HostConfig]\n', ' handler: Handler\n', ' port: int = 1965\n', '\n', ' def load(self, cfg):\n', ' self.hosts = [HostConfig.from_config(host) for host in cfg.get("hosts", [])]\n', '\n', ' if not self.hosts:\n', ' raise ValueError("Server can\'t run without any hosts!")\n', '\n', ' self.handler = GenericHandler({host.host: host.path_map for host in self.hosts})\n', '\n', ' @classmethod\n', ' def from_config(cls, cfg):\n', ' o = cls([], None, cfg.get("port", 1965))\n', ' o.load(cfg)\n', ' return o\n'], '/nix/store/r2cl0bzi7p4pi83iyc9i3bak32ph5bkd-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/config.py'), '/nix/store/r2cl0bzi7p4pi83iyc9i3bak32ph5bkd-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/server.py': (2435, 1.0, ['#!/usr/bin/env python3\n', '\n', 'import asyncio\n', 'import logging\n', 'import 
signal\n', 'import traceback\n', 'from typing import TYPE_CHECKING\n', '\n', 'from .response import Response, Status\n', 'from .tls import make_sni_context\n', '\n', 'if TYPE_CHECKING:\n', ' from .config import Config\n', '\n', '\n', 'class Server:\n', ' def __init__(\n', ' self,\n', ' config: "Config",\n', ' ):\n', ' self.log = logging.getLogger("amethyst.server")\n', ' self.access_log = logging.getLogger("amethyst.access")\n', '\n', ' self.server = None\n', ' self.config = config\n', '\n', ' self.ssl_context = make_sni_context(config)\n', ' self.server = self.get_server()\n', '\n', ' def get_server(self):\n', ' loop = asyncio.get_event_loop()\n', '\n', ' return asyncio.start_server(\n', ' self.handle_connection,\n', ' port=self.config.port,\n', ' ssl=self.ssl_context,\n', ' loop=loop,\n', ' )\n', '\n', ' async def handle_connection(self, reader, writer):\n', ' from .request import Connection\n', '\n', ' peer_addr = writer.get_extra_info("peername")\n', ' peer_cert = writer.get_extra_info("peercert")\n', '\n', ' self.log.debug(f"Received connection from {peer_addr}")\n', '\n', ' url = "-"\n', ' try:\n', ' url = (await reader.readuntil(b"\\r\\n")).rstrip(b"\\r\\n").decode()\n', '\n', ' if len(url) > 1024:\n', ' response = Response(Status.BAD_REQUEST, "URL too long!")\n', ' else:\n', ' response = await self.config.handler(\n', ' url, Connection(self, peer_addr, peer_cert)\n', ' )\n', '\n', ' except UnicodeDecodeError:\n', ' response = Response(Status.BAD_REQUEST, "URL must be UTF-8")\n', '\n', ' except Exception:\n', ' self.log.error(f"While generating response; {traceback.format_exc()}")\n', '\n', ' response = Response(\n', ' Status.TEMPORARY_FAILURE,\n', ' "Exception thrown during request processing; see server logs for details.",\n', ' )\n', '\n', ' self.access_log.info(\n', ' f"{url} {response.status_code.value}[{response.status_code.name}]"\n', ' f" {response.meta}"\n', ' )\n', '\n', ' try:\n', ' line = f"{response.status_code.value} {response.meta}\\r\\n".encode()\n', ' writer.write(line)\n', '\n', ' if response.status_code.is_success() and response.content is not None:\n', ' writer.write(response.content)\n', '\n', ' except Exception:\n', ' self.log.error(f"While writing response; {traceback.format_exc()}")\n', '\n', ' finally:\n', ' writer.close()\n'], '/nix/store/r2cl0bzi7p4pi83iyc9i3bak32ph5bkd-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/server.py'), '/nix/store/hrq2jcbjdwkj39xzahvnnjk93ccrz1pr-python3-3.9.6/lib/python3.9/asyncio/streams.py': (26656, 1.0, ['__all__ = (\n', " 'StreamReader', 'StreamWriter', 'StreamReaderProtocol',\n", " 'open_connection', 'start_server')\n", '\n', 'import socket\n', 'import sys\n', 'import warnings\n', 'import weakref\n', '\n', "if hasattr(socket, 'AF_UNIX'):\n", " __all__ += ('open_unix_connection', 'start_unix_server')\n", '\n', 'from . import coroutines\n', 'from . import events\n', 'from . import exceptions\n', 'from . import format_helpers\n', 'from . 
import protocols\n', 'from .log import logger\n', 'from .tasks import sleep\n', '\n', '\n', '_DEFAULT_LIMIT = 2 ** 16 # 64 KiB\n', '\n', '\n', 'async def open_connection(host=None, port=None, *,\n', ' loop=None, limit=_DEFAULT_LIMIT, **kwds):\n', ' """A wrapper for create_connection() returning a (reader, writer) pair.\n', '\n', ' The reader returned is a StreamReader instance; the writer is a\n', ' StreamWriter instance.\n', '\n', ' The arguments are all the usual arguments to create_connection()\n', ' except protocol_factory; most common are positional host and port,\n', ' with various optional keyword arguments following.\n', '\n', ' Additional optional keyword arguments are loop (to set the event loop\n', ' instance to use) and limit (to set the buffer limit passed to the\n', ' StreamReader).\n', '\n', ' (If you want to customize the StreamReader and/or\n', " StreamReaderProtocol classes, just copy the code -- there's\n", ' really nothing special here except some convenience.)\n', ' """\n', ' if loop is None:\n', ' loop = events.get_event_loop()\n', ' else:\n', ' warnings.warn("The loop argument is deprecated since Python 3.8, "\n', ' "and scheduled for removal in Python 3.10.",\n', ' DeprecationWarning, stacklevel=2)\n', ' reader = StreamReader(limit=limit, loop=loop)\n', ' protocol = StreamReaderProtocol(reader, loop=loop)\n', ' transport, _ = await loop.create_connection(\n', ' lambda: protocol, host, port, **kwds)\n', ' writer = StreamWriter(transport, protocol, reader, loop)\n', ' return reader, writer\n', '\n', '\n', 'async def start_server(client_connected_cb, host=None, port=None, *,\n', ' loop=None, limit=_DEFAULT_LIMIT, **kwds):\n', ' """Start a socket server, call back for each client connected.\n', '\n', ' The first parameter, `client_connected_cb`, takes two parameters:\n', ' client_reader, client_writer. client_reader is a StreamReader\n', ' object, while client_writer is a StreamWriter object. This\n', ' parameter can either be a plain callback function or a coroutine;\n', ' if it is a coroutine, it will be automatically converted into a\n', ' Task.\n', '\n', ' The rest of the arguments are all the usual arguments to\n', ' loop.create_server() except protocol_factory; most common are\n', ' positional host and port, with various optional keyword arguments\n', ' following. The return value is the same as loop.create_server().\n', '\n', ' Additional optional keyword arguments are loop (to set the event loop\n', ' instance to use) and limit (to set the buffer limit passed to the\n', ' StreamReader).\n', '\n', ' The return value is the same as loop.create_server(), i.e. 
a\n', ' Server object which can be used to stop the service.\n', ' """\n', ' if loop is None:\n', ' loop = events.get_event_loop()\n', ' else:\n', ' warnings.warn("The loop argument is deprecated since Python 3.8, "\n', ' "and scheduled for removal in Python 3.10.",\n', ' DeprecationWarning, stacklevel=2)\n', '\n', ' def factory():\n', ' reader = StreamReader(limit=limit, loop=loop)\n', ' protocol = StreamReaderProtocol(reader, client_connected_cb,\n', ' loop=loop)\n', ' return protocol\n', '\n', ' return await loop.create_server(factory, host, port, **kwds)\n', '\n', '\n', "if hasattr(socket, 'AF_UNIX'):\n", ' # UNIX Domain Sockets are supported on this platform\n', '\n', ' async def open_unix_connection(path=None, *,\n', ' loop=None, limit=_DEFAULT_LIMIT, **kwds):\n', ' """Similar to `open_connection` but works with UNIX Domain Sockets."""\n', ' if loop is None:\n', ' loop = events.get_event_loop()\n', ' else:\n', ' warnings.warn("The loop argument is deprecated since Python 3.8, "\n', ' "and scheduled for removal in Python 3.10.",\n', ' DeprecationWarning, stacklevel=2)\n', ' reader = StreamReader(limit=limit, loop=loop)\n', ' protocol = StreamReaderProtocol(reader, loop=loop)\n', ' transport, _ = await loop.create_unix_connection(\n', ' lambda: protocol, path, **kwds)\n', ' writer = StreamWriter(transport, protocol, reader, loop)\n', ' return reader, writer\n', '\n', ' async def start_unix_server(client_connected_cb, path=None, *,\n', ' loop=None, limit=_DEFAULT_LIMIT, **kwds):\n', ' """Similar to `start_server` but works with UNIX Domain Sockets."""\n', ' if loop is None:\n', ' loop = events.get_event_loop()\n', ' else:\n', ' warnings.warn("The loop argument is deprecated since Python 3.8, "\n', ' "and scheduled for removal in Python 3.10.",\n', ' DeprecationWarning, stacklevel=2)\n', '\n', ' def factory():\n', ' reader = StreamReader(limit=limit, loop=loop)\n', ' protocol = StreamReaderProtocol(reader, client_connected_cb,\n', ' loop=loop)\n', ' return protocol\n', '\n', ' return await loop.create_unix_server(factory, path, **kwds)\n', '\n', '\n', 'class FlowControlMixin(protocols.Protocol):\n', ' """Reusable flow control logic for StreamWriter.drain().\n', '\n', ' This implements the protocol methods pause_writing(),\n', ' resume_writing() and connection_lost(). 
If the subclass overrides\n', ' these it must call the super methods.\n', '\n', ' StreamWriter.drain() must wait for _drain_helper() coroutine.\n', ' """\n', '\n', ' def __init__(self, loop=None):\n', ' if loop is None:\n', ' self._loop = events.get_event_loop()\n', ' else:\n', ' self._loop = loop\n', ' self._paused = False\n', ' self._drain_waiter = None\n', ' self._connection_lost = False\n', '\n', ' def pause_writing(self):\n', ' assert not self._paused\n', ' self._paused = True\n', ' if self._loop.get_debug():\n', ' logger.debug("%r pauses writing", self)\n', '\n', ' def resume_writing(self):\n', ' assert self._paused\n', ' self._paused = False\n', ' if self._loop.get_debug():\n', ' logger.debug("%r resumes writing", self)\n', '\n', ' waiter = self._drain_waiter\n', ' if waiter is not None:\n', ' self._drain_waiter = None\n', ' if not waiter.done():\n', ' waiter.set_result(None)\n', '\n', ' def connection_lost(self, exc):\n', ' self._connection_lost = True\n', ' # Wake up the writer if currently paused.\n', ' if not self._paused:\n', ' return\n', ' waiter = self._drain_waiter\n', ' if waiter is None:\n', ' return\n', ' self._drain_waiter = None\n', ' if waiter.done():\n', ' return\n', ' if exc is None:\n', ' waiter.set_result(None)\n', ' else:\n', ' waiter.set_exception(exc)\n', '\n', ' async def _drain_helper(self):\n', ' if self._connection_lost:\n', " raise ConnectionResetError('Connection lost')\n", ' if not self._paused:\n', ' return\n', ' waiter = self._drain_waiter\n', ' assert waiter is None or waiter.cancelled()\n', ' waiter = self._loop.create_future()\n', ' self._drain_waiter = waiter\n', ' await waiter\n', '\n', ' def _get_close_waiter(self, stream):\n', ' raise NotImplementedError\n', '\n', '\n', 'class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):\n', ' """Helper class to adapt between Protocol and StreamReader.\n', '\n', ' (This is a helper class instead of making StreamReader itself a\n', ' Protocol subclass, because the StreamReader has other potential\n', ' uses, and to prevent the user of the StreamReader to accidentally\n', ' call inappropriate methods of the protocol.)\n', ' """\n', '\n', ' _source_traceback = None\n', '\n', ' def __init__(self, stream_reader, client_connected_cb=None, loop=None):\n', ' super().__init__(loop=loop)\n', ' if stream_reader is not None:\n', ' self._stream_reader_wr = weakref.ref(stream_reader)\n', ' self._source_traceback = stream_reader._source_traceback\n', ' else:\n', ' self._stream_reader_wr = None\n', ' if client_connected_cb is not None:\n', ' # This is a stream created by the `create_server()` function.\n', ' # Keep a strong reference to the reader until a connection\n', ' # is established.\n', ' self._strong_reader = stream_reader\n', ' self._reject_connection = False\n', ' self._stream_writer = None\n', ' self._transport = None\n', ' self._client_connected_cb = client_connected_cb\n', ' self._over_ssl = False\n', ' self._closed = self._loop.create_future()\n', '\n', ' @property\n', ' def _stream_reader(self):\n', ' if self._stream_reader_wr is None:\n', ' return None\n', ' return self._stream_reader_wr()\n', '\n', ' def connection_made(self, transport):\n', ' if self._reject_connection:\n', ' context = {\n', " 'message': ('An open stream was garbage collected prior to '\n", " 'establishing network connection; '\n", ' \'call "stream.close()" explicitly.\')\n', ' }\n', ' if self._source_traceback:\n', " context['source_traceback'] = self._source_traceback\n", ' self._loop.call_exception_handler(context)\n', ' 
transport.abort()\n', ' return\n', ' self._transport = transport\n', ' reader = self._stream_reader\n', ' if reader is not None:\n', ' reader.set_transport(transport)\n', " self._over_ssl = transport.get_extra_info('sslcontext') is not None\n", ' if self._client_connected_cb is not None:\n', ' self._stream_writer = StreamWriter(transport, self,\n', ' reader,\n', ' self._loop)\n', ' res = self._client_connected_cb(reader,\n', ' self._stream_writer)\n', ' if coroutines.iscoroutine(res):\n', ' self._loop.create_task(res)\n', ' self._strong_reader = None\n', '\n', ' def connection_lost(self, exc):\n', ' reader = self._stream_reader\n', ' if reader is not None:\n', ' if exc is None:\n', ' reader.feed_eof()\n', ' else:\n', ' reader.set_exception(exc)\n', ' if not self._closed.done():\n', ' if exc is None:\n', ' self._closed.set_result(None)\n', ' else:\n', ' self._closed.set_exception(exc)\n', ' super().connection_lost(exc)\n', ' self._stream_reader_wr = None\n', ' self._stream_writer = None\n', ' self._transport = None\n', '\n', ' def data_received(self, data):\n', ' reader = self._stream_reader\n', ' if reader is not None:\n', ' reader.feed_data(data)\n', '\n', ' def eof_received(self):\n', ' reader = self._stream_reader\n', ' if reader is not None:\n', ' reader.feed_eof()\n', ' if self._over_ssl:\n', ' # Prevent a warning in SSLProtocol.eof_received:\n', ' # "returning true from eof_received()\n', ' # has no effect when using ssl"\n', ' return False\n', ' return True\n', '\n', ' def _get_close_waiter(self, stream):\n', ' return self._closed\n', '\n', ' def __del__(self):\n', ' # Prevent reports about unhandled exceptions.\n', ' # Better than self._closed._log_traceback = False hack\n', ' closed = self._closed\n', ' if closed.done() and not closed.cancelled():\n', ' closed.exception()\n', '\n', '\n', 'class StreamWriter:\n', ' """Wraps a Transport.\n', '\n', ' This exposes write(), writelines(), [can_]write_eof(),\n', ' get_extra_info() and close(). It adds drain() which returns an\n', ' optional Future on which you can wait for flow control. 
It also\n', ' adds a transport property which references the Transport\n', ' directly.\n', ' """\n', '\n', ' def __init__(self, transport, protocol, reader, loop):\n', ' self._transport = transport\n', ' self._protocol = protocol\n', ' # drain() expects that the reader has an exception() method\n', ' assert reader is None or isinstance(reader, StreamReader)\n', ' self._reader = reader\n', ' self._loop = loop\n', ' self._complete_fut = self._loop.create_future()\n', ' self._complete_fut.set_result(None)\n', '\n', ' def __repr__(self):\n', " info = [self.__class__.__name__, f'transport={self._transport!r}']\n", ' if self._reader is not None:\n', " info.append(f'reader={self._reader!r}')\n", " return '<{}>'.format(' '.join(info))\n", '\n', ' @property\n', ' def transport(self):\n', ' return self._transport\n', '\n', ' def write(self, data):\n', ' self._transport.write(data)\n', '\n', ' def writelines(self, data):\n', ' self._transport.writelines(data)\n', '\n', ' def write_eof(self):\n', ' return self._transport.write_eof()\n', '\n', ' def can_write_eof(self):\n', ' return self._transport.can_write_eof()\n', '\n', ' def close(self):\n', ' return self._transport.close()\n', '\n', ' def is_closing(self):\n', ' return self._transport.is_closing()\n', '\n', ' async def wait_closed(self):\n', ' await self._protocol._get_close_waiter(self)\n', '\n', ' def get_extra_info(self, name, default=None):\n', ' return self._transport.get_extra_info(name, default)\n', '\n', ' async def drain(self):\n', ' """Flush the write buffer.\n', '\n', ' The intended use is to write\n', '\n', ' w.write(data)\n', ' await w.drain()\n', ' """\n', ' if self._reader is not None:\n', ' exc = self._reader.exception()\n', ' if exc is not None:\n', ' raise exc\n', ' if self._transport.is_closing():\n', ' # Wait for protocol.connection_lost() call\n', ' # Raise connection closing error if any,\n', ' # ConnectionResetError otherwise\n', ' # Yield to the event loop so connection_lost() may be\n', ' # called. 
Without this, _drain_helper() would return\n', ' # immediately, and code that calls\n', ' # write(...); await drain()\n', ' # in a loop would never call connection_lost(), so it\n', ' # would not see an error when the socket is closed.\n', ' await sleep(0)\n', ' await self._protocol._drain_helper()\n', '\n', '\n', 'class StreamReader:\n', '\n', ' _source_traceback = None\n', '\n', ' def __init__(self, limit=_DEFAULT_LIMIT, loop=None):\n', ' # The line length limit is a security feature;\n', ' # it also doubles as half the buffer limit.\n', '\n', ' if limit <= 0:\n', " raise ValueError('Limit cannot be <= 0')\n", '\n', ' self._limit = limit\n', ' if loop is None:\n', ' self._loop = events.get_event_loop()\n', ' else:\n', ' self._loop = loop\n', ' self._buffer = bytearray()\n', " self._eof = False # Whether we're done.\n", ' self._waiter = None # A future used by _wait_for_data()\n', ' self._exception = None\n', ' self._transport = None\n', ' self._paused = False\n', ' if self._loop.get_debug():\n', ' self._source_traceback = format_helpers.extract_stack(\n', ' sys._getframe(1))\n', '\n', ' def __repr__(self):\n', " info = ['StreamReader']\n", ' if self._buffer:\n', " info.append(f'{len(self._buffer)} bytes')\n", ' if self._eof:\n', " info.append('eof')\n", ' if self._limit != _DEFAULT_LIMIT:\n', " info.append(f'limit={self._limit}')\n", ' if self._waiter:\n', " info.append(f'waiter={self._waiter!r}')\n", ' if self._exception:\n', " info.append(f'exception={self._exception!r}')\n", ' if self._transport:\n', " info.append(f'transport={self._transport!r}')\n", ' if self._paused:\n', " info.append('paused')\n", " return '<{}>'.format(' '.join(info))\n", '\n', ' def exception(self):\n', ' return self._exception\n', '\n', ' def set_exception(self, exc):\n', ' self._exception = exc\n', '\n', ' waiter = self._waiter\n', ' if waiter is not None:\n', ' self._waiter = None\n', ' if not waiter.cancelled():\n', ' waiter.set_exception(exc)\n', '\n', ' def _wakeup_waiter(self):\n', ' """Wakeup read*() functions waiting for data or EOF."""\n', ' waiter = self._waiter\n', ' if waiter is not None:\n', ' self._waiter = None\n', ' if not waiter.cancelled():\n', ' waiter.set_result(None)\n', '\n', ' def set_transport(self, transport):\n', " assert self._transport is None, 'Transport already set'\n", ' self._transport = transport\n', '\n', ' def _maybe_resume_transport(self):\n', ' if self._paused and len(self._buffer) <= self._limit:\n', ' self._paused = False\n', ' self._transport.resume_reading()\n', '\n', ' def feed_eof(self):\n', ' self._eof = True\n', ' self._wakeup_waiter()\n', '\n', ' def at_eof(self):\n', ' """Return True if the buffer is empty and \'feed_eof\' was called."""\n', ' return self._eof and not self._buffer\n', '\n', ' def feed_data(self, data):\n', " assert not self._eof, 'feed_data after feed_eof'\n", '\n', ' if not data:\n', ' return\n', '\n', ' self._buffer.extend(data)\n', ' self._wakeup_waiter()\n', '\n', ' if (self._transport is not None and\n', ' not self._paused and\n', ' len(self._buffer) > 2 * self._limit):\n', ' try:\n', ' self._transport.pause_reading()\n', ' except NotImplementedError:\n', " # The transport can't be paused.\n", " # We'll just have to buffer all data.\n", " # Forget the transport so we don't keep trying.\n", ' self._transport = None\n', ' else:\n', ' self._paused = True\n', '\n', ' async def _wait_for_data(self, func_name):\n', ' """Wait until feed_data() or feed_eof() is called.\n', '\n', ' If stream was paused, automatically resume it.\n', ' """\n', ' # 
StreamReader uses a future to link the protocol feed_data() method\n', ' # to a read coroutine. Running two read coroutines at the same time\n', ' # would have an unexpected behaviour. It would not possible to know\n', ' # which coroutine would get the next data.\n', ' if self._waiter is not None:\n', ' raise RuntimeError(\n', " f'{func_name}() called while another coroutine is '\n", " f'already waiting for incoming data')\n", '\n', " assert not self._eof, '_wait_for_data after EOF'\n", '\n', ' # Waiting for data while paused will make deadlock, so prevent it.\n', ' # This is essential for readexactly(n) for case when n > self._limit.\n', ' if self._paused:\n', ' self._paused = False\n', ' self._transport.resume_reading()\n', '\n', ' self._waiter = self._loop.create_future()\n', ' try:\n', ' await self._waiter\n', ' finally:\n', ' self._waiter = None\n', '\n', ' async def readline(self):\n', ' """Read chunk of data from the stream until newline (b\'\\n\') is found.\n', '\n', ' On success, return chunk that ends with newline. If only partial\n', ' line can be read due to EOF, return incomplete line without\n', ' terminating newline. When EOF was reached while no bytes read, empty\n', ' bytes object is returned.\n', '\n', ' If limit is reached, ValueError will be raised. In that case, if\n', ' newline was found, complete line including newline will be removed\n', ' from internal buffer. Else, internal buffer will be cleared. Limit is\n', ' compared against part of the line without newline.\n', '\n', ' If stream was paused, this function will automatically resume it if\n', ' needed.\n', ' """\n', " sep = b'\\n'\n", ' seplen = len(sep)\n', ' try:\n', ' line = await self.readuntil(sep)\n', ' except exceptions.IncompleteReadError as e:\n', ' return e.partial\n', ' except exceptions.LimitOverrunError as e:\n', ' if self._buffer.startswith(sep, e.consumed):\n', ' del self._buffer[:e.consumed + seplen]\n', ' else:\n', ' self._buffer.clear()\n', ' self._maybe_resume_transport()\n', ' raise ValueError(e.args[0])\n', ' return line\n', '\n', " async def readuntil(self, separator=b'\\n'):\n", ' """Read data from the stream until ``separator`` is found.\n', '\n', ' On success, the data and separator will be removed from the\n', ' internal buffer (consumed). Returned data will include the\n', ' separator at the end.\n', '\n', ' Configured stream limit is used to check result. Limit sets the\n', ' maximal length of data that can be returned, not counting the\n', ' separator.\n', '\n', ' If an EOF occurs and the complete separator is still not found,\n', ' an IncompleteReadError exception will be raised, and the internal\n', ' buffer will be reset. The IncompleteReadError.partial attribute\n', ' may contain the separator partially.\n', '\n', ' If the data cannot be read because of over limit, a\n', ' LimitOverrunError exception will be raised, and the data\n', ' will be left in the internal buffer, so it can be read again.\n', ' """\n', ' seplen = len(separator)\n', ' if seplen == 0:\n', " raise ValueError('Separator should be at least one-byte string')\n", '\n', ' if self._exception is not None:\n', ' raise self._exception\n', '\n', ' # Consume whole buffer except last bytes, which length is\n', " # one less than seplen. Let's check corner cases with\n", " # separator='SEPARATOR':\n", ' # * we have received almost complete separator (without last\n', " # byte). i.e buffer='some textSEPARATO'. 
In this case we\n", ' # can safely consume len(separator) - 1 bytes.\n', ' # * last byte of buffer is first byte of separator, i.e.\n', " # buffer='abcdefghijklmnopqrS'. We may safely consume\n", ' # everything except that last byte, but this require to\n', ' # analyze bytes of buffer that match partial separator.\n', ' # This is slow and/or require FSM. For this case our\n', ' # implementation is not optimal, since require rescanning\n', ' # of data that is known to not belong to separator. In\n', ' # real world, separator will not be so long to notice\n', ' # performance problems. Even when reading MIME-encoded\n', ' # messages :)\n', '\n', ' # `offset` is the number of bytes from the beginning of the buffer\n', ' # where there is no occurrence of `separator`.\n', ' offset = 0\n', '\n', ' # Loop until we find `separator` in the buffer, exceed the buffer size,\n', ' # or an EOF has happened.\n', ' while True:\n', ' buflen = len(self._buffer)\n', '\n', ' # Check if we now have enough data in the buffer for `separator` to\n', ' # fit.\n', ' if buflen - offset >= seplen:\n', ' isep = self._buffer.find(separator, offset)\n', '\n', ' if isep != -1:\n', ' # `separator` is in the buffer. `isep` will be used later\n', ' # to retrieve the data.\n', ' break\n', '\n', ' # see upper comment for explanation.\n', ' offset = buflen + 1 - seplen\n', ' if offset > self._limit:\n', ' raise exceptions.LimitOverrunError(\n', " 'Separator is not found, and chunk exceed the limit',\n", ' offset)\n', '\n', ' # Complete message (with full separator) may be present in buffer\n', ' # even when EOF flag is set. This may happen when the last chunk\n', " # adds data which makes separator be found. That's why we check for\n", ' # EOF *ater* inspecting the buffer.\n', ' if self._eof:\n', ' chunk = bytes(self._buffer)\n', ' self._buffer.clear()\n', ' raise exceptions.IncompleteReadError(chunk, None)\n', '\n', ' # _wait_for_data() will resume reading if stream was paused.\n', " await self._wait_for_data('readuntil')\n", '\n', ' if isep > self._limit:\n', ' raise exceptions.LimitOverrunError(\n', " 'Separator is found, but chunk is longer than limit', isep)\n", '\n', ' chunk = self._buffer[:isep + seplen]\n', ' del self._buffer[:isep + seplen]\n', ' self._maybe_resume_transport()\n', ' return bytes(chunk)\n', '\n', ' async def read(self, n=-1):\n', ' """Read up to `n` bytes from the stream.\n', '\n', ' If n is not provided, or set to -1, read until EOF and return all read\n', ' bytes. If the EOF was received and the internal buffer is empty, return\n', ' an empty bytes object.\n', '\n', ' If n is zero, return empty bytes object immediately.\n', '\n', ' If n is positive, this function try to read `n` bytes, and may return\n', ' less or equal bytes than requested, but at least one byte. If EOF was\n', ' received before any byte is read, this function returns empty byte\n', ' object.\n', '\n', ' Returned value is not limited with limit, configured at stream\n', ' creation.\n', '\n', ' If stream was paused, this function will automatically resume it if\n', ' needed.\n', ' """\n', '\n', ' if self._exception is not None:\n', ' raise self._exception\n', '\n', ' if n == 0:\n', " return b''\n", '\n', ' if n < 0:\n', ' # This used to just loop creating a new waiter hoping to\n', ' # collect everything in self._buffer, but that would\n', ' # deadlock if the subprocess sends more than self.limit\n', ' # bytes. 
So just call self.read(self._limit) until EOF.\n', ' blocks = []\n', ' while True:\n', ' block = await self.read(self._limit)\n', ' if not block:\n', ' break\n', ' blocks.append(block)\n', " return b''.join(blocks)\n", '\n', ' if not self._buffer and not self._eof:\n', " await self._wait_for_data('read')\n", '\n', ' # This will work right even if buffer is less than n bytes\n', ' data = bytes(self._buffer[:n])\n', ' del self._buffer[:n]\n', '\n', ' self._maybe_resume_transport()\n', ' return data\n', '\n', ' async def readexactly(self, n):\n', ' """Read exactly `n` bytes.\n', '\n', ' Raise an IncompleteReadError if EOF is reached before `n` bytes can be\n', ' read. The IncompleteReadError.partial attribute of the exception will\n', ' contain the partial read bytes.\n', '\n', ' if n is zero, return empty bytes object.\n', '\n', ' Returned value is not limited with limit, configured at stream\n', ' creation.\n', '\n', ' If stream was paused, this function will automatically resume it if\n', ' needed.\n', ' """\n', ' if n < 0:\n', " raise ValueError('readexactly size can not be less than zero')\n", '\n', ' if self._exception is not None:\n', ' raise self._exception\n', '\n', ' if n == 0:\n', " return b''\n", '\n', ' while len(self._buffer) < n:\n', ' if self._eof:\n', ' incomplete = bytes(self._buffer)\n', ' self._buffer.clear()\n', ' raise exceptions.IncompleteReadError(incomplete, n)\n', '\n', " await self._wait_for_data('readexactly')\n", '\n', ' if len(self._buffer) == n:\n', ' data = bytes(self._buffer)\n', ' self._buffer.clear()\n', ' else:\n', ' data = bytes(self._buffer[:n])\n', ' del self._buffer[:n]\n', ' self._maybe_resume_transport()\n', ' return data\n', '\n', ' def __aiter__(self):\n', ' return self\n', '\n', ' async def __anext__(self):\n', ' val = await self.readline()\n', " if val == b'':\n", ' raise StopAsyncIteration\n', ' return val\n'], '/nix/store/hrq2jcbjdwkj39xzahvnnjk93ccrz1pr-python3-3.9.6/lib/python3.9/asyncio/streams.py'), '/nix/store/r2cl0bzi7p4pi83iyc9i3bak32ph5bkd-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/handler.py': (2526, 1.0, ['from .resource import Resource\n', 'from .response import Status, Response\n', 'from .request import Connection, Context\n', 'from .util import get_path_components\n', 'from urllib.parse import urlparse\n', 'from typing import Dict, Callable, Awaitable\n', '\n', 'import logging\n', 'import re\n', '\n', 'Handler = Callable[[str, Connection], Awaitable[Response]]\n', 'PORT_RE = re.compile(r":([0-9]{1,5})$")\n', '\n', '\n', 'class GenericHandler:\n', ' def __init__(self, url_map: Dict[str, Dict[str, Resource]]):\n', ' self.url_map = url_map\n', ' self.log = logging.getLogger("amethyst.handler.GenericHandler")\n', '\n', ' async def __call__(self, url: str, conn: Connection) -> Response:\n', ' result = urlparse(url)\n', '\n', ' if not result.scheme:\n', ' return Response(Status.BAD_REQUEST, f"Requested URL must have a scheme.")\n', '\n', ' if result.scheme != "gemini":\n', ' # This is exclusively a Gemini server.\n', ' return Response(\n', ' Status.PROXY_REQUEST_REFUSED,\n', ' f"This server does not proxy non-Gemini URLs.",\n', ' )\n', '\n', ' host = result.netloc\n', '\n', ' if port_match := PORT_RE.search(host):\n', ' if int(port_match.group(1)) != conn.server.config.port:\n', ' return Response(\n', ' Status.PROXY_REQUEST_REFUSED, f"{host} is not served here."\n', ' )\n', '\n', ' host = PORT_RE.sub("", host)\n', '\n', ' if host not in self.url_map:\n', ' self.log.warn(f"Received request for host {host} not in URL map")\n', 
'\n', ' return Response(\n', ' Status.PROXY_REQUEST_REFUSED,\n', ' f"{host} is not served here.",\n', ' )\n', '\n', ' req_path = result.path\n', ' try:\n', ' req_path = get_path_components(req_path)\n', ' except ValueError:\n', ' return Response(Status.BAD_REQUEST, "Invalid URL")\n', '\n', ' paths = [(get_path_components(i), v) for i, v in self.url_map[host].items()]\n', '\n', ' for path, resource in sorted(paths, key=lambda k: len(k[0]), reverse=True):\n', ' if len(req_path) < len(path) or req_path[: len(path)] != path:\n', ' continue\n', '\n', ' truncated_path = "/".join(req_path[len(path) :])\n', ' if result.path.endswith("/"):\n', ' truncated_path += "/"\n', '\n', ' return await resource(\n', ' Context(\n', ' result.netloc,\n', ' result.path,\n', ' truncated_path,\n', ' result.query,\n', ' conn,\n', ' )\n', ' )\n', '\n', ' return Response(Status.NOT_FOUND, f"{req_path} was not found on this server.")\n'], '/nix/store/r2cl0bzi7p4pi83iyc9i3bak32ph5bkd-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/handler.py'), '/nix/store/yn3ydn377sbngdx1a74ikjg098dq5nhy-python3.9-amethyst_extensions-0.0.1/lib/python3.9/site-packages/amethyst_ext/pydoc.py': (6841, 1.0, ['from amethyst.response import Response, Status\n', '\n', 'import importlib\n', 'import inspect\n', 'import pkgutil\n', 'import re\n', 'import sys\n', 'import textwrap\n', '\n', 'SITE_PACKAGES_RE = re.compile(r"lib/python[^/]+/site-packages")\n', 'PYTHON3_RE = re.compile(r"python3[^-]*")\n', '\n', '\n', 'class PydocResource():\n', ' @staticmethod\n', ' def classify(thing):\n', ' if inspect.ismodule(thing):\n', ' return "module"\n', ' elif inspect.isclass(thing):\n', ' return "class"\n', ' elif (inspect.isfunction(thing) or inspect.ismethod(thing) or\n', ' inspect.ismethoddescriptor(thing) or inspect.isroutine(thing)):\n', ' return "function"\n', ' else:\n', ' return "other"\n', '\n', ' def doc_class(self, cls, name=None):\n', ' lines = []\n', '\n', ' if name is None:\n', ' name = cls.__name__\n', ' else:\n', ' name = f"{name}.{cls.__name__}"\n', '\n', ' lines.append(f"### {name}")\n', ' if (clsdoc := getattr(cls, "__doc__")):\n', ' lines.append(f"```\\n{clsdoc}\\n```\\n")\n', '\n', ' members = {}\n', ' members = {"class": [], "function": [], "other": []}\n', '\n', ' for name, member in inspect.getmembers(cls):\n', ' if name.startswith("_"):\n', ' continue\n', '\n', ' if (classification := self.classify(member)) in {"class", "function", "other"}:\n', ' members[classification].append((name, member))\n', '\n', ' members["class"].sort()\n', ' for _, scls in members["class"]:\n', ' lines.append(self.doc_class(scls, name))\n', '\n', ' members["function"].sort()\n', ' for name, func in members["function"]:\n', ' lines.append(self.doc_func(func))\n', '\n', ' members["other"].sort()\n', ' for name, other in members["other"]:\n', ' lines.append(self.doc_other(name, other))\n', '\n', ' return "\\n".join(lines)\n', '\n', ' def doc_func(self, func):\n', ' lines = []\n', '\n', ' lines.append("```")\n', ' try:\n', ' lines.append(f"{func.__name__}{inspect.signature(func)}")\n', ' except ValueError:\n', ' lines.append(f"{func.__name__}(...)")\n', '\n', ' if (funcdoc := getattr(func, "__doc__")):\n', ' lines.append(f"\\n{textwrap.indent(funcdoc, \' \')}\\n```\\n")\n', ' else:\n', ' lines.append("```\\n")\n', '\n', ' return "\\n".join(lines)\n', '\n', ' def doc_other(self, name, other):\n', ' doc = getattr(other, "__doc__", "")\n', ' if doc and doc != type(other).__doc__:\n', ' doc = textwrap.indent(doc, " ")\n', ' doc += "\\n```\\n"\n', ' else:\n', ' 
doc = "```"\n', '\n', ' return f"```\\n{name} = {other!r}\\n{doc}"\n', '\n', ' def doc_mod(self, modname):\n', ' lines = []\n', '\n', ' try:\n', ' module = importlib.import_module(modname)\n', ' except ImportError:\n', ' return None\n', '\n', ' ispkg = (getattr(module, "__package__", "") == modname)\n', '\n', ' lines.append("=> _ Back to module index")\n', ' lines.append("=> _/search Go to module by name")\n', ' if "." in modname:\n', ' components = modname.split(".")\n', ' for i in range(len(components) - 1, 0, -1):\n', ' lines.append("=> " + ".".join(components[:i]))\n', '\n', ' if ispkg:\n', ' lines.append(f"# {modname} (package)")\n', ' else:\n', ' lines.append(f"# {modname}")\n', '\n', ' if (moddoc := getattr(module, "__doc__")):\n', ' lines.append(f"```\\n{moddoc}\\n```")\n', ' else:\n', ' lines.append("This module has no docstring.")\n', '\n', ' members = {"module": [], "class": [], "function": [], "other": []}\n', ' for name, member in inspect.getmembers(module):\n', ' if name.startswith("_"):\n', ' continue\n', '\n', ' members[self.classify(member)].append((name, member))\n', '\n', ' if members["class"]:\n', ' members["class"].sort()\n', ' lines.append("## Classes")\n', ' for name, cls in members["class"]:\n', ' lines.append(self.doc_class(cls))\n', '\n', ' if members["function"]:\n', ' members["function"].sort()\n', ' lines.append("## Functions")\n', ' for name, func in members["function"]:\n', ' lines.append(f"### {name}")\n', ' lines.append(self.doc_func(func))\n', '\n', ' if members["other"]:\n', ' lines.append("## Other members")\n', ' members["other"].sort()\n', ' for name, other in members["other"]:\n', ' lines.append(self.doc_other(name, other))\n', '\n', ' if members["module"]:\n', ' members["module"].sort()\n', ' lines.append("## Modules")\n', ' for name, mod in members["module"]:\n', ' lines.append(f"=> {mod.__name__} {name}")\n', '\n', ' return "\\n".join(lines)\n', '\n', ' def index(self):\n', ' lines = []\n', '\n', ' lines.append("=> _/search Go to module by name")\n', '\n', ' lines.append("# Built-in modules")\n', ' names = [name for name in sys.builtin_module_names if name != "__main__"]\n', ' for name in sorted(names):\n', ' lines.append(f"=> {name}")\n', '\n', ' lines.append("# Python modules")\n', ' for dirname in sorted(sys.path):\n', ' display = dirname\n', ' if display.startswith("/nix/store/"):\n', ' display = f"(nix)/{display[44:]}"\n', '\n', ' display = SITE_PACKAGES_RE.sub("l/p/s-p", display)\n', ' display = PYTHON3_RE.sub("p3", display)\n', '\n', ' modpkgs = []\n', ' for importer, name, ispkg in pkgutil.iter_modules([dirname]):\n', ' if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name):\n', ' # Ignore modules that contain surrogate characters\n', ' # (pydoc does this)\n', ' continue\n', '\n', ' if name == "setup":\n', ' # never import "setup.py"\n', ' continue\n', '\n', ' modpkgs.append((name, ispkg))\n', '\n', ' if modpkgs:\n', ' lines.append(f"## {display}")\n', ' for name, ispkg in sorted(modpkgs):\n', ' if ispkg:\n', ' lines.append(f"=> {name} {name} (package)")\n', ' else:\n', ' lines.append(f"=> {name}")\n', '\n', ' return "\\n".join(lines)\n', '\n', '\n', ' async def __call__(self, ctx):\n', ' path = ctx.path\n', ' if not path:\n', ' return Response(Status.REDIRECT_PERMANENT, ctx.orig_path + "/")\n', '\n', ' path = path.strip("/")\n', ' if not path or path == "_":\n', ' text = self.index()\n', '\n', ' elif path == "_/search":\n', ' if ctx.query:\n', ' try:\n', ' importlib.import_module(ctx.query)\n', ' return Response(Status.REDIRECT_TEMPORARY, 
"../" + ctx.query)\n', ' except ImportError:\n', ' return Response(Status.INPUT, f"Sorry, I don\'t know about {ctx.query}. Module name?")\n', '\n', ' return Response(Status.INPUT, "Module name?")\n', ' else:\n', ' text = self.doc_mod(path)\n', '\n', ' if text is not None:\n', ' return Response(\n', ' Status.SUCCESS, "text/gemini", text.encode()\n', ' )\n', '\n', ' return Response(Status.NOT_FOUND, "text/gemini")\n'], '/nix/store/yn3ydn377sbngdx1a74ikjg098dq5nhy-python3.9-amethyst_extensions-0.0.1/lib/python3.9/site-packages/amethyst_ext/pydoc.py'), '/nix/store/50avp7k3smmsvqrfpl7bzx35qj5zxwy2-python3.9-pyparsing-2.4.7/lib/python3.9/site-packages/pyparsing.py': (273365, 1.0, ['# -*- coding: utf-8 -*-\n', '# module pyparsing.py\n', '#\n', '# Copyright (c) 2003-2019 Paul T. McGuire\n', '#\n', '# Permission is hereby granted, free of charge, to any person obtaining\n', '# a copy of this software and associated documentation files (the\n', '# "Software"), to deal in the Software without restriction, including\n', '# without limitation the rights to use, copy, modify, merge, publish,\n', '# distribute, sublicense, and/or sell copies of the Software, and to\n', '# permit persons to whom the Software is furnished to do so, subject to\n', '# the following conditions:\n', '#\n', '# The above copyright notice and this permission notice shall be\n', '# included in all copies or substantial portions of the Software.\n', '#\n', '# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\n', '# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n', '# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n', '# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n', '# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n', '# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n', '# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n', '#\n', '\n', '__doc__ = \\\n', '"""\n', 'pyparsing module - Classes and methods to define and execute parsing grammars\n', '=============================================================================\n', '\n', 'The pyparsing module is an alternative approach to creating and\n', 'executing simple grammars, vs. the traditional lex/yacc approach, or the\n', "use of regular expressions. With pyparsing, you don't need to learn\n", 'a new syntax for defining grammars or matching expressions - the parsing\n', 'module provides a library of classes that you use to construct the\n', 'grammar directly in Python.\n', '\n', 'Here is a program to parse "Hello, World!" (or any greeting of the form\n', '``"<salutation>, <addressee>!"``), built up using :class:`Word`,\n', ':class:`Literal`, and :class:`And` elements\n', "(the :class:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,\n", 'and the strings are auto-converted to :class:`Literal` expressions)::\n', '\n', ' from pyparsing import Word, alphas\n', '\n', ' # define grammar of a greeting\n', ' greet = Word(alphas) + "," + Word(alphas) + "!"\n', '\n', ' hello = "Hello, World!"\n', ' print (hello, "->", greet.parseString(hello))\n', '\n', 'The program outputs the following::\n', '\n', " Hello, World! 
-> ['Hello', ',', 'World', '!']\n", '\n', 'The Python representation of the grammar is quite readable, owing to the\n', "self-explanatory class names, and the use of '+', '|' and '^' operators.\n", '\n', 'The :class:`ParseResults` object returned from\n', ':class:`ParserElement.parseString` can be\n', 'accessed as a nested list, a dictionary, or an object with named\n', 'attributes.\n', '\n', 'The pyparsing module handles some of the problems that are typically\n', 'vexing when writing text parsers:\n', '\n', ' - extra or missing whitespace (the above program will also handle\n', ' "Hello,World!", "Hello , World !", etc.)\n', ' - quoted strings\n', ' - embedded comments\n', '\n', '\n', 'Getting Started -\n', '-----------------\n', 'Visit the classes :class:`ParserElement` and :class:`ParseResults` to\n', 'see the base classes that most other pyparsing\n', 'classes inherit from. Use the docstrings for examples of how to:\n', '\n', ' - construct literal match expressions from :class:`Literal` and\n', ' :class:`CaselessLiteral` classes\n', ' - construct character word-group expressions using the :class:`Word`\n', ' class\n', ' - see how to create repetitive expressions using :class:`ZeroOrMore`\n', ' and :class:`OneOrMore` classes\n', " - use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,\n", " and :class:`'&'<Each>` operators to combine simple expressions into\n", ' more complex ones\n', ' - associate names with your parsed results using\n', ' :class:`ParserElement.setResultsName`\n', ' - access the parsed data, which is returned as a :class:`ParseResults`\n', ' object\n', ' - find some helpful expression short-cuts like :class:`delimitedList`\n', ' and :class:`oneOf`\n', ' - find more useful common expressions in the :class:`pyparsing_common`\n', ' namespace class\n', '"""\n', '\n', '__version__ = "2.4.7"\n', '__versionTime__ = "30 Mar 2020 00:43 UTC"\n', '__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"\n', '\n', 'import string\n', 'from weakref import ref as wkref\n', 'import copy\n', 'import sys\n', 'import warnings\n', 'import re\n', 'import sre_constants\n', 'import collections\n', 'import pprint\n', 'import traceback\n', 'import types\n', 'from datetime import datetime\n', 'from operator import itemgetter\n', 'import itertools\n', 'from functools import wraps\n', 'from contextlib import contextmanager\n', '\n', 'try:\n', ' # Python 3\n', ' from itertools import filterfalse\n', 'except ImportError:\n', ' from itertools import ifilterfalse as filterfalse\n', '\n', 'try:\n', ' from _thread import RLock\n', 'except ImportError:\n', ' from threading import RLock\n', '\n', 'try:\n', ' # Python 3\n', ' from collections.abc import Iterable\n', ' from collections.abc import MutableMapping, Mapping\n', 'except ImportError:\n', ' # Python 2.7\n', ' from collections import Iterable\n', ' from collections import MutableMapping, Mapping\n', '\n', 'try:\n', ' from collections import OrderedDict as _OrderedDict\n', 'except ImportError:\n', ' try:\n', ' from ordereddict import OrderedDict as _OrderedDict\n', ' except ImportError:\n', ' _OrderedDict = None\n', '\n', 'try:\n', ' from types import SimpleNamespace\n', 'except ImportError:\n', ' class SimpleNamespace: pass\n', '\n', '# version compatibility configuration\n', '__compat__ = SimpleNamespace()\n', '__compat__.__doc__ = """\n', ' A cross-version compatibility configuration for pyparsing features that will be\n', ' released in a future version. 
By setting values in this configuration to True,\n', ' those features can be enabled in prior versions for compatibility development\n', ' and testing.\n', '\n', ' - collect_all_And_tokens - flag to enable fix for Issue #63 that fixes erroneous grouping\n', ' of results names when an And expression is nested within an Or or MatchFirst; set to\n', ' True to enable bugfix released in pyparsing 2.3.0, or False to preserve\n', ' pre-2.3.0 handling of named results\n', '"""\n', '__compat__.collect_all_And_tokens = True\n', '\n', '__diag__ = SimpleNamespace()\n', '__diag__.__doc__ = """\n', 'Diagnostic configuration (all default to False)\n', ' - warn_multiple_tokens_in_named_alternation - flag to enable warnings when a results\n', ' name is defined on a MatchFirst or Or expression with one or more And subexpressions\n', ' (only warns if __compat__.collect_all_And_tokens is False)\n', ' - warn_ungrouped_named_tokens_in_collection - flag to enable warnings when a results\n', ' name is defined on a containing expression with ungrouped subexpressions that also\n', ' have results names\n', ' - warn_name_set_on_empty_Forward - flag to enable warnings whan a Forward is defined\n', ' with a results name, but has no contents defined\n', ' - warn_on_multiple_string_args_to_oneof - flag to enable warnings whan oneOf is\n', ' incorrectly called with multiple str arguments\n', ' - enable_debug_on_named_expressions - flag to auto-enable debug on all subsequent\n', ' calls to ParserElement.setName()\n', '"""\n', '__diag__.warn_multiple_tokens_in_named_alternation = False\n', '__diag__.warn_ungrouped_named_tokens_in_collection = False\n', '__diag__.warn_name_set_on_empty_Forward = False\n', '__diag__.warn_on_multiple_string_args_to_oneof = False\n', '__diag__.enable_debug_on_named_expressions = False\n', '__diag__._all_names = [nm for nm in vars(__diag__) if nm.startswith("enable_") or nm.startswith("warn_")]\n', '\n', 'def _enable_all_warnings():\n', ' __diag__.warn_multiple_tokens_in_named_alternation = True\n', ' __diag__.warn_ungrouped_named_tokens_in_collection = True\n', ' __diag__.warn_name_set_on_empty_Forward = True\n', ' __diag__.warn_on_multiple_string_args_to_oneof = True\n', '__diag__.enable_all_warnings = _enable_all_warnings\n', '\n', '\n', "__all__ = ['__version__', '__versionTime__', '__author__', '__compat__', '__diag__',\n", " 'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',\n", " 'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',\n", " 'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',\n", " 'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',\n", " 'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',\n", " 'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',\n", " 'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char',\n", " 'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',\n", " 'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',\n", " 'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',\n", " 'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',\n", " 'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',\n", " 'nestedExpr', 'nullDebugAction', 
'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',\n", " 'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',\n", " 'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',\n", " 'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',\n", " 'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation', 'locatedExpr', 'withClass',\n", " 'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set',\n", " 'conditionAsParseAction', 're',\n", ' ]\n', '\n', 'system_version = tuple(sys.version_info)[:3]\n', 'PY_3 = system_version[0] == 3\n', 'if PY_3:\n', ' _MAX_INT = sys.maxsize\n', ' basestring = str\n', ' unichr = chr\n', ' unicode = str\n', ' _ustr = str\n', '\n', ' # build list of single arg builtins, that can be used as parse actions\n', ' singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]\n', '\n', 'else:\n', ' _MAX_INT = sys.maxint\n', ' range = xrange\n', '\n', ' def _ustr(obj):\n', ' """Drop-in replacement for str(obj) that tries to be Unicode\n', ' friendly. It first tries str(obj). If that fails with\n', ' a UnicodeEncodeError, then it tries unicode(obj). It then\n', ' < returns the unicode object | encodes it with the default\n', ' encoding | ... >.\n', ' """\n', ' if isinstance(obj, unicode):\n', ' return obj\n', '\n', ' try:\n', ' # If this works, then _ustr(obj) has the same behaviour as str(obj), so\n', " # it won't break any existing code.\n", ' return str(obj)\n', '\n', ' except UnicodeEncodeError:\n', ' # Else encode it\n', " ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')\n", " xmlcharref = Regex(r'&#\\d+;')\n", " xmlcharref.setParseAction(lambda t: '\\\\u' + hex(int(t[0][2:-1]))[2:])\n", ' return xmlcharref.transformString(ret)\n', '\n', ' # build list of single arg builtins, tolerant of Python version, that can be used as parse actions\n', ' singleArgBuiltins = []\n', ' import __builtin__\n', '\n', ' for fname in "sum len sorted reversed list tuple set any all min max".split():\n', ' try:\n', ' singleArgBuiltins.append(getattr(__builtin__, fname))\n', ' except AttributeError:\n', ' continue\n', '\n', '_generatorType = type((y for y in range(1)))\n', '\n', 'def _xml_escape(data):\n', ' """Escape &, <, >, ", \', etc. 
in a string of data."""\n', '\n', ' # ampersand must be replaced first\n', ' from_symbols = \'&><"\\\'\'\n', ' to_symbols = (\'&\' + s + \';\' for s in "amp gt lt quot apos".split())\n', ' for from_, to_ in zip(from_symbols, to_symbols):\n', ' data = data.replace(from_, to_)\n', ' return data\n', '\n', 'alphas = string.ascii_uppercase + string.ascii_lowercase\n', 'nums = "0123456789"\n', 'hexnums = nums + "ABCDEFabcdef"\n', 'alphanums = alphas + nums\n', '_bslash = chr(92)\n', 'printables = "".join(c for c in string.printable if c not in string.whitespace)\n', '\n', '\n', 'def conditionAsParseAction(fn, message=None, fatal=False):\n', ' msg = message if message is not None else "failed user-defined condition"\n', ' exc_type = ParseFatalException if fatal else ParseException\n', ' fn = _trim_arity(fn)\n', '\n', ' @wraps(fn)\n', ' def pa(s, l, t):\n', ' if not bool(fn(s, l, t)):\n', ' raise exc_type(s, l, msg)\n', '\n', ' return pa\n', '\n', 'class ParseBaseException(Exception):\n', ' """base exception class for all parsing runtime exceptions"""\n', ' # Performance tuning: we construct a *lot* of these, so keep this\n', ' # constructor as small and fast as possible\n', ' def __init__(self, pstr, loc=0, msg=None, elem=None):\n', ' self.loc = loc\n', ' if msg is None:\n', ' self.msg = pstr\n', ' self.pstr = ""\n', ' else:\n', ' self.msg = msg\n', ' self.pstr = pstr\n', ' self.parserElement = elem\n', ' self.args = (pstr, loc, msg)\n', '\n', ' @classmethod\n', ' def _from_exception(cls, pe):\n', ' """\n', ' internal factory method to simplify creating one type of ParseException\n', ' from another - avoids having __init__ signature conflicts among subclasses\n', ' """\n', ' return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)\n', '\n', ' def __getattr__(self, aname):\n', ' """supported attributes by name are:\n', ' - lineno - returns the line number of the exception text\n', ' - col - returns the column number of the exception text\n', ' - line - returns the line containing the exception text\n', ' """\n', ' if aname == "lineno":\n', ' return lineno(self.loc, self.pstr)\n', ' elif aname in ("col", "column"):\n', ' return col(self.loc, self.pstr)\n', ' elif aname == "line":\n', ' return line(self.loc, self.pstr)\n', ' else:\n', ' raise AttributeError(aname)\n', '\n', ' def __str__(self):\n', ' if self.pstr:\n', ' if self.loc >= len(self.pstr):\n', " foundstr = ', found end of text'\n", ' else:\n', " foundstr = (', found %r' % self.pstr[self.loc:self.loc + 1]).replace(r'\\\\', '\\\\')\n", ' else:\n', " foundstr = ''\n", ' return ("%s%s (at char %d), (line:%d, col:%d)" %\n', ' (self.msg, foundstr, self.loc, self.lineno, self.column))\n', ' def __repr__(self):\n', ' return _ustr(self)\n', ' def markInputline(self, markerString=">!<"):\n', ' """Extracts the exception line from the input string, and marks\n', ' the location of the exception with a special symbol.\n', ' """\n', ' line_str = self.line\n', ' line_column = self.column - 1\n', ' if markerString:\n', ' line_str = "".join((line_str[:line_column],\n', ' markerString, line_str[line_column:]))\n', ' return line_str.strip()\n', ' def __dir__(self):\n', ' return "lineno col line".split() + dir(type(self))\n', '\n', 'class ParseException(ParseBaseException):\n', ' """\n', " Exception thrown when parse expressions don't match class;\n", ' supported attributes by name are:\n', ' - lineno - returns the line number of the exception text\n', ' - col - returns the column number of the exception text\n', ' - line - returns the line containing the 
exception text\n', '\n', ' Example::\n', '\n', ' try:\n', ' Word(nums).setName("integer").parseString("ABC")\n', ' except ParseException as pe:\n', ' print(pe)\n', ' print("column: {}".format(pe.col))\n', '\n', ' prints::\n', '\n', ' Expected integer (at char 0), (line:1, col:1)\n', ' column: 1\n', '\n', ' """\n', '\n', ' @staticmethod\n', ' def explain(exc, depth=16):\n', ' """\n', ' Method to take an exception and translate the Python internal traceback into a list\n', ' of the pyparsing expressions that caused the exception to be raised.\n', '\n', ' Parameters:\n', '\n', ' - exc - exception raised during parsing (need not be a ParseException, in support\n', ' of Python exceptions that might be raised in a parse action)\n', ' - depth (default=16) - number of levels back in the stack trace to list expression\n', ' and function names; if None, the full stack trace names will be listed; if 0, only\n', ' the failing input line, marker, and exception string will be shown\n', '\n', ' Returns a multi-line string listing the ParserElements and/or function names in the\n', " exception's stack trace.\n", '\n', ' Note: the diagnostic output will include string representations of the expressions\n', ' that failed to parse. These representations will be more helpful if you use `setName` to\n', ' give identifiable names to your expressions. Otherwise they will use the default string\n', ' forms, which may be cryptic to read.\n', '\n', ' explain() is only supported under Python 3.\n', ' """\n', ' import inspect\n', '\n', ' if depth is None:\n', ' depth = sys.getrecursionlimit()\n', ' ret = []\n', ' if isinstance(exc, ParseBaseException):\n', ' ret.append(exc.line)\n', " ret.append(' ' * (exc.col - 1) + '^')\n", ' ret.append("{0}: {1}".format(type(exc).__name__, exc))\n', '\n', ' if depth > 0:\n', ' callers = inspect.getinnerframes(exc.__traceback__, context=depth)\n', ' seen = set()\n', ' for i, ff in enumerate(callers[-depth:]):\n', ' frm = ff[0]\n', '\n', " f_self = frm.f_locals.get('self', None)\n", ' if isinstance(f_self, ParserElement):\n', " if frm.f_code.co_name not in ('parseImpl', '_parseNoCache'):\n", ' continue\n', ' if f_self in seen:\n', ' continue\n', ' seen.add(f_self)\n', '\n', ' self_type = type(f_self)\n', ' ret.append("{0}.{1} - {2}".format(self_type.__module__,\n', ' self_type.__name__,\n', ' f_self))\n', ' elif f_self is not None:\n', ' self_type = type(f_self)\n', ' ret.append("{0}.{1}".format(self_type.__module__,\n', ' self_type.__name__))\n', ' else:\n', ' code = frm.f_code\n', " if code.co_name in ('wrapper', '<module>'):\n", ' continue\n', '\n', ' ret.append("{0}".format(code.co_name))\n', '\n', ' depth -= 1\n', ' if not depth:\n', ' break\n', '\n', " return '\\n'.join(ret)\n", '\n', '\n', 'class ParseFatalException(ParseBaseException):\n', ' """user-throwable exception thrown when inconsistent parse content\n', ' is found; stops all parsing immediately"""\n', ' pass\n', '\n', 'class ParseSyntaxException(ParseFatalException):\n', ' """just like :class:`ParseFatalException`, but thrown internally\n', " when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates\n", ' that parsing is to stop immediately because an unbacktrackable\n', ' syntax error has been found.\n', ' """\n', ' pass\n', '\n', '#~ class ReparseException(ParseBaseException):\n', ' #~ """Experimental class - parse actions can raise this exception to cause\n', ' #~ pyparsing to reparse the input string:\n', ' #~ - with a modified input string, and/or\n', ' #~ - with a modified start location\n', ' #~ Set 
the values of the ReparseException in the constructor, and raise the\n', ' #~ exception in a parse action to cause pyparsing to use the new string/location.\n', ' #~ Setting the values as None causes no change to be made.\n', ' #~ """\n', ' #~ def __init_( self, newstring, restartLoc ):\n', ' #~ self.newParseText = newstring\n', ' #~ self.reparseLoc = restartLoc\n', '\n', 'class RecursiveGrammarException(Exception):\n', ' """exception thrown by :class:`ParserElement.validate` if the\n', ' grammar could be improperly recursive\n', ' """\n', ' def __init__(self, parseElementList):\n', ' self.parseElementTrace = parseElementList\n', '\n', ' def __str__(self):\n', ' return "RecursiveGrammarException: %s" % self.parseElementTrace\n', '\n', 'class _ParseResultsWithOffset(object):\n', ' def __init__(self, p1, p2):\n', ' self.tup = (p1, p2)\n', ' def __getitem__(self, i):\n', ' return self.tup[i]\n', ' def __repr__(self):\n', ' return repr(self.tup[0])\n', ' def setOffset(self, i):\n', ' self.tup = (self.tup[0], i)\n', '\n', 'class ParseResults(object):\n', ' """Structured parse results, to provide multiple means of access to\n', ' the parsed data:\n', '\n', ' - as a list (``len(results)``)\n', ' - by list index (``results[0], results[1]``, etc.)\n', ' - by attribute (``results.<resultsName>`` - see :class:`ParserElement.setResultsName`)\n', '\n', ' Example::\n', '\n', ' integer = Word(nums)\n', ' date_str = (integer.setResultsName("year") + \'/\'\n', ' + integer.setResultsName("month") + \'/\'\n', ' + integer.setResultsName("day"))\n', ' # equivalent form:\n', ' # date_str = integer("year") + \'/\' + integer("month") + \'/\' + integer("day")\n', '\n', ' # parseString returns a ParseResults object\n', ' result = date_str.parseString("1999/12/31")\n', '\n', ' def test(s, fn=repr):\n', ' print("%s -> %s" % (s, fn(eval(s))))\n', ' test("list(result)")\n', ' test("result[0]")\n', ' test("result[\'month\']")\n', ' test("result.day")\n', ' test("\'month\' in result")\n', ' test("\'minutes\' in result")\n', ' test("result.dump()", str)\n', '\n', ' prints::\n', '\n', " list(result) -> ['1999', '/', '12', '/', '31']\n", " result[0] -> '1999'\n", " result['month'] -> '12'\n", " result.day -> '31'\n", " 'month' in result -> True\n", " 'minutes' in result -> False\n", " result.dump() -> ['1999', '/', '12', '/', '31']\n", ' - day: 31\n', ' - month: 12\n', ' - year: 1999\n', ' """\n', ' def __new__(cls, toklist=None, name=None, asList=True, modal=True):\n', ' if isinstance(toklist, cls):\n', ' return toklist\n', ' retobj = object.__new__(cls)\n', ' retobj.__doinit = True\n', ' return retobj\n', '\n', ' # Performance tuning: we construct a *lot* of these, so keep this\n', ' # constructor as small and fast as possible\n', ' def __init__(self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance):\n', ' if self.__doinit:\n', ' self.__doinit = False\n', ' self.__name = None\n', ' self.__parent = None\n', ' self.__accumNames = {}\n', ' self.__asList = asList\n', ' self.__modal = modal\n', ' if toklist is None:\n', ' toklist = []\n', ' if isinstance(toklist, list):\n', ' self.__toklist = toklist[:]\n', ' elif isinstance(toklist, _generatorType):\n', ' self.__toklist = list(toklist)\n', ' else:\n', ' self.__toklist = [toklist]\n', ' self.__tokdict = dict()\n', '\n', ' if name is not None and name:\n', ' if not modal:\n', ' self.__accumNames[name] = 0\n', ' if isinstance(name, int):\n', ' name = _ustr(name) # will always return a str, but use _ustr for consistency\n', ' self.__name = name\n', " if 
not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None, '', [])):\n", ' if isinstance(toklist, basestring):\n', ' toklist = [toklist]\n', ' if asList:\n', ' if isinstance(toklist, ParseResults):\n', ' self[name] = _ParseResultsWithOffset(ParseResults(toklist.__toklist), 0)\n', ' else:\n', ' self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0)\n', ' self[name].__name = name\n', ' else:\n', ' try:\n', ' self[name] = toklist[0]\n', ' except (KeyError, TypeError, IndexError):\n', ' self[name] = toklist\n', '\n', ' def __getitem__(self, i):\n', ' if isinstance(i, (int, slice)):\n', ' return self.__toklist[i]\n', ' else:\n', ' if i not in self.__accumNames:\n', ' return self.__tokdict[i][-1][0]\n', ' else:\n', ' return ParseResults([v[0] for v in self.__tokdict[i]])\n', '\n', ' def __setitem__(self, k, v, isinstance=isinstance):\n', ' if isinstance(v, _ParseResultsWithOffset):\n', ' self.__tokdict[k] = self.__tokdict.get(k, list()) + [v]\n', ' sub = v[0]\n', ' elif isinstance(k, (int, slice)):\n', ' self.__toklist[k] = v\n', ' sub = v\n', ' else:\n', ' self.__tokdict[k] = self.__tokdict.get(k, list()) + [_ParseResultsWithOffset(v, 0)]\n', ' sub = v\n', ' if isinstance(sub, ParseResults):\n', ' sub.__parent = wkref(self)\n', '\n', ' def __delitem__(self, i):\n', ' if isinstance(i, (int, slice)):\n', ' mylen = len(self.__toklist)\n', ' del self.__toklist[i]\n', '\n', ' # convert int to slice\n', ' if isinstance(i, int):\n', ' if i < 0:\n', ' i += mylen\n', ' i = slice(i, i + 1)\n', ' # get removed indices\n', ' removed = list(range(*i.indices(mylen)))\n', ' removed.reverse()\n', ' # fixup indices in token dictionary\n', ' for name, occurrences in self.__tokdict.items():\n', ' for j in removed:\n', ' for k, (value, position) in enumerate(occurrences):\n', ' occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))\n', ' else:\n', ' del self.__tokdict[i]\n', '\n', ' def __contains__(self, k):\n', ' return k in self.__tokdict\n', '\n', ' def __len__(self):\n', ' return len(self.__toklist)\n', '\n', ' def __bool__(self):\n', ' return (not not self.__toklist)\n', ' __nonzero__ = __bool__\n', '\n', ' def __iter__(self):\n', ' return iter(self.__toklist)\n', '\n', ' def __reversed__(self):\n', ' return iter(self.__toklist[::-1])\n', '\n', ' def _iterkeys(self):\n', ' if hasattr(self.__tokdict, "iterkeys"):\n', ' return self.__tokdict.iterkeys()\n', ' else:\n', ' return iter(self.__tokdict)\n', '\n', ' def _itervalues(self):\n', ' return (self[k] for k in self._iterkeys())\n', '\n', ' def _iteritems(self):\n', ' return ((k, self[k]) for k in self._iterkeys())\n', '\n', ' if PY_3:\n', ' keys = _iterkeys\n', ' """Returns an iterator of all named result keys."""\n', '\n', ' values = _itervalues\n', ' """Returns an iterator of all named result values."""\n', '\n', ' items = _iteritems\n', ' """Returns an iterator of all named result key-value tuples."""\n', '\n', ' else:\n', ' iterkeys = _iterkeys\n', ' """Returns an iterator of all named result keys (Python 2.x only)."""\n', '\n', ' itervalues = _itervalues\n', ' """Returns an iterator of all named result values (Python 2.x only)."""\n', '\n', ' iteritems = _iteritems\n', ' """Returns an iterator of all named result key-value tuples (Python 2.x only)."""\n', '\n', ' def keys(self):\n', ' """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""\n', ' return list(self.iterkeys())\n', '\n', ' def values(self):\n', ' """Returns all named result values (as a list in Python 
2.x, as an iterator in Python 3.x)."""\n', ' return list(self.itervalues())\n', '\n', ' def items(self):\n', ' """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""\n', ' return list(self.iteritems())\n', '\n', ' def haskeys(self):\n', ' """Since keys() returns an iterator, this method is helpful in bypassing\n', ' code that looks for the existence of any defined results names."""\n', ' return bool(self.__tokdict)\n', '\n', ' def pop(self, *args, **kwargs):\n', ' """\n', ' Removes and returns item at specified index (default= ``last``).\n', ' Supports both ``list`` and ``dict`` semantics for ``pop()``. If\n', ' passed no argument or an integer argument, it will use ``list``\n', ' semantics and pop tokens from the list of parsed tokens. If passed\n', ' a non-integer argument (most likely a string), it will use ``dict``\n', ' semantics and pop the corresponding value from any defined results\n', ' names. A second default return value argument is supported, just as in\n', ' ``dict.pop()``.\n', '\n', ' Example::\n', '\n', ' def remove_first(tokens):\n', ' tokens.pop(0)\n', ' print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> [\'0\', \'123\', \'321\']\n', ' print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> [\'123\', \'321\']\n', '\n', ' label = Word(alphas)\n', ' patt = label("LABEL") + OneOrMore(Word(nums))\n', ' print(patt.parseString("AAB 123 321").dump())\n', '\n', ' # Use pop() in a parse action to remove named result (note that corresponding value is not\n', ' # removed from list form of results)\n', ' def remove_LABEL(tokens):\n', ' tokens.pop("LABEL")\n', ' return tokens\n', ' patt.addParseAction(remove_LABEL)\n', ' print(patt.parseString("AAB 123 321").dump())\n', '\n', ' prints::\n', '\n', " ['AAB', '123', '321']\n", ' - LABEL: AAB\n', '\n', " ['AAB', '123', '321']\n", ' """\n', ' if not args:\n', ' args = [-1]\n', ' for k, v in kwargs.items():\n', " if k == 'default':\n", ' args = (args[0], v)\n', ' else:\n', ' raise TypeError("pop() got an unexpected keyword argument \'%s\'" % k)\n', ' if (isinstance(args[0], int)\n', ' or len(args) == 1\n', ' or args[0] in self):\n', ' index = args[0]\n', ' ret = self[index]\n', ' del self[index]\n', ' return ret\n', ' else:\n', ' defaultvalue = args[1]\n', ' return defaultvalue\n', '\n', ' def get(self, key, defaultValue=None):\n', ' """\n', ' Returns named result matching the given key, or if there is no\n', ' such name, then returns the given ``defaultValue`` or ``None`` if no\n', ' ``defaultValue`` is specified.\n', '\n', ' Similar to ``dict.get()``.\n', '\n', ' Example::\n', '\n', ' integer = Word(nums)\n', ' date_str = integer("year") + \'/\' + integer("month") + \'/\' + integer("day")\n', '\n', ' result = date_str.parseString("1999/12/31")\n', ' print(result.get("year")) # -> \'1999\'\n', ' print(result.get("hour", "not specified")) # -> \'not specified\'\n', ' print(result.get("hour")) # -> None\n', ' """\n', ' if key in self:\n', ' return self[key]\n', ' else:\n', ' return defaultValue\n', '\n', ' def insert(self, index, insStr):\n', ' """\n', ' Inserts new element at location index in the list of parsed tokens.\n', '\n', ' Similar to ``list.insert()``.\n', '\n', ' Example::\n', '\n', ' print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> [\'0\', \'123\', \'321\']\n', '\n', ' # use a parse action to insert the parse location in the front of the parsed results\n', ' def insert_locn(locn, tokens):\n', ' tokens.insert(0, locn)\n', ' 
print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, \'0\', \'123\', \'321\']\n', ' """\n', ' self.__toklist.insert(index, insStr)\n', ' # fixup indices in token dictionary\n', ' for name, occurrences in self.__tokdict.items():\n', ' for k, (value, position) in enumerate(occurrences):\n', ' occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))\n', '\n', ' def append(self, item):\n', ' """\n', ' Add single element to end of ParseResults list of elements.\n', '\n', ' Example::\n', '\n', ' print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> [\'0\', \'123\', \'321\']\n', '\n', ' # use a parse action to compute the sum of the parsed integers, and add it to the end\n', ' def append_sum(tokens):\n', ' tokens.append(sum(map(int, tokens)))\n', ' print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> [\'0\', \'123\', \'321\', 444]\n', ' """\n', ' self.__toklist.append(item)\n', '\n', ' def extend(self, itemseq):\n', ' """\n', ' Add sequence of elements to end of ParseResults list of elements.\n', '\n', ' Example::\n', '\n', ' patt = OneOrMore(Word(alphas))\n', '\n', ' # use a parse action to append the reverse of the matched strings, to make a palindrome\n', ' def make_palindrome(tokens):\n', ' tokens.extend(reversed([t[::-1] for t in tokens]))\n', " return ''.join(tokens)\n", ' print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> \'lskdjsdlkjflksddsklfjkldsjdksl\'\n', ' """\n', ' if isinstance(itemseq, ParseResults):\n', ' self.__iadd__(itemseq)\n', ' else:\n', ' self.__toklist.extend(itemseq)\n', '\n', ' def clear(self):\n', ' """\n', ' Clear all elements and results names.\n', ' """\n', ' del self.__toklist[:]\n', ' self.__tokdict.clear()\n', '\n', ' def __getattr__(self, name):\n', ' try:\n', ' return self[name]\n', ' except KeyError:\n', ' return ""\n', '\n', ' def __add__(self, other):\n', ' ret = self.copy()\n', ' ret += other\n', ' return ret\n', '\n', ' def __iadd__(self, other):\n', ' if other.__tokdict:\n', ' offset = len(self.__toklist)\n', ' addoffset = lambda a: offset if a < 0 else a + offset\n', ' otheritems = other.__tokdict.items()\n', ' otherdictitems = [(k, _ParseResultsWithOffset(v[0], addoffset(v[1])))\n', ' for k, vlist in otheritems for v in vlist]\n', ' for k, v in otherdictitems:\n', ' self[k] = v\n', ' if isinstance(v[0], ParseResults):\n', ' v[0].__parent = wkref(self)\n', '\n', ' self.__toklist += other.__toklist\n', ' self.__accumNames.update(other.__accumNames)\n', ' return self\n', '\n', ' def __radd__(self, other):\n', ' if isinstance(other, int) and other == 0:\n', ' # useful for merging many ParseResults using sum() builtin\n', ' return self.copy()\n', ' else:\n', ' # this may raise a TypeError - so be it\n', ' return other + self\n', '\n', ' def __repr__(self):\n', ' return "(%s, %s)" % (repr(self.__toklist), repr(self.__tokdict))\n', '\n', ' def __str__(self):\n', " return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'\n", '\n', " def _asStringList(self, sep=''):\n", ' out = []\n', ' for item in self.__toklist:\n', ' if out and sep:\n', ' out.append(sep)\n', ' if isinstance(item, ParseResults):\n', ' out += item._asStringList()\n', ' else:\n', ' out.append(_ustr(item))\n', ' return out\n', '\n', ' def asList(self):\n', ' """\n', ' Returns the parse results as a nested list of matching tokens, all converted to strings.\n', '\n', ' Example::\n', '\n', ' patt = 
OneOrMore(Word(alphas))\n', ' result = patt.parseString("sldkj lsdkj sldkj")\n', ' # even though the result prints in string-like form, it is actually a pyparsing ParseResults\n', " print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']\n", '\n', ' # Use asList() to create an actual list\n', ' result_list = result.asList()\n', " print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']\n", ' """\n', ' return [res.asList() if isinstance(res, ParseResults) else res for res in self.__toklist]\n', '\n', ' def asDict(self):\n', ' """\n', ' Returns the named parse results as a nested dictionary.\n', '\n', ' Example::\n', '\n', ' integer = Word(nums)\n', ' date_str = integer("year") + \'/\' + integer("month") + \'/\' + integer("day")\n', '\n', " result = date_str.parseString('12/31/1999')\n", " print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})\n", '\n', ' result_dict = result.asDict()\n', " print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}\n", '\n', ' # even though a ParseResults supports dict-like access, sometime you just need to have a dict\n', ' import json\n', ' print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable\n', ' print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}\n', ' """\n', ' if PY_3:\n', ' item_fn = self.items\n', ' else:\n', ' item_fn = self.iteritems\n', '\n', ' def toItem(obj):\n', ' if isinstance(obj, ParseResults):\n', ' if obj.haskeys():\n', ' return obj.asDict()\n', ' else:\n', ' return [toItem(v) for v in obj]\n', ' else:\n', ' return obj\n', '\n', ' return dict((k, toItem(v)) for k, v in item_fn())\n', '\n', ' def copy(self):\n', ' """\n', ' Returns a new copy of a :class:`ParseResults` object.\n', ' """\n', ' ret = ParseResults(self.__toklist)\n', ' ret.__tokdict = dict(self.__tokdict.items())\n', ' ret.__parent = self.__parent\n', ' ret.__accumNames.update(self.__accumNames)\n', ' ret.__name = self.__name\n', ' return ret\n', '\n', ' def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True):\n', ' """\n', ' (Deprecated) Returns the parse results as XML. 
Tags are created for tokens and lists that have defined results names.\n', ' """\n', ' nl = "\\n"\n', ' out = []\n', ' namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items()\n', ' for v in vlist)\n', ' nextLevelIndent = indent + " "\n', '\n', ' # collapse out indents if formatting is not desired\n', ' if not formatted:\n', ' indent = ""\n', ' nextLevelIndent = ""\n', ' nl = ""\n', '\n', ' selfTag = None\n', ' if doctag is not None:\n', ' selfTag = doctag\n', ' else:\n', ' if self.__name:\n', ' selfTag = self.__name\n', '\n', ' if not selfTag:\n', ' if namedItemsOnly:\n', ' return ""\n', ' else:\n', ' selfTag = "ITEM"\n', '\n', ' out += [nl, indent, "<", selfTag, ">"]\n', '\n', ' for i, res in enumerate(self.__toklist):\n', ' if isinstance(res, ParseResults):\n', ' if i in namedItems:\n', ' out += [res.asXML(namedItems[i],\n', ' namedItemsOnly and doctag is None,\n', ' nextLevelIndent,\n', ' formatted)]\n', ' else:\n', ' out += [res.asXML(None,\n', ' namedItemsOnly and doctag is None,\n', ' nextLevelIndent,\n', ' formatted)]\n', ' else:\n', ' # individual token, see if there is a name for it\n', ' resTag = None\n', ' if i in namedItems:\n', ' resTag = namedItems[i]\n', ' if not resTag:\n', ' if namedItemsOnly:\n', ' continue\n', ' else:\n', ' resTag = "ITEM"\n', ' xmlBodyText = _xml_escape(_ustr(res))\n', ' out += [nl, nextLevelIndent, "<", resTag, ">",\n', ' xmlBodyText,\n', ' "</", resTag, ">"]\n', '\n', ' out += [nl, indent, "</", selfTag, ">"]\n', ' return "".join(out)\n', '\n', ' def __lookup(self, sub):\n', ' for k, vlist in self.__tokdict.items():\n', ' for v, loc in vlist:\n', ' if sub is v:\n', ' return k\n', ' return None\n', '\n', ' def getName(self):\n', ' r"""\n', ' Returns the results name for this token expression. Useful when several\n', ' different expressions might match at a particular location.\n', '\n', ' Example::\n', '\n', ' integer = Word(nums)\n', ' ssn_expr = Regex(r"\\d\\d\\d-\\d\\d-\\d\\d\\d\\d")\n', " house_number_expr = Suppress('#') + Word(nums, alphanums)\n", ' user_data = (Group(house_number_expr)("house_number")\n', ' | Group(ssn_expr)("ssn")\n', ' | Group(integer)("age"))\n', ' user_info = OneOrMore(user_data)\n', '\n', ' result = user_info.parseString("22 111-22-3333 #221B")\n', ' for item in result:\n', " print(item.getName(), ':', item[0])\n", '\n', ' prints::\n', '\n', ' age : 22\n', ' ssn : 111-22-3333\n', ' house_number : 221B\n', ' """\n', ' if self.__name:\n', ' return self.__name\n', ' elif self.__parent:\n', ' par = self.__parent()\n', ' if par:\n', ' return par.__lookup(self)\n', ' else:\n', ' return None\n', ' elif (len(self) == 1\n', ' and len(self.__tokdict) == 1\n', ' and next(iter(self.__tokdict.values()))[0][1] in (0, -1)):\n', ' return next(iter(self.__tokdict.keys()))\n', ' else:\n', ' return None\n', '\n', " def dump(self, indent='', full=True, include_list=True, _depth=0):\n", ' """\n', ' Diagnostic method for listing out the contents of\n', ' a :class:`ParseResults`. 
Accepts an optional ``indent`` argument so\n', ' that this string can be embedded in a nested display of other data.\n', '\n', ' Example::\n', '\n', ' integer = Word(nums)\n', ' date_str = integer("year") + \'/\' + integer("month") + \'/\' + integer("day")\n', '\n', " result = date_str.parseString('12/31/1999')\n", ' print(result.dump())\n', '\n', ' prints::\n', '\n', " ['12', '/', '31', '/', '1999']\n", ' - day: 1999\n', ' - month: 31\n', ' - year: 12\n', ' """\n', ' out = []\n', " NL = '\\n'\n", ' if include_list:\n', ' out.append(indent + _ustr(self.asList()))\n', ' else:\n', " out.append('')\n", '\n', ' if full:\n', ' if self.haskeys():\n', ' items = sorted((str(k), v) for k, v in self.items())\n', ' for k, v in items:\n', ' if out:\n', ' out.append(NL)\n', ' out.append("%s%s- %s: " % (indent, (\' \' * _depth), k))\n', ' if isinstance(v, ParseResults):\n', ' if v:\n', ' out.append(v.dump(indent=indent, full=full, include_list=include_list, _depth=_depth + 1))\n', ' else:\n', ' out.append(_ustr(v))\n', ' else:\n', ' out.append(repr(v))\n', ' elif any(isinstance(vv, ParseResults) for vv in self):\n', ' v = self\n', ' for i, vv in enumerate(v):\n', ' if isinstance(vv, ParseResults):\n', ' out.append("\\n%s%s[%d]:\\n%s%s%s" % (indent,\n', " (' ' * (_depth)),\n", ' i,\n', ' indent,\n', " (' ' * (_depth + 1)),\n", ' vv.dump(indent=indent,\n', ' full=full,\n', ' include_list=include_list,\n', ' _depth=_depth + 1)))\n', ' else:\n', ' out.append("\\n%s%s[%d]:\\n%s%s%s" % (indent,\n', " (' ' * (_depth)),\n", ' i,\n', ' indent,\n', " (' ' * (_depth + 1)),\n", ' _ustr(vv)))\n', '\n', ' return "".join(out)\n', '\n', ' def pprint(self, *args, **kwargs):\n', ' """\n', ' Pretty-printer for parsed results as a list, using the\n', ' `pprint <https://docs.python.org/3/library/pprint.html>`_ module.\n', ' Accepts additional positional or keyword args as defined for\n', ' `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .\n', '\n', ' Example::\n', '\n', ' ident = Word(alphas, alphanums)\n', ' num = Word(nums)\n', ' func = Forward()\n', " term = ident | num | Group('(' + func + ')')\n", ' func <<= ident + Group(Optional(delimitedList(term)))\n', ' result = func.parseString("fna a,b,(fnb c,d,200),100")\n', ' result.pprint(width=40)\n', '\n', ' prints::\n', '\n', " ['fna',\n", " ['a',\n", " 'b',\n", " ['(', 'fnb', ['c', 'd', '200'], ')'],\n", " '100']]\n", ' """\n', ' pprint.pprint(self.asList(), *args, **kwargs)\n', '\n', ' # add support for pickle protocol\n', ' def __getstate__(self):\n', ' return (self.__toklist,\n', ' (self.__tokdict.copy(),\n', ' self.__parent is not None and self.__parent() or None,\n', ' self.__accumNames,\n', ' self.__name))\n', '\n', ' def __setstate__(self, state):\n', ' self.__toklist = state[0]\n', ' self.__tokdict, par, inAccumNames, self.__name = state[1]\n', ' self.__accumNames = {}\n', ' self.__accumNames.update(inAccumNames)\n', ' if par is not None:\n', ' self.__parent = wkref(par)\n', ' else:\n', ' self.__parent = None\n', '\n', ' def __getnewargs__(self):\n', ' return self.__toklist, self.__name, self.__asList, self.__modal\n', '\n', ' def __dir__(self):\n', ' return dir(type(self)) + list(self.keys())\n', '\n', ' @classmethod\n', ' def from_dict(cls, other, name=None):\n', ' """\n', ' Helper classmethod to construct a ParseResults from a dict, preserving the\n', " name-value relations as results names. 
If an optional 'name' argument is\n", ' given, a nested ParseResults will be returned\n', ' """\n', ' def is_iterable(obj):\n', ' try:\n', ' iter(obj)\n', ' except Exception:\n', ' return False\n', ' else:\n', ' if PY_3:\n', ' return not isinstance(obj, (str, bytes))\n', ' else:\n', ' return not isinstance(obj, basestring)\n', '\n', ' ret = cls([])\n', ' for k, v in other.items():\n', ' if isinstance(v, Mapping):\n', ' ret += cls.from_dict(v, name=k)\n', ' else:\n', ' ret += cls([v], name=k, asList=is_iterable(v))\n', ' if name is not None:\n', ' ret = cls([ret], name=name)\n', ' return ret\n', '\n', 'MutableMapping.register(ParseResults)\n', '\n', 'def col (loc, strg):\n', ' """Returns current column within a string, counting newlines as line separators.\n', ' The first column is number 1.\n', '\n', ' Note: the default parsing behavior is to expand tabs in the input string\n', ' before starting the parsing process. See\n', ' :class:`ParserElement.parseString` for more\n', ' information on parsing strings containing ``<TAB>`` s, and suggested\n', ' methods to maintain a consistent view of the parsed string, the parse\n', ' location, and line and column positions within the parsed string.\n', ' """\n', ' s = strg\n', ' return 1 if 0 < loc < len(s) and s[loc-1] == \'\\n\' else loc - s.rfind("\\n", 0, loc)\n', '\n', 'def lineno(loc, strg):\n', ' """Returns current line number within a string, counting newlines as line separators.\n', ' The first line is number 1.\n', '\n', ' Note - the default parsing behavior is to expand tabs in the input string\n', ' before starting the parsing process. See :class:`ParserElement.parseString`\n', ' for more information on parsing strings containing ``<TAB>`` s, and\n', ' suggested methods to maintain a consistent view of the parsed string, the\n', ' parse location, and line and column positions within the parsed string.\n', ' """\n', ' return strg.count("\\n", 0, loc) + 1\n', '\n', 'def line(loc, strg):\n', ' """Returns the line of text containing loc within a string, counting newlines as line separators.\n', ' """\n', ' lastCR = strg.rfind("\\n", 0, loc)\n', ' nextCR = strg.find("\\n", loc)\n', ' if nextCR >= 0:\n', ' return strg[lastCR + 1:nextCR]\n', ' else:\n', ' return strg[lastCR + 1:]\n', '\n', 'def _defaultStartDebugAction(instring, loc, expr):\n', ' print(("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % (lineno(loc, instring), col(loc, instring))))\n', '\n', 'def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks):\n', ' print("Matched " + _ustr(expr) + " -> " + str(toks.asList()))\n', '\n', 'def _defaultExceptionDebugAction(instring, loc, expr, exc):\n', ' print("Exception raised:" + _ustr(exc))\n', '\n', 'def nullDebugAction(*args):\n', ' """\'Do-nothing\' debug action, to suppress debugging output during parsing."""\n', ' pass\n', '\n', '# Only works on Python 3.x - nonlocal is toxic to Python 2 installs\n', "#~ 'decorator to trim function calls to match the arity of the target'\n", '#~ def _trim_arity(func, maxargs=3):\n', ' #~ if func in singleArgBuiltins:\n', ' #~ return lambda s,l,t: func(t)\n', ' #~ limit = 0\n', ' #~ foundArity = False\n', ' #~ def wrapper(*args):\n', ' #~ nonlocal limit,foundArity\n', ' #~ while 1:\n', ' #~ try:\n', ' #~ ret = func(*args[limit:])\n', ' #~ foundArity = True\n', ' #~ return ret\n', ' #~ except TypeError:\n', ' #~ if limit == maxargs or foundArity:\n', ' #~ raise\n', ' #~ limit += 1\n', ' #~ continue\n', ' #~ return wrapper\n', '\n', '# this version is Python 2.x-3.x 
cross-compatible\n', "'decorator to trim function calls to match the arity of the target'\n", 'def _trim_arity(func, maxargs=2):\n', ' if func in singleArgBuiltins:\n', ' return lambda s, l, t: func(t)\n', ' limit = [0]\n', ' foundArity = [False]\n', '\n', ' # traceback return data structure changed in Py3.5 - normalize back to plain tuples\n', ' if system_version[:2] >= (3, 5):\n', ' def extract_stack(limit=0):\n', ' # special handling for Python 3.5.0 - extra deep call stack by 1\n', ' offset = -3 if system_version == (3, 5, 0) else -2\n', ' frame_summary = traceback.extract_stack(limit=-offset + limit - 1)[offset]\n', ' return [frame_summary[:2]]\n', ' def extract_tb(tb, limit=0):\n', ' frames = traceback.extract_tb(tb, limit=limit)\n', ' frame_summary = frames[-1]\n', ' return [frame_summary[:2]]\n', ' else:\n', ' extract_stack = traceback.extract_stack\n', ' extract_tb = traceback.extract_tb\n', '\n', ' # synthesize what would be returned by traceback.extract_stack at the call to\n', " # user's parse action 'func', so that we don't incur call penalty at parse time\n", '\n', ' LINE_DIFF = 6\n', ' # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND\n', ' # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!\n', ' this_line = extract_stack(limit=2)[-1]\n', ' pa_call_line_synth = (this_line[0], this_line[1] + LINE_DIFF)\n', '\n', ' def wrapper(*args):\n', ' while 1:\n', ' try:\n', ' ret = func(*args[limit[0]:])\n', ' foundArity[0] = True\n', ' return ret\n', ' except TypeError:\n', ' # re-raise TypeErrors if they did not come from our arity testing\n', ' if foundArity[0]:\n', ' raise\n', ' else:\n', ' try:\n', ' tb = sys.exc_info()[-1]\n', ' if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:\n', ' raise\n', ' finally:\n', ' try:\n', ' del tb\n', ' except NameError:\n', ' pass\n', '\n', ' if limit[0] <= maxargs:\n', ' limit[0] += 1\n', ' continue\n', ' raise\n', '\n', ' # copy func name to wrapper for sensible debug output\n', ' func_name = "<parse action>"\n', ' try:\n', " func_name = getattr(func, '__name__',\n", " getattr(func, '__class__').__name__)\n", ' except Exception:\n', ' func_name = str(func)\n', ' wrapper.__name__ = func_name\n', '\n', ' return wrapper\n', '\n', '\n', 'class ParserElement(object):\n', ' """Abstract base level parser element class."""\n', ' DEFAULT_WHITE_CHARS = " \\n\\t\\r"\n', ' verbose_stacktrace = False\n', '\n', ' @staticmethod\n', ' def setDefaultWhitespaceChars(chars):\n', ' r"""\n', ' Overrides the default whitespace chars\n', '\n', ' Example::\n', '\n', ' # default whitespace chars are space, <TAB> and newline\n', ' OneOrMore(Word(alphas)).parseString("abc def\\nghi jkl") # -> [\'abc\', \'def\', \'ghi\', \'jkl\']\n', '\n', ' # change to just treat newline as significant\n', ' ParserElement.setDefaultWhitespaceChars(" \\t")\n', ' OneOrMore(Word(alphas)).parseString("abc def\\nghi jkl") # -> [\'abc\', \'def\']\n', ' """\n', ' ParserElement.DEFAULT_WHITE_CHARS = chars\n', '\n', ' @staticmethod\n', ' def inlineLiteralsUsing(cls):\n', ' """\n', ' Set class to be used for inclusion of string literals into a parser.\n', '\n', ' Example::\n', '\n', ' # default literal class used is Literal\n', ' integer = Word(nums)\n', ' date_str = integer("year") + \'/\' + integer("month") + \'/\' + integer("day")\n', '\n', ' date_str.parseString("1999/12/31") # -> [\'1999\', \'/\', \'12\', \'/\', \'31\']\n', '\n', '\n', ' # change to Suppress\n', ' ParserElement.inlineLiteralsUsing(Suppress)\n', ' date_str = 
integer("year") + \'/\' + integer("month") + \'/\' + integer("day")\n', '\n', ' date_str.parseString("1999/12/31") # -> [\'1999\', \'12\', \'31\']\n', ' """\n', ' ParserElement._literalStringClass = cls\n', '\n', ' @classmethod\n', ' def _trim_traceback(cls, tb):\n', ' while tb.tb_next:\n', ' tb = tb.tb_next\n', ' return tb\n', '\n', ' def __init__(self, savelist=False):\n', ' self.parseAction = list()\n', ' self.failAction = None\n', ' # ~ self.name = "<unknown>" # don\'t define self.name, let subclasses try/except upcall\n', ' self.strRepr = None\n', ' self.resultsName = None\n', ' self.saveAsList = savelist\n', ' self.skipWhitespace = True\n', ' self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)\n', ' self.copyDefaultWhiteChars = True\n', ' self.mayReturnEmpty = False # used when checking for left-recursion\n', ' self.keepTabs = False\n', ' self.ignoreExprs = list()\n', ' self.debug = False\n', ' self.streamlined = False\n', " self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index\n", ' self.errmsg = ""\n', ' self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)\n', ' self.debugActions = (None, None, None) # custom debug actions\n', ' self.re = None\n', ' self.callPreparse = True # used to avoid redundant calls to preParse\n', ' self.callDuringTry = False\n', '\n', ' def copy(self):\n', ' """\n', ' Make a copy of this :class:`ParserElement`. Useful for defining\n', ' different parse actions for the same parsing pattern, using copies of\n', ' the original parse element.\n', '\n', ' Example::\n', '\n', ' integer = Word(nums).setParseAction(lambda toks: int(toks[0]))\n', ' integerK = integer.copy().addParseAction(lambda toks: toks[0] * 1024) + Suppress("K")\n', ' integerM = integer.copy().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")\n', '\n', ' print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))\n', '\n', ' prints::\n', '\n', ' [5120, 100, 655360, 268435456]\n', '\n', ' Equivalent form of ``expr.copy()`` is just ``expr()``::\n', '\n', ' integerM = integer().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")\n', ' """\n', ' cpy = copy.copy(self)\n', ' cpy.parseAction = self.parseAction[:]\n', ' cpy.ignoreExprs = self.ignoreExprs[:]\n', ' if self.copyDefaultWhiteChars:\n', ' cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS\n', ' return cpy\n', '\n', ' def setName(self, name):\n', ' """\n', ' Define name for this expression, makes debugging and exception messages clearer.\n', '\n', ' Example::\n', '\n', ' Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) 
(at char 0), (line:1, col:1)\n', ' Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)\n', ' """\n', ' self.name = name\n', ' self.errmsg = "Expected " + self.name\n', ' if __diag__.enable_debug_on_named_expressions:\n', ' self.setDebug()\n', ' return self\n', '\n', ' def setResultsName(self, name, listAllMatches=False):\n', ' """\n', ' Define name for referencing matching tokens as a nested attribute\n', ' of the returned parse results.\n', ' NOTE: this returns a *copy* of the original :class:`ParserElement` object;\n', ' this is so that the client can define a basic element, such as an\n', ' integer, and reference it in multiple places with different names.\n', '\n', ' You can also set results names using the abbreviated syntax,\n', ' ``expr("name")`` in place of ``expr.setResultsName("name")``\n', ' - see :class:`__call__`.\n', '\n', ' Example::\n', '\n', ' date_str = (integer.setResultsName("year") + \'/\'\n', ' + integer.setResultsName("month") + \'/\'\n', ' + integer.setResultsName("day"))\n', '\n', ' # equivalent form:\n', ' date_str = integer("year") + \'/\' + integer("month") + \'/\' + integer("day")\n', ' """\n', ' return self._setResultsName(name, listAllMatches)\n', '\n', ' def _setResultsName(self, name, listAllMatches=False):\n', ' newself = self.copy()\n', ' if name.endswith("*"):\n', ' name = name[:-1]\n', ' listAllMatches = True\n', ' newself.resultsName = name\n', ' newself.modalResults = not listAllMatches\n', ' return newself\n', '\n', ' def setBreak(self, breakFlag=True):\n', ' """Method to invoke the Python pdb debugger when this element is\n', ' about to be parsed. Set ``breakFlag`` to True to enable, False to\n', ' disable.\n', ' """\n', ' if breakFlag:\n', ' _parseMethod = self._parse\n', ' def breaker(instring, loc, doActions=True, callPreParse=True):\n', ' import pdb\n', ' # this call to pdb.set_trace() is intentional, not a checkin error\n', ' pdb.set_trace()\n', ' return _parseMethod(instring, loc, doActions, callPreParse)\n', ' breaker._originalParseMethod = _parseMethod\n', ' self._parse = breaker\n', ' else:\n', ' if hasattr(self._parse, "_originalParseMethod"):\n', ' self._parse = self._parse._originalParseMethod\n', ' return self\n', '\n', ' def setParseAction(self, *fns, **kwargs):\n', ' """\n', ' Define one or more actions to perform when successfully matching parse element definition.\n', ' Parse action fn is a callable method with 0-3 arguments, called as ``fn(s, loc, toks)`` ,\n', ' ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where:\n', '\n', ' - s = the original string being parsed (see note below)\n', ' - loc = the location of the matching substring\n', ' - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object\n', '\n', ' If the functions in fns modify the tokens, they can return them as the return\n', ' value from fn, and the modified list of tokens will replace the original.\n', ' Otherwise, fn does not need to return any value.\n', '\n', ' If None is passed as the parse action, all previously added parse actions for this\n', ' expression are cleared.\n', '\n', ' Optional keyword arguments:\n', ' - callDuringTry = (default= ``False``) indicate if parse action should be run during lookaheads and alternate testing\n', '\n', ' Note: the default parsing behavior is to expand tabs in the input string\n', ' before starting the parsing process. 
See :class:`parseString for more\n', ' information on parsing strings containing ``<TAB>`` s, and suggested\n', ' methods to maintain a consistent view of the parsed string, the parse\n', ' location, and line and column positions within the parsed string.\n', '\n', ' Example::\n', '\n', ' integer = Word(nums)\n', " date_str = integer + '/' + integer + '/' + integer\n", '\n', ' date_str.parseString("1999/12/31") # -> [\'1999\', \'/\', \'12\', \'/\', \'31\']\n', '\n', ' # use parse action to convert to ints at parse time\n', ' integer = Word(nums).setParseAction(lambda toks: int(toks[0]))\n', " date_str = integer + '/' + integer + '/' + integer\n", '\n', ' # note that integer fields are now ints, not strings\n', ' date_str.parseString("1999/12/31") # -> [1999, \'/\', 12, \'/\', 31]\n', ' """\n', ' if list(fns) == [None,]:\n', ' self.parseAction = []\n', ' else:\n', ' if not all(callable(fn) for fn in fns):\n', ' raise TypeError("parse actions must be callable")\n', ' self.parseAction = list(map(_trim_arity, list(fns)))\n', ' self.callDuringTry = kwargs.get("callDuringTry", False)\n', ' return self\n', '\n', ' def addParseAction(self, *fns, **kwargs):\n', ' """\n', " Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`.\n", '\n', ' See examples in :class:`copy`.\n', ' """\n', ' self.parseAction += list(map(_trim_arity, list(fns)))\n', ' self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)\n', ' return self\n', '\n', ' def addCondition(self, *fns, **kwargs):\n', ' """Add a boolean predicate function to expression\'s list of parse actions. See\n', ' :class:`setParseAction` for function call signatures. Unlike ``setParseAction``,\n', ' functions passed to ``addCondition`` need to return boolean success/fail of the condition.\n', '\n', ' Optional keyword arguments:\n', ' - message = define a custom message to be used in the raised exception\n', ' - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException\n', '\n', ' Example::\n', '\n', ' integer = Word(nums).setParseAction(lambda toks: int(toks[0]))\n', ' year_int = integer.copy()\n', ' year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")\n', " date_str = year_int + '/' + integer + '/' + integer\n", '\n', ' result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)\n', ' """\n', ' for fn in fns:\n', " self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'),\n", " fatal=kwargs.get('fatal', False)))\n", '\n', ' self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)\n', ' return self\n', '\n', ' def setFailAction(self, fn):\n', ' """Define action to perform if parsing fails at this expression.\n', ' Fail acton fn is a callable function that takes the arguments\n', ' ``fn(s, loc, expr, err)`` where:\n', ' - s = string being parsed\n', ' - loc = location where expression match was attempted and failed\n', ' - expr = the parse expression that failed\n', ' - err = the exception thrown\n', ' The function returns no value. 
It may throw :class:`ParseFatalException`\n', ' if it is desired to stop parsing immediately."""\n', ' self.failAction = fn\n', ' return self\n', '\n', ' def _skipIgnorables(self, instring, loc):\n', ' exprsFound = True\n', ' while exprsFound:\n', ' exprsFound = False\n', ' for e in self.ignoreExprs:\n', ' try:\n', ' while 1:\n', ' loc, dummy = e._parse(instring, loc)\n', ' exprsFound = True\n', ' except ParseException:\n', ' pass\n', ' return loc\n', '\n', ' def preParse(self, instring, loc):\n', ' if self.ignoreExprs:\n', ' loc = self._skipIgnorables(instring, loc)\n', '\n', ' if self.skipWhitespace:\n', ' wt = self.whiteChars\n', ' instrlen = len(instring)\n', ' while loc < instrlen and instring[loc] in wt:\n', ' loc += 1\n', '\n', ' return loc\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' return loc, []\n', '\n', ' def postParse(self, instring, loc, tokenlist):\n', ' return tokenlist\n', '\n', ' # ~ @profile\n', ' def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True):\n', ' TRY, MATCH, FAIL = 0, 1, 2\n', ' debugging = (self.debug) # and doActions)\n', '\n', ' if debugging or self.failAction:\n', ' # ~ print ("Match", self, "at loc", loc, "(%d, %d)" % (lineno(loc, instring), col(loc, instring)))\n', ' if self.debugActions[TRY]:\n', ' self.debugActions[TRY](instring, loc, self)\n', ' try:\n', ' if callPreParse and self.callPreparse:\n', ' preloc = self.preParse(instring, loc)\n', ' else:\n', ' preloc = loc\n', ' tokensStart = preloc\n', ' if self.mayIndexError or preloc >= len(instring):\n', ' try:\n', ' loc, tokens = self.parseImpl(instring, preloc, doActions)\n', ' except IndexError:\n', ' raise ParseException(instring, len(instring), self.errmsg, self)\n', ' else:\n', ' loc, tokens = self.parseImpl(instring, preloc, doActions)\n', ' except Exception as err:\n', ' # ~ print ("Exception raised:", err)\n', ' if self.debugActions[FAIL]:\n', ' self.debugActions[FAIL](instring, tokensStart, self, err)\n', ' if self.failAction:\n', ' self.failAction(instring, tokensStart, self, err)\n', ' raise\n', ' else:\n', ' if callPreParse and self.callPreparse:\n', ' preloc = self.preParse(instring, loc)\n', ' else:\n', ' preloc = loc\n', ' tokensStart = preloc\n', ' if self.mayIndexError or preloc >= len(instring):\n', ' try:\n', ' loc, tokens = self.parseImpl(instring, preloc, doActions)\n', ' except IndexError:\n', ' raise ParseException(instring, len(instring), self.errmsg, self)\n', ' else:\n', ' loc, tokens = self.parseImpl(instring, preloc, doActions)\n', '\n', ' tokens = self.postParse(instring, loc, tokens)\n', '\n', ' retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults)\n', ' if self.parseAction and (doActions or self.callDuringTry):\n', ' if debugging:\n', ' try:\n', ' for fn in self.parseAction:\n', ' try:\n', ' tokens = fn(instring, tokensStart, retTokens)\n', ' except IndexError as parse_action_exc:\n', ' exc = ParseException("exception raised in parse action")\n', ' exc.__cause__ = parse_action_exc\n', ' raise exc\n', '\n', ' if tokens is not None and tokens is not retTokens:\n', ' retTokens = ParseResults(tokens,\n', ' self.resultsName,\n', ' asList=self.saveAsList and isinstance(tokens, (ParseResults, list)),\n', ' modal=self.modalResults)\n', ' except Exception as err:\n', ' # ~ print "Exception raised in user parse action:", err\n', ' if self.debugActions[FAIL]:\n', ' self.debugActions[FAIL](instring, tokensStart, self, err)\n', ' raise\n', ' else:\n', ' for fn in self.parseAction:\n', ' 
try:\n', ' tokens = fn(instring, tokensStart, retTokens)\n', ' except IndexError as parse_action_exc:\n', ' exc = ParseException("exception raised in parse action")\n', ' exc.__cause__ = parse_action_exc\n', ' raise exc\n', '\n', ' if tokens is not None and tokens is not retTokens:\n', ' retTokens = ParseResults(tokens,\n', ' self.resultsName,\n', ' asList=self.saveAsList and isinstance(tokens, (ParseResults, list)),\n', ' modal=self.modalResults)\n', ' if debugging:\n', ' # ~ print ("Matched", self, "->", retTokens.asList())\n', ' if self.debugActions[MATCH]:\n', ' self.debugActions[MATCH](instring, tokensStart, loc, self, retTokens)\n', '\n', ' return loc, retTokens\n', '\n', ' def tryParse(self, instring, loc):\n', ' try:\n', ' return self._parse(instring, loc, doActions=False)[0]\n', ' except ParseFatalException:\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', ' def canParseNext(self, instring, loc):\n', ' try:\n', ' self.tryParse(instring, loc)\n', ' except (ParseException, IndexError):\n', ' return False\n', ' else:\n', ' return True\n', '\n', ' class _UnboundedCache(object):\n', ' def __init__(self):\n', ' cache = {}\n', ' self.not_in_cache = not_in_cache = object()\n', '\n', ' def get(self, key):\n', ' return cache.get(key, not_in_cache)\n', '\n', ' def set(self, key, value):\n', ' cache[key] = value\n', '\n', ' def clear(self):\n', ' cache.clear()\n', '\n', ' def cache_len(self):\n', ' return len(cache)\n', '\n', ' self.get = types.MethodType(get, self)\n', ' self.set = types.MethodType(set, self)\n', ' self.clear = types.MethodType(clear, self)\n', ' self.__len__ = types.MethodType(cache_len, self)\n', '\n', ' if _OrderedDict is not None:\n', ' class _FifoCache(object):\n', ' def __init__(self, size):\n', ' self.not_in_cache = not_in_cache = object()\n', '\n', ' cache = _OrderedDict()\n', '\n', ' def get(self, key):\n', ' return cache.get(key, not_in_cache)\n', '\n', ' def set(self, key, value):\n', ' cache[key] = value\n', ' while len(cache) > size:\n', ' try:\n', ' cache.popitem(False)\n', ' except KeyError:\n', ' pass\n', '\n', ' def clear(self):\n', ' cache.clear()\n', '\n', ' def cache_len(self):\n', ' return len(cache)\n', '\n', ' self.get = types.MethodType(get, self)\n', ' self.set = types.MethodType(set, self)\n', ' self.clear = types.MethodType(clear, self)\n', ' self.__len__ = types.MethodType(cache_len, self)\n', '\n', ' else:\n', ' class _FifoCache(object):\n', ' def __init__(self, size):\n', ' self.not_in_cache = not_in_cache = object()\n', '\n', ' cache = {}\n', ' key_fifo = collections.deque([], size)\n', '\n', ' def get(self, key):\n', ' return cache.get(key, not_in_cache)\n', '\n', ' def set(self, key, value):\n', ' cache[key] = value\n', ' while len(key_fifo) > size:\n', ' cache.pop(key_fifo.popleft(), None)\n', ' key_fifo.append(key)\n', '\n', ' def clear(self):\n', ' cache.clear()\n', ' key_fifo.clear()\n', '\n', ' def cache_len(self):\n', ' return len(cache)\n', '\n', ' self.get = types.MethodType(get, self)\n', ' self.set = types.MethodType(set, self)\n', ' self.clear = types.MethodType(clear, self)\n', ' self.__len__ = types.MethodType(cache_len, self)\n', '\n', ' # argument cache for optimizing repeated calls when backtracking through recursive expressions\n', " packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail\n", ' packrat_cache_lock = RLock()\n', ' packrat_cache_stats = [0, 0]\n', '\n', ' # this method gets repeatedly called during backtracking with the same arguments -\n', 
' # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression\n', ' def _parseCache(self, instring, loc, doActions=True, callPreParse=True):\n', ' HIT, MISS = 0, 1\n', ' lookup = (self, instring, loc, callPreParse, doActions)\n', ' with ParserElement.packrat_cache_lock:\n', ' cache = ParserElement.packrat_cache\n', ' value = cache.get(lookup)\n', ' if value is cache.not_in_cache:\n', ' ParserElement.packrat_cache_stats[MISS] += 1\n', ' try:\n', ' value = self._parseNoCache(instring, loc, doActions, callPreParse)\n', ' except ParseBaseException as pe:\n', ' # cache a copy of the exception, without the traceback\n', ' cache.set(lookup, pe.__class__(*pe.args))\n', ' raise\n', ' else:\n', ' cache.set(lookup, (value[0], value[1].copy()))\n', ' return value\n', ' else:\n', ' ParserElement.packrat_cache_stats[HIT] += 1\n', ' if isinstance(value, Exception):\n', ' raise value\n', ' return value[0], value[1].copy()\n', '\n', ' _parse = _parseNoCache\n', '\n', ' @staticmethod\n', ' def resetCache():\n', ' ParserElement.packrat_cache.clear()\n', ' ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)\n', '\n', ' _packratEnabled = False\n', ' @staticmethod\n', ' def enablePackrat(cache_size_limit=128):\n', ' """Enables "packrat" parsing, which adds memoizing to the parsing logic.\n', ' Repeated parse attempts at the same string location (which happens\n', ' often in many complex grammars) can immediately return a cached value,\n', ' instead of re-executing parsing/validating code. Memoizing is done of\n', ' both valid results and parsing exceptions.\n', '\n', ' Parameters:\n', '\n', ' - cache_size_limit - (default= ``128``) - if an integer value is provided\n', ' will limit the size of the packrat cache; if None is passed, then\n', ' the cache size will be unbounded; if 0 is passed, the cache will\n', ' be effectively disabled.\n', '\n', ' This speedup may break existing programs that use parse actions that\n', ' have side-effects. For this reason, packrat parsing is disabled when\n', ' you first import pyparsing. 
To activate the packrat feature, your\n', ' program must call the class method :class:`ParserElement.enablePackrat`.\n', ' For best results, call ``enablePackrat()`` immediately after\n', ' importing pyparsing.\n', '\n', ' Example::\n', '\n', ' import pyparsing\n', ' pyparsing.ParserElement.enablePackrat()\n', ' """\n', ' if not ParserElement._packratEnabled:\n', ' ParserElement._packratEnabled = True\n', ' if cache_size_limit is None:\n', ' ParserElement.packrat_cache = ParserElement._UnboundedCache()\n', ' else:\n', ' ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)\n', ' ParserElement._parse = ParserElement._parseCache\n', '\n', ' def parseString(self, instring, parseAll=False):\n', ' """\n', ' Execute the parse expression with the given string.\n', ' This is the main interface to the client code, once the complete\n', ' expression has been built.\n', '\n', ' Returns the parsed data as a :class:`ParseResults` object, which may be\n', ' accessed as a list, or as a dict or object with attributes if the given parser\n', ' includes results names.\n', '\n', ' If you want the grammar to require that the entire input string be\n', ' successfully parsed, then set ``parseAll`` to True (equivalent to ending\n', ' the grammar with ``StringEnd()``).\n', '\n', ' Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string,\n', ' in order to report proper column numbers in parse actions.\n', ' If the input string contains tabs and\n', ' the grammar uses parse actions that use the ``loc`` argument to index into the\n', ' string being parsed, you can ensure you have a consistent view of the input\n', ' string by:\n', '\n', ' - calling ``parseWithTabs`` on your grammar before calling ``parseString``\n', ' (see :class:`parseWithTabs`)\n', ' - define your parse action using the full ``(s, loc, toks)`` signature, and\n', " reference the input string using the parse action's ``s`` argument\n", ' - explictly expand the tabs in your input string before calling\n', ' ``parseString``\n', '\n', ' Example::\n', '\n', " Word('a').parseString('aaaaabaaa') # -> ['aaaaa']\n", " Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text\n", ' """\n', ' ParserElement.resetCache()\n', ' if not self.streamlined:\n', ' self.streamline()\n', ' # ~ self.saveAsList = True\n', ' for e in self.ignoreExprs:\n', ' e.streamline()\n', ' if not self.keepTabs:\n', ' instring = instring.expandtabs()\n', ' try:\n', ' loc, tokens = self._parse(instring, 0)\n', ' if parseAll:\n', ' loc = self.preParse(instring, loc)\n', ' se = Empty() + StringEnd()\n', ' se._parse(instring, loc)\n', ' except ParseBaseException as exc:\n', ' if ParserElement.verbose_stacktrace:\n', ' raise\n', ' else:\n', ' # catch and re-raise exception from here, clearing out pyparsing internal stack trace\n', " if getattr(exc, '__traceback__', None) is not None:\n", ' exc.__traceback__ = self._trim_traceback(exc.__traceback__)\n', ' raise exc\n', ' else:\n', ' return tokens\n', '\n', ' def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):\n', ' """\n', ' Scan the input string for expression matches. Each match will return the\n', ' matching tokens, start location, and end location. May be called with optional\n', " ``maxMatches`` argument, to clip scanning after 'n' matches are found. If\n", ' ``overlap`` is specified, then overlapping matches will be reported.\n', '\n', ' Note that the start and end locations are reported relative to the string\n', ' being parsed. 
See :class:`parseString` for more information on parsing\n', ' strings with embedded tabs.\n', '\n', ' Example::\n', '\n', ' source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"\n', ' print(source)\n', ' for tokens, start, end in Word(alphas).scanString(source):\n', " print(' '*start + '^'*(end-start))\n", " print(' '*start + tokens[0])\n", '\n', ' prints::\n', '\n', ' sldjf123lsdjjkf345sldkjf879lkjsfd987\n', ' ^^^^^\n', ' sldjf\n', ' ^^^^^^^\n', ' lsdjjkf\n', ' ^^^^^^\n', ' sldkjf\n', ' ^^^^^^\n', ' lkjsfd\n', ' """\n', ' if not self.streamlined:\n', ' self.streamline()\n', ' for e in self.ignoreExprs:\n', ' e.streamline()\n', '\n', ' if not self.keepTabs:\n', ' instring = _ustr(instring).expandtabs()\n', ' instrlen = len(instring)\n', ' loc = 0\n', ' preparseFn = self.preParse\n', ' parseFn = self._parse\n', ' ParserElement.resetCache()\n', ' matches = 0\n', ' try:\n', ' while loc <= instrlen and matches < maxMatches:\n', ' try:\n', ' preloc = preparseFn(instring, loc)\n', ' nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)\n', ' except ParseException:\n', ' loc = preloc + 1\n', ' else:\n', ' if nextLoc > loc:\n', ' matches += 1\n', ' yield tokens, preloc, nextLoc\n', ' if overlap:\n', ' nextloc = preparseFn(instring, loc)\n', ' if nextloc > loc:\n', ' loc = nextLoc\n', ' else:\n', ' loc += 1\n', ' else:\n', ' loc = nextLoc\n', ' else:\n', ' loc = preloc + 1\n', ' except ParseBaseException as exc:\n', ' if ParserElement.verbose_stacktrace:\n', ' raise\n', ' else:\n', ' # catch and re-raise exception from here, clearing out pyparsing internal stack trace\n', " if getattr(exc, '__traceback__', None) is not None:\n", ' exc.__traceback__ = self._trim_traceback(exc.__traceback__)\n', ' raise exc\n', '\n', ' def transformString(self, instring):\n', ' """\n', ' Extension to :class:`scanString`, to modify matching text with modified tokens that may\n', ' be returned from a parse action. To use ``transformString``, define a grammar and\n', ' attach a parse action to it that modifies the returned token list.\n', ' Invoking ``transformString()`` on a target string will then scan for matches,\n', ' and replace the matched text patterns according to the logic in the parse\n', ' action. 
``transformString()`` returns the resulting transformed string.\n', '\n', ' Example::\n', '\n', ' wd = Word(alphas)\n', ' wd.setParseAction(lambda toks: toks[0].title())\n', '\n', ' print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))\n', '\n', ' prints::\n', '\n', ' Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.\n', ' """\n', ' out = []\n', ' lastE = 0\n', ' # force preservation of <TAB>s, to minimize unwanted transformation of string, and to\n', ' # keep string locs straight between transformString and scanString\n', ' self.keepTabs = True\n', ' try:\n', ' for t, s, e in self.scanString(instring):\n', ' out.append(instring[lastE:s])\n', ' if t:\n', ' if isinstance(t, ParseResults):\n', ' out += t.asList()\n', ' elif isinstance(t, list):\n', ' out += t\n', ' else:\n', ' out.append(t)\n', ' lastE = e\n', ' out.append(instring[lastE:])\n', ' out = [o for o in out if o]\n', ' return "".join(map(_ustr, _flatten(out)))\n', ' except ParseBaseException as exc:\n', ' if ParserElement.verbose_stacktrace:\n', ' raise\n', ' else:\n', ' # catch and re-raise exception from here, clearing out pyparsing internal stack trace\n', " if getattr(exc, '__traceback__', None) is not None:\n", ' exc.__traceback__ = self._trim_traceback(exc.__traceback__)\n', ' raise exc\n', '\n', ' def searchString(self, instring, maxMatches=_MAX_INT):\n', ' """\n', ' Another extension to :class:`scanString`, simplifying the access to the tokens found\n', ' to match the given parse expression. May be called with optional\n', " ``maxMatches`` argument, to clip searching after 'n' matches are found.\n", '\n', ' Example::\n', '\n', ' # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters\n', ' cap_word = Word(alphas.upper(), alphas.lower())\n', '\n', ' print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))\n', '\n', ' # the sum() builtin can be used to merge results into a single ParseResults object\n', ' print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))\n', '\n', ' prints::\n', '\n', " [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]\n", " ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']\n", ' """\n', ' try:\n', ' return ParseResults([t for t, s, e in self.scanString(instring, maxMatches)])\n', ' except ParseBaseException as exc:\n', ' if ParserElement.verbose_stacktrace:\n', ' raise\n', ' else:\n', ' # catch and re-raise exception from here, clearing out pyparsing internal stack trace\n', " if getattr(exc, '__traceback__', None) is not None:\n", ' exc.__traceback__ = self._trim_traceback(exc.__traceback__)\n', ' raise exc\n', '\n', ' def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):\n', ' """\n', ' Generator method to split a string using the given expression as a separator.\n', ' May be called with optional ``maxsplit`` argument, to limit the number of splits;\n', ' and the optional ``includeSeparators`` argument (default= ``False``), if the separating\n', ' matching text should be included in the split results.\n', '\n', ' Example::\n', '\n', ' punc = oneOf(list(".,;:/-!?"))\n', ' print(list(punc.split("This, this?, this sentence, is badly punctuated!")))\n', '\n', ' prints::\n', '\n', " ['This', ' this', '', ' this sentence', ' is badly punctuated', '']\n", ' """\n', ' splits = 0\n', ' last = 0\n', ' for t, s, e in self.scanString(instring, maxMatches=maxsplit):\n', ' 
yield instring[last:s]\n', ' if includeSeparators:\n', ' yield t[0]\n', ' last = e\n', ' yield instring[last:]\n', '\n', ' def __add__(self, other):\n', ' """\n', ' Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement\n', ' converts them to :class:`Literal`s by default.\n', '\n', ' Example::\n', '\n', ' greet = Word(alphas) + "," + Word(alphas) + "!"\n', ' hello = "Hello, World!"\n', ' print (hello, "->", greet.parseString(hello))\n', '\n', ' prints::\n', '\n', " Hello, World! -> ['Hello', ',', 'World', '!']\n", '\n', ' ``...`` may be used as a parse expression as a short form of :class:`SkipTo`.\n', '\n', " Literal('start') + ... + Literal('end')\n", '\n', ' is equivalent to:\n', '\n', ' Literal(\'start\') + SkipTo(\'end\')("_skipped*") + Literal(\'end\')\n', '\n', " Note that the skipped text is returned with '_skipped' as a results name,\n", ' and to support having multiple skips in the same parser, the value returned is\n', ' a list of all skipped text.\n', ' """\n', ' if other is Ellipsis:\n', ' return _PendingSkip(self)\n', '\n', ' if isinstance(other, basestring):\n', ' other = self._literalStringClass(other)\n', ' if not isinstance(other, ParserElement):\n', ' warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', ' SyntaxWarning, stacklevel=2)\n', ' return None\n', ' return And([self, other])\n', '\n', ' def __radd__(self, other):\n', ' """\n', ' Implementation of + operator when left operand is not a :class:`ParserElement`\n', ' """\n', ' if other is Ellipsis:\n', ' return SkipTo(self)("_skipped*") + self\n', '\n', ' if isinstance(other, basestring):\n', ' other = self._literalStringClass(other)\n', ' if not isinstance(other, ParserElement):\n', ' warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', ' SyntaxWarning, stacklevel=2)\n', ' return None\n', ' return other + self\n', '\n', ' def __sub__(self, other):\n', ' """\n', ' Implementation of - operator, returns :class:`And` with error stop\n', ' """\n', ' if isinstance(other, basestring):\n', ' other = self._literalStringClass(other)\n', ' if not isinstance(other, ParserElement):\n', ' warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', ' SyntaxWarning, stacklevel=2)\n', ' return None\n', ' return self + And._ErrorStop() + other\n', '\n', ' def __rsub__(self, other):\n', ' """\n', ' Implementation of - operator when left operand is not a :class:`ParserElement`\n', ' """\n', ' if isinstance(other, basestring):\n', ' other = self._literalStringClass(other)\n', ' if not isinstance(other, ParserElement):\n', ' warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', ' SyntaxWarning, stacklevel=2)\n', ' return None\n', ' return other - self\n', '\n', ' def __mul__(self, other):\n', ' """\n', ' Implementation of * operator, allows use of ``expr * 3`` in place of\n', ' ``expr + expr + expr``. Expressions may also me multiplied by a 2-integer\n', ' tuple, similar to ``{min, max}`` multipliers in regular expressions. 
Tuples\n', ' may also include ``None`` as in:\n', ' - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent\n', ' to ``expr*n + ZeroOrMore(expr)``\n', ' (read as "at least n instances of ``expr``")\n', ' - ``expr*(None, n)`` is equivalent to ``expr*(0, n)``\n', ' (read as "0 to n instances of ``expr``")\n', ' - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)``\n', ' - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)``\n', '\n', ' Note that ``expr*(None, n)`` does not raise an exception if\n', ' more than n exprs exist in the input stream; that is,\n', ' ``expr*(None, n)`` does not enforce a maximum number of expr\n', ' occurrences. If this behavior is desired, then write\n', ' ``expr*(None, n) + ~expr``\n', ' """\n', ' if other is Ellipsis:\n', ' other = (0, None)\n', ' elif isinstance(other, tuple) and other[:1] == (Ellipsis,):\n', ' other = ((0, ) + other[1:] + (None,))[:2]\n', '\n', ' if isinstance(other, int):\n', ' minElements, optElements = other, 0\n', ' elif isinstance(other, tuple):\n', ' other = tuple(o if o is not Ellipsis else None for o in other)\n', ' other = (other + (None, None))[:2]\n', ' if other[0] is None:\n', ' other = (0, other[1])\n', ' if isinstance(other[0], int) and other[1] is None:\n', ' if other[0] == 0:\n', ' return ZeroOrMore(self)\n', ' if other[0] == 1:\n', ' return OneOrMore(self)\n', ' else:\n', ' return self * other[0] + ZeroOrMore(self)\n', ' elif isinstance(other[0], int) and isinstance(other[1], int):\n', ' minElements, optElements = other\n', ' optElements -= minElements\n', ' else:\n', ' raise TypeError("cannot multiply \'ParserElement\' and (\'%s\', \'%s\') objects", type(other[0]), type(other[1]))\n', ' else:\n', ' raise TypeError("cannot multiply \'ParserElement\' and \'%s\' objects", type(other))\n', '\n', ' if minElements < 0:\n', ' raise ValueError("cannot multiply ParserElement by negative value")\n', ' if optElements < 0:\n', ' raise ValueError("second tuple value must be greater or equal to first tuple value")\n', ' if minElements == optElements == 0:\n', ' raise ValueError("cannot multiply ParserElement by 0 or (0, 0)")\n', '\n', ' if optElements:\n', ' def makeOptionalList(n):\n', ' if n > 1:\n', ' return Optional(self + makeOptionalList(n - 1))\n', ' else:\n', ' return Optional(self)\n', ' if minElements:\n', ' if minElements == 1:\n', ' ret = self + makeOptionalList(optElements)\n', ' else:\n', ' ret = And([self] * minElements) + makeOptionalList(optElements)\n', ' else:\n', ' ret = makeOptionalList(optElements)\n', ' else:\n', ' if minElements == 1:\n', ' ret = self\n', ' else:\n', ' ret = And([self] * minElements)\n', ' return ret\n', '\n', ' def __rmul__(self, other):\n', ' return self.__mul__(other)\n', '\n', ' def __or__(self, other):\n', ' """\n', ' Implementation of | operator - returns :class:`MatchFirst`\n', ' """\n', ' if other is Ellipsis:\n', ' return _PendingSkip(self, must_skip=True)\n', '\n', ' if isinstance(other, basestring):\n', ' other = self._literalStringClass(other)\n', ' if not isinstance(other, ParserElement):\n', ' warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', ' SyntaxWarning, stacklevel=2)\n', ' return None\n', ' return MatchFirst([self, other])\n', '\n', ' def __ror__(self, other):\n', ' """\n', ' Implementation of | operator when left operand is not a :class:`ParserElement`\n', ' """\n', ' if isinstance(other, basestring):\n', ' other = self._literalStringClass(other)\n', ' if not isinstance(other, ParserElement):\n', ' warnings.warn("Cannot combine 
element of type %s with ParserElement" % type(other),\n', ' SyntaxWarning, stacklevel=2)\n', ' return None\n', ' return other | self\n', '\n', ' def __xor__(self, other):\n', ' """\n', ' Implementation of ^ operator - returns :class:`Or`\n', ' """\n', ' if isinstance(other, basestring):\n', ' other = self._literalStringClass(other)\n', ' if not isinstance(other, ParserElement):\n', ' warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', ' SyntaxWarning, stacklevel=2)\n', ' return None\n', ' return Or([self, other])\n', '\n', ' def __rxor__(self, other):\n', ' """\n', ' Implementation of ^ operator when left operand is not a :class:`ParserElement`\n', ' """\n', ' if isinstance(other, basestring):\n', ' other = self._literalStringClass(other)\n', ' if not isinstance(other, ParserElement):\n', ' warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', ' SyntaxWarning, stacklevel=2)\n', ' return None\n', ' return other ^ self\n', '\n', ' def __and__(self, other):\n', ' """\n', ' Implementation of & operator - returns :class:`Each`\n', ' """\n', ' if isinstance(other, basestring):\n', ' other = self._literalStringClass(other)\n', ' if not isinstance(other, ParserElement):\n', ' warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', ' SyntaxWarning, stacklevel=2)\n', ' return None\n', ' return Each([self, other])\n', '\n', ' def __rand__(self, other):\n', ' """\n', ' Implementation of & operator when left operand is not a :class:`ParserElement`\n', ' """\n', ' if isinstance(other, basestring):\n', ' other = self._literalStringClass(other)\n', ' if not isinstance(other, ParserElement):\n', ' warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),\n', ' SyntaxWarning, stacklevel=2)\n', ' return None\n', ' return other & self\n', '\n', ' def __invert__(self):\n', ' """\n', ' Implementation of ~ operator - returns :class:`NotAny`\n', ' """\n', ' return NotAny(self)\n', '\n', ' def __iter__(self):\n', ' # must implement __iter__ to override legacy use of sequential access to __getitem__ to\n', ' # iterate over a sequence\n', " raise TypeError('%r object is not iterable' % self.__class__.__name__)\n", '\n', ' def __getitem__(self, key):\n', ' """\n', ' use ``[]`` indexing notation as a short form for expression repetition:\n', ' - ``expr[n]`` is equivalent to ``expr*n``\n', ' - ``expr[m, n]`` is equivalent to ``expr*(m, n)``\n', ' - ``expr[n, ...]`` or ``expr[n,]`` is equivalent\n', ' to ``expr*n + ZeroOrMore(expr)``\n', ' (read as "at least n instances of ``expr``")\n', ' - ``expr[..., n]`` is equivalent to ``expr*(0, n)``\n', ' (read as "0 to n instances of ``expr``")\n', ' - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)``\n', ' - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)``\n', ' ``None`` may be used in place of ``...``.\n', '\n', ' Note that ``expr[..., n]`` and ``expr[m, n]``do not raise an exception\n', ' if more than ``n`` ``expr``s exist in the input stream. If this behavior is\n', ' desired, then write ``expr[..., n] + ~expr``.\n', ' """\n', '\n', ' # convert single arg keys to tuples\n', ' try:\n', ' if isinstance(key, str):\n', ' key = (key,)\n', ' iter(key)\n', ' except TypeError:\n', ' key = (key, key)\n', '\n', ' if len(key) > 2:\n', ' warnings.warn("only 1 or 2 index arguments supported ({0}{1})".format(key[:5],\n', " '... 
[{0}]'.format(len(key))\n", " if len(key) > 5 else ''))\n", '\n', ' # clip to 2 elements\n', ' ret = self * tuple(key[:2])\n', ' return ret\n', '\n', ' def __call__(self, name=None):\n', ' """\n', ' Shortcut for :class:`setResultsName`, with ``listAllMatches=False``.\n', '\n', " If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be\n", ' passed as ``True``.\n', '\n', ' If ``name` is omitted, same as calling :class:`copy`.\n', '\n', ' Example::\n', '\n', ' # these are equivalent\n', ' userdata = Word(alphas).setResultsName("name") + Word(nums + "-").setResultsName("socsecno")\n', ' userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")\n', ' """\n', ' if name is not None:\n', ' return self._setResultsName(name)\n', ' else:\n', ' return self.copy()\n', '\n', ' def suppress(self):\n', ' """\n', ' Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from\n', ' cluttering up returned output.\n', ' """\n', ' return Suppress(self)\n', '\n', ' def leaveWhitespace(self):\n', ' """\n', ' Disables the skipping of whitespace before matching the characters in the\n', " :class:`ParserElement`'s defined pattern. This is normally only used internally by\n", ' the pyparsing module, but may be needed in some whitespace-sensitive grammars.\n', ' """\n', ' self.skipWhitespace = False\n', ' return self\n', '\n', ' def setWhitespaceChars(self, chars):\n', ' """\n', ' Overrides the default whitespace chars\n', ' """\n', ' self.skipWhitespace = True\n', ' self.whiteChars = chars\n', ' self.copyDefaultWhiteChars = False\n', ' return self\n', '\n', ' def parseWithTabs(self):\n', ' """\n', ' Overrides default behavior to expand ``<TAB>``s to spaces before parsing the input string.\n', ' Must be called before ``parseString`` when the input grammar contains elements that\n', ' match ``<TAB>`` characters.\n', ' """\n', ' self.keepTabs = True\n', ' return self\n', '\n', ' def ignore(self, other):\n', ' """\n', ' Define expression to be ignored (e.g., comments) while doing pattern\n', ' matching; may be called repeatedly, to define multiple comment or other\n', ' ignorable patterns.\n', '\n', ' Example::\n', '\n', ' patt = OneOrMore(Word(alphas))\n', " patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']\n", '\n', ' patt.ignore(cStyleComment)\n', " patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']\n", ' """\n', ' if isinstance(other, basestring):\n', ' other = Suppress(other)\n', '\n', ' if isinstance(other, Suppress):\n', ' if other not in self.ignoreExprs:\n', ' self.ignoreExprs.append(other)\n', ' else:\n', ' self.ignoreExprs.append(Suppress(other.copy()))\n', ' return self\n', '\n', ' def setDebugActions(self, startAction, successAction, exceptionAction):\n', ' """\n', ' Enable display of debugging messages while doing pattern matching.\n', ' """\n', ' self.debugActions = (startAction or _defaultStartDebugAction,\n', ' successAction or _defaultSuccessDebugAction,\n', ' exceptionAction or _defaultExceptionDebugAction)\n', ' self.debug = True\n', ' return self\n', '\n', ' def setDebug(self, flag=True):\n', ' """\n', ' Enable display of debugging messages while doing pattern matching.\n', ' Set ``flag`` to True to enable, False to disable.\n', '\n', ' Example::\n', '\n', ' wd = Word(alphas).setName("alphaword")\n', ' integer = Word(nums).setName("numword")\n', ' term = wd | integer\n', '\n', ' # turn on debugging for wd\n', ' wd.setDebug()\n', '\n', ' OneOrMore(term).parseString("abc 123 xyz 890")\n', '\n', ' prints::\n', 
'\n', ' Match alphaword at loc 0(1,1)\n', " Matched alphaword -> ['abc']\n", ' Match alphaword at loc 3(1,4)\n', ' Exception raised:Expected alphaword (at char 4), (line:1, col:5)\n', ' Match alphaword at loc 7(1,8)\n', " Matched alphaword -> ['xyz']\n", ' Match alphaword at loc 11(1,12)\n', ' Exception raised:Expected alphaword (at char 12), (line:1, col:13)\n', ' Match alphaword at loc 15(1,16)\n', ' Exception raised:Expected alphaword (at char 15), (line:1, col:16)\n', '\n', ' The output shown is that produced by the default debug actions - custom debug actions can be\n', ' specified using :class:`setDebugActions`. Prior to attempting\n', ' to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``\n', ' is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"``\n', ' message is shown. Also note the use of :class:`setName` to assign a human-readable name to the expression,\n', ' which makes debugging and exception messages easier to understand - for instance, the default\n', ' name created for the :class:`Word` expression without calling ``setName`` is ``"W:(ABCD...)"``.\n', ' """\n', ' if flag:\n', ' self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction)\n', ' else:\n', ' self.debug = False\n', ' return self\n', '\n', ' def __str__(self):\n', ' return self.name\n', '\n', ' def __repr__(self):\n', ' return _ustr(self)\n', '\n', ' def streamline(self):\n', ' self.streamlined = True\n', ' self.strRepr = None\n', ' return self\n', '\n', ' def checkRecursion(self, parseElementList):\n', ' pass\n', '\n', ' def validate(self, validateTrace=None):\n', ' """\n', ' Check defined expressions for valid structure, check for infinite recursive definitions.\n', ' """\n', ' self.checkRecursion([])\n', '\n', ' def parseFile(self, file_or_filename, parseAll=False):\n', ' """\n', ' Execute the parse expression on the given file or filename.\n', ' If a filename is specified (instead of a file object),\n', ' the entire file is opened, read, and closed before parsing.\n', ' """\n', ' try:\n', ' file_contents = file_or_filename.read()\n', ' except AttributeError:\n', ' with open(file_or_filename, "r") as f:\n', ' file_contents = f.read()\n', ' try:\n', ' return self.parseString(file_contents, parseAll)\n', ' except ParseBaseException as exc:\n', ' if ParserElement.verbose_stacktrace:\n', ' raise\n', ' else:\n', ' # catch and re-raise exception from here, clearing out pyparsing internal stack trace\n', " if getattr(exc, '__traceback__', None) is not None:\n", ' exc.__traceback__ = self._trim_traceback(exc.__traceback__)\n', ' raise exc\n', '\n', ' def __eq__(self, other):\n', ' if self is other:\n', ' return True\n', ' elif isinstance(other, basestring):\n', ' return self.matches(other)\n', ' elif isinstance(other, ParserElement):\n', ' return vars(self) == vars(other)\n', ' return False\n', '\n', ' def __ne__(self, other):\n', ' return not (self == other)\n', '\n', ' def __hash__(self):\n', ' return id(self)\n', '\n', ' def __req__(self, other):\n', ' return self == other\n', '\n', ' def __rne__(self, other):\n', ' return not (self == other)\n', '\n', ' def matches(self, testString, parseAll=True):\n', ' """\n', ' Method for quick testing of a parser against a test string. 
Good for simple\n', ' inline microtests of sub expressions while building up larger parser.\n', '\n', ' Parameters:\n', ' - testString - to test against this expression for a match\n', ' - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests\n', '\n', ' Example::\n', '\n', ' expr = Word(nums)\n', ' assert expr.matches("100")\n', ' """\n', ' try:\n', ' self.parseString(_ustr(testString), parseAll=parseAll)\n', ' return True\n', ' except ParseBaseException:\n', ' return False\n', '\n', " def runTests(self, tests, parseAll=True, comment='#',\n", ' fullDump=True, printResults=True, failureTests=False, postParse=None,\n', ' file=None):\n', ' """\n', ' Execute the parse expression on a series of test strings, showing each\n', ' test, the parsed results or where the parse failed. Quick and easy way to\n', ' run a parse expression against a list of sample strings.\n', '\n', ' Parameters:\n', ' - tests - a list of separate test strings, or a multiline string of test strings\n', ' - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests\n', " - comment - (default= ``'#'``) - expression for indicating embedded comments in the test\n", ' string; pass None to disable comment filtering\n', ' - fullDump - (default= ``True``) - dump results as list followed by results names in nested outline;\n', ' if False, only dump nested list\n', ' - printResults - (default= ``True``) prints test output to stdout\n', ' - failureTests - (default= ``False``) indicates if these tests are expected to fail parsing\n', ' - postParse - (default= ``None``) optional callback for successful parse results; called as\n', ' `fn(test_string, parse_results)` and returns a string to be added to the test output\n', ' - file - (default=``None``) optional file-like object to which test output will be written;\n', ' if None, will default to ``sys.stdout``\n', '\n', ' Returns: a (success, results) tuple, where success indicates that all tests succeeded\n', ' (or failed if ``failureTests`` is True), and the results contain a list of lines of each\n', " test's output\n", '\n', ' Example::\n', '\n', ' number_expr = pyparsing_common.number.copy()\n', '\n', " result = number_expr.runTests('''\n", ' # unsigned integer\n', ' 100\n', ' # negative integer\n', ' -100\n', ' # float with scientific notation\n', ' 6.02e23\n', ' # integer with scientific notation\n', ' 1e-12\n', " ''')\n", ' print("Success" if result[0] else "Failed!")\n', '\n', " result = number_expr.runTests('''\n", ' # stray character\n', ' 100Z\n', " # missing leading digit before '.'\n", ' -.100\n', " # too many '.'\n", ' 3.14.159\n', " ''', failureTests=True)\n", ' print("Success" if result[0] else "Failed!")\n', '\n', ' prints::\n', '\n', ' # unsigned integer\n', ' 100\n', ' [100]\n', '\n', ' # negative integer\n', ' -100\n', ' [-100]\n', '\n', ' # float with scientific notation\n', ' 6.02e23\n', ' [6.02e+23]\n', '\n', ' # integer with scientific notation\n', ' 1e-12\n', ' [1e-12]\n', '\n', ' Success\n', '\n', ' # stray character\n', ' 100Z\n', ' ^\n', ' FAIL: Expected end of text (at char 3), (line:1, col:4)\n', '\n', " # missing leading digit before '.'\n", ' -.100\n', ' ^\n', ' FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)\n', '\n', " # too many '.'\n", ' 3.14.159\n', ' ^\n', ' FAIL: Expected end of text (at char 4), (line:1, col:5)\n', '\n', ' Success\n', '\n', ' Each test string must be on a single line. 
If you want to test a string that spans multiple\n', ' lines, create a test like this::\n', '\n', ' expr.runTest(r"this is a test\\\\n of strings that spans \\\\n 3 lines")\n', '\n', " (Note that this is a raw string literal, you must include the leading 'r'.)\n", ' """\n', ' if isinstance(tests, basestring):\n', ' tests = list(map(str.strip, tests.rstrip().splitlines()))\n', ' if isinstance(comment, basestring):\n', ' comment = Literal(comment)\n', ' if file is None:\n', ' file = sys.stdout\n', ' print_ = file.write\n', '\n', ' allResults = []\n', ' comments = []\n', ' success = True\n', " NL = Literal(r'\\n').addParseAction(replaceWith('\\n')).ignore(quotedString)\n", " BOM = u'\\ufeff'\n", ' for t in tests:\n', ' if comment is not None and comment.matches(t, False) or comments and not t:\n', ' comments.append(t)\n', ' continue\n', ' if not t:\n', ' continue\n', " out = ['\\n' + '\\n'.join(comments) if comments else '', t]\n", ' comments = []\n', ' try:\n', ' # convert newline marks to actual newlines, and strip leading BOM if present\n', ' t = NL.transformString(t.lstrip(BOM))\n', ' result = self.parseString(t, parseAll=parseAll)\n', ' except ParseBaseException as pe:\n', ' fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""\n', " if '\\n' in t:\n", ' out.append(line(pe.loc, t))\n', " out.append(' ' * (col(pe.loc, t) - 1) + '^' + fatal)\n", ' else:\n', " out.append(' ' * pe.loc + '^' + fatal)\n", ' out.append("FAIL: " + str(pe))\n', ' success = success and failureTests\n', ' result = pe\n', ' except Exception as exc:\n', ' out.append("FAIL-EXCEPTION: " + str(exc))\n', ' success = success and failureTests\n', ' result = exc\n', ' else:\n', ' success = success and not failureTests\n', ' if postParse is not None:\n', ' try:\n', ' pp_value = postParse(t, result)\n', ' if pp_value is not None:\n', ' if isinstance(pp_value, ParseResults):\n', ' out.append(pp_value.dump())\n', ' else:\n', ' out.append(str(pp_value))\n', ' else:\n', ' out.append(result.dump())\n', ' except Exception as e:\n', ' out.append(result.dump(full=fullDump))\n', ' out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e))\n', ' else:\n', ' out.append(result.dump(full=fullDump))\n', '\n', ' if printResults:\n', ' if fullDump:\n', " out.append('')\n", " print_('\\n'.join(out))\n", '\n', ' allResults.append((t, result))\n', '\n', ' return success, allResults\n', '\n', '\n', 'class _PendingSkip(ParserElement):\n', " # internal placeholder class to hold a place were '...' 
is added to a parser element,\n", ' # once another ParserElement is added, this placeholder will be replaced with a SkipTo\n', ' def __init__(self, expr, must_skip=False):\n', ' super(_PendingSkip, self).__init__()\n', " self.strRepr = str(expr + Empty()).replace('Empty', '...')\n", ' self.name = self.strRepr\n', ' self.anchor = expr\n', ' self.must_skip = must_skip\n', '\n', ' def __add__(self, other):\n', ' skipper = SkipTo(other).setName("...")("_skipped*")\n', ' if self.must_skip:\n', ' def must_skip(t):\n', " if not t._skipped or t._skipped.asList() == ['']:\n", ' del t[0]\n', ' t.pop("_skipped", None)\n', ' def show_skip(t):\n', " if t._skipped.asList()[-1:] == ['']:\n", " skipped = t.pop('_skipped')\n", " t['_skipped'] = 'missing <' + repr(self.anchor) + '>'\n", ' return (self.anchor + skipper().addParseAction(must_skip)\n', ' | skipper().addParseAction(show_skip)) + other\n', '\n', ' return self.anchor + skipper + other\n', '\n', ' def __repr__(self):\n', ' return self.strRepr\n', '\n', ' def parseImpl(self, *args):\n', ' raise Exception("use of `...` expression without following SkipTo target expression")\n', '\n', '\n', 'class Token(ParserElement):\n', ' """Abstract :class:`ParserElement` subclass, for defining atomic\n', ' matching patterns.\n', ' """\n', ' def __init__(self):\n', ' super(Token, self).__init__(savelist=False)\n', '\n', '\n', 'class Empty(Token):\n', ' """An empty token, will always match.\n', ' """\n', ' def __init__(self):\n', ' super(Empty, self).__init__()\n', ' self.name = "Empty"\n', ' self.mayReturnEmpty = True\n', ' self.mayIndexError = False\n', '\n', '\n', 'class NoMatch(Token):\n', ' """A token that will never match.\n', ' """\n', ' def __init__(self):\n', ' super(NoMatch, self).__init__()\n', ' self.name = "NoMatch"\n', ' self.mayReturnEmpty = True\n', ' self.mayIndexError = False\n', ' self.errmsg = "Unmatchable token"\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '\n', 'class Literal(Token):\n', ' """Token to exactly match a specified string.\n', '\n', ' Example::\n', '\n', " Literal('blah').parseString('blah') # -> ['blah']\n", " Literal('blah').parseString('blahfooblah') # -> ['blah']\n", ' Literal(\'blah\').parseString(\'bla\') # -> Exception: Expected "blah"\n', '\n', ' For case-insensitive matching, use :class:`CaselessLiteral`.\n', '\n', ' For keyword matching (force word break before and after the matched string),\n', ' use :class:`Keyword` or :class:`CaselessKeyword`.\n', ' """\n', ' def __init__(self, matchString):\n', ' super(Literal, self).__init__()\n', ' self.match = matchString\n', ' self.matchLen = len(matchString)\n', ' try:\n', ' self.firstMatchChar = matchString[0]\n', ' except IndexError:\n', ' warnings.warn("null string passed to Literal; use Empty() instead",\n', ' SyntaxWarning, stacklevel=2)\n', ' self.__class__ = Empty\n', ' self.name = \'"%s"\' % _ustr(self.match)\n', ' self.errmsg = "Expected " + self.name\n', ' self.mayReturnEmpty = False\n', ' self.mayIndexError = False\n', '\n', ' # Performance tuning: modify __class__ to select\n', ' # a parseImpl optimized for single-character check\n', ' if self.matchLen == 1 and type(self) is Literal:\n', ' self.__class__ = _SingleCharLiteral\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' if instring[loc] == self.firstMatchChar and instring.startswith(self.match, loc):\n', ' return loc + self.matchLen, self.match\n', ' raise ParseException(instring, loc, self.errmsg, 
self)\n', '\n', 'class _SingleCharLiteral(Literal):\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' if instring[loc] == self.firstMatchChar:\n', ' return loc + 1, self.match\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '_L = Literal\n', 'ParserElement._literalStringClass = Literal\n', '\n', 'class Keyword(Token):\n', ' """Token to exactly match a specified string as a keyword, that is,\n', ' it must be immediately followed by a non-keyword character. Compare\n', ' with :class:`Literal`:\n', '\n', ' - ``Literal("if")`` will match the leading ``\'if\'`` in\n', " ``'ifAndOnlyIf'``.\n", ' - ``Keyword("if")`` will not; it will only match the leading\n', " ``'if'`` in ``'if x=1'``, or ``'if(y==2)'``\n", '\n', ' Accepts two optional constructor arguments in addition to the\n', ' keyword string:\n', '\n', ' - ``identChars`` is a string of characters that would be valid\n', ' identifier characters, defaulting to all alphanumerics + "_" and\n', ' "$"\n', ' - ``caseless`` allows case-insensitive matching, default is ``False``.\n', '\n', ' Example::\n', '\n', ' Keyword("start").parseString("start") # -> [\'start\']\n', ' Keyword("start").parseString("starting") # -> Exception\n', '\n', ' For case-insensitive matching, use :class:`CaselessKeyword`.\n', ' """\n', ' DEFAULT_KEYWORD_CHARS = alphanums + "_$"\n', '\n', ' def __init__(self, matchString, identChars=None, caseless=False):\n', ' super(Keyword, self).__init__()\n', ' if identChars is None:\n', ' identChars = Keyword.DEFAULT_KEYWORD_CHARS\n', ' self.match = matchString\n', ' self.matchLen = len(matchString)\n', ' try:\n', ' self.firstMatchChar = matchString[0]\n', ' except IndexError:\n', ' warnings.warn("null string passed to Keyword; use Empty() instead",\n', ' SyntaxWarning, stacklevel=2)\n', ' self.name = \'"%s"\' % self.match\n', ' self.errmsg = "Expected " + self.name\n', ' self.mayReturnEmpty = False\n', ' self.mayIndexError = False\n', ' self.caseless = caseless\n', ' if caseless:\n', ' self.caselessmatch = matchString.upper()\n', ' identChars = identChars.upper()\n', ' self.identChars = set(identChars)\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' if self.caseless:\n', ' if ((instring[loc:loc + self.matchLen].upper() == self.caselessmatch)\n', ' and (loc >= len(instring) - self.matchLen\n', ' or instring[loc + self.matchLen].upper() not in self.identChars)\n', ' and (loc == 0\n', ' or instring[loc - 1].upper() not in self.identChars)):\n', ' return loc + self.matchLen, self.match\n', '\n', ' else:\n', ' if instring[loc] == self.firstMatchChar:\n', ' if ((self.matchLen == 1 or instring.startswith(self.match, loc))\n', ' and (loc >= len(instring) - self.matchLen\n', ' or instring[loc + self.matchLen] not in self.identChars)\n', ' and (loc == 0 or instring[loc - 1] not in self.identChars)):\n', ' return loc + self.matchLen, self.match\n', '\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', ' def copy(self):\n', ' c = super(Keyword, self).copy()\n', ' c.identChars = Keyword.DEFAULT_KEYWORD_CHARS\n', ' return c\n', '\n', ' @staticmethod\n', ' def setDefaultKeywordChars(chars):\n', ' """Overrides the default Keyword chars\n', ' """\n', ' Keyword.DEFAULT_KEYWORD_CHARS = chars\n', '\n', 'class CaselessLiteral(Literal):\n', ' """Token to match a specified string, ignoring case of letters.\n', ' Note: the matched results will always be in the case of the given\n', ' match string, NOT the case of the input text.\n', '\n', ' Example::\n', '\n', ' 
OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> [\'CMD\', \'CMD\', \'CMD\']\n', '\n', ' (Contrast with example for :class:`CaselessKeyword`.)\n', ' """\n', ' def __init__(self, matchString):\n', ' super(CaselessLiteral, self).__init__(matchString.upper())\n', ' # Preserve the defining literal.\n', ' self.returnString = matchString\n', ' self.name = "\'%s\'" % self.returnString\n', ' self.errmsg = "Expected " + self.name\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' if instring[loc:loc + self.matchLen].upper() == self.match:\n', ' return loc + self.matchLen, self.returnString\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', 'class CaselessKeyword(Keyword):\n', ' """\n', ' Caseless version of :class:`Keyword`.\n', '\n', ' Example::\n', '\n', ' OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> [\'CMD\', \'CMD\']\n', '\n', ' (Contrast with example for :class:`CaselessLiteral`.)\n', ' """\n', ' def __init__(self, matchString, identChars=None):\n', ' super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True)\n', '\n', 'class CloseMatch(Token):\n', ' """A variation on :class:`Literal` which matches "close" matches,\n', " that is, strings with at most 'n' mismatching characters.\n", ' :class:`CloseMatch` takes parameters:\n', '\n', ' - ``match_string`` - string to be matched\n', ' - ``maxMismatches`` - (``default=1``) maximum number of\n', ' mismatches allowed to count as a match\n', '\n', ' The results from a successful parse will contain the matched text\n', ' from the input string and the following named results:\n', '\n', ' - ``mismatches`` - a list of the positions within the\n', ' match_string where mismatches were found\n', ' - ``original`` - the original match_string used to compare\n', ' against the input string\n', '\n', ' If ``mismatches`` is an empty list, then the match was an exact\n', ' match.\n', '\n', ' Example::\n', '\n', ' patt = CloseMatch("ATCATCGAATGGA")\n', ' patt.parseString("ATCATCGAAXGGA") # -> ([\'ATCATCGAAXGGA\'], {\'mismatches\': [[9]], \'original\': [\'ATCATCGAATGGA\']})\n', ' patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected \'ATCATCGAATGGA\' (with up to 1 mismatches) (at char 0), (line:1, col:1)\n', '\n', ' # exact match\n', ' patt.parseString("ATCATCGAATGGA") # -> ([\'ATCATCGAATGGA\'], {\'mismatches\': [[]], \'original\': [\'ATCATCGAATGGA\']})\n', '\n', ' # close match allowing up to 2 mismatches\n', ' patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)\n', ' patt.parseString("ATCAXCGAAXGGA") # -> ([\'ATCAXCGAAXGGA\'], {\'mismatches\': [[4, 9]], \'original\': [\'ATCATCGAATGGA\']})\n', ' """\n', ' def __init__(self, match_string, maxMismatches=1):\n', ' super(CloseMatch, self).__init__()\n', ' self.name = match_string\n', ' self.match_string = match_string\n', ' self.maxMismatches = maxMismatches\n', ' self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)\n', ' self.mayIndexError = False\n', ' self.mayReturnEmpty = False\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' start = loc\n', ' instrlen = len(instring)\n', ' maxloc = start + len(self.match_string)\n', '\n', ' if maxloc <= instrlen:\n', ' match_string = self.match_string\n', ' match_stringloc = 0\n', ' mismatches = []\n', ' maxMismatches = self.maxMismatches\n', '\n', ' for match_stringloc, s_m in enumerate(zip(instring[loc:maxloc], match_string)):\n', ' src, mat = s_m\n', ' if src != mat:\n', ' mismatches.append(match_stringloc)\n', ' 
if len(mismatches) > maxMismatches:\n', ' break\n', ' else:\n', ' loc = match_stringloc + 1\n', ' results = ParseResults([instring[start:loc]])\n', " results['original'] = match_string\n", " results['mismatches'] = mismatches\n", ' return loc, results\n', '\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', '\n', 'class Word(Token):\n', ' """Token for matching words composed of allowed character sets.\n', ' Defined with string containing all allowed initial characters, an\n', ' optional string containing allowed body characters (if omitted,\n', ' defaults to the initial character set), and an optional minimum,\n', ' maximum, and/or exact length. The default value for ``min`` is\n', ' 1 (a minimum value < 1 is not valid); the default values for\n', ' ``max`` and ``exact`` are 0, meaning no maximum or exact\n', ' length restriction. An optional ``excludeChars`` parameter can\n', ' list characters that might be found in the input ``bodyChars``\n', ' string; useful to define a word of all printables except for one or\n', ' two characters, for instance.\n', '\n', ' :class:`srange` is useful for defining custom character set strings\n', ' for defining ``Word`` expressions, using range notation from\n', ' regular expression character sets.\n', '\n', ' A common mistake is to use :class:`Word` to match a specific literal\n', ' string, as in ``Word("Address")``. Remember that :class:`Word`\n', ' uses the string argument to define *sets* of matchable characters.\n', ' This expression would match "Add", "AAA", "dAred", or any other word\n', " made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an\n", ' exact literal string, use :class:`Literal` or :class:`Keyword`.\n', '\n', ' pyparsing includes helper strings for building Words:\n', '\n', ' - :class:`alphas`\n', ' - :class:`nums`\n', ' - :class:`alphanums`\n', ' - :class:`hexnums`\n', ' - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255\n', ' - accented, tilded, umlauted, etc.)\n', ' - :class:`punc8bit` (non-alphabetic characters in ASCII range\n', ' 128-255 - currency, symbols, superscripts, diacriticals, etc.)\n', ' - :class:`printables` (any non-whitespace character)\n', '\n', ' Example::\n', '\n', ' # a word composed of digits\n', ' integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))\n', '\n', ' # a word with a leading capital, and zero or more lowercase\n', ' capital_word = Word(alphas.upper(), alphas.lower())\n', '\n', " # hostnames are alphanumeric, with leading alpha, and '-'\n", " hostname = Word(alphas, alphanums + '-')\n", '\n', ' # roman numeral (not a strict parser, accepts invalid mix of characters)\n', ' roman = Word("IVXLCDM")\n', '\n', " # any string of non-whitespace characters, except for ','\n", ' csv_value = Word(printables, excludeChars=",")\n', ' """\n', ' def __init__(self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None):\n', ' super(Word, self).__init__()\n', ' if excludeChars:\n', ' excludeChars = set(excludeChars)\n', " initChars = ''.join(c for c in initChars if c not in excludeChars)\n", ' if bodyChars:\n', " bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)\n", ' self.initCharsOrig = initChars\n', ' self.initChars = set(initChars)\n', ' if bodyChars:\n', ' self.bodyCharsOrig = bodyChars\n', ' self.bodyChars = set(bodyChars)\n', ' else:\n', ' self.bodyCharsOrig = initChars\n', ' self.bodyChars = set(initChars)\n', '\n', ' self.maxSpecified = max > 0\n', '\n', ' if min < 1:\n', ' raise 
ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")\n', '\n', ' self.minLen = min\n', '\n', ' if max > 0:\n', ' self.maxLen = max\n', ' else:\n', ' self.maxLen = _MAX_INT\n', '\n', ' if exact > 0:\n', ' self.maxLen = exact\n', ' self.minLen = exact\n', '\n', ' self.name = _ustr(self)\n', ' self.errmsg = "Expected " + self.name\n', ' self.mayIndexError = False\n', ' self.asKeyword = asKeyword\n', '\n', " if ' ' not in self.initCharsOrig + self.bodyCharsOrig and (min == 1 and max == 0 and exact == 0):\n", ' if self.bodyCharsOrig == self.initCharsOrig:\n', ' self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)\n', ' elif len(self.initCharsOrig) == 1:\n', ' self.reString = "%s[%s]*" % (re.escape(self.initCharsOrig),\n', ' _escapeRegexRangeChars(self.bodyCharsOrig),)\n', ' else:\n', ' self.reString = "[%s][%s]*" % (_escapeRegexRangeChars(self.initCharsOrig),\n', ' _escapeRegexRangeChars(self.bodyCharsOrig),)\n', ' if self.asKeyword:\n', ' self.reString = r"\\b" + self.reString + r"\\b"\n', '\n', ' try:\n', ' self.re = re.compile(self.reString)\n', ' except Exception:\n', ' self.re = None\n', ' else:\n', ' self.re_match = self.re.match\n', ' self.__class__ = _WordRegex\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' if instring[loc] not in self.initChars:\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', ' start = loc\n', ' loc += 1\n', ' instrlen = len(instring)\n', ' bodychars = self.bodyChars\n', ' maxloc = start + self.maxLen\n', ' maxloc = min(maxloc, instrlen)\n', ' while loc < maxloc and instring[loc] in bodychars:\n', ' loc += 1\n', '\n', ' throwException = False\n', ' if loc - start < self.minLen:\n', ' throwException = True\n', ' elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars:\n', ' throwException = True\n', ' elif self.asKeyword:\n', ' if (start > 0 and instring[start - 1] in bodychars\n', ' or loc < instrlen and instring[loc] in bodychars):\n', ' throwException = True\n', '\n', ' if throwException:\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', ' return loc, instring[start:loc]\n', '\n', ' def __str__(self):\n', ' try:\n', ' return super(Word, self).__str__()\n', ' except Exception:\n', ' pass\n', '\n', ' if self.strRepr is None:\n', '\n', ' def charsAsStr(s):\n', ' if len(s) > 4:\n', ' return s[:4] + "..."\n', ' else:\n', ' return s\n', '\n', ' if self.initCharsOrig != self.bodyCharsOrig:\n', ' self.strRepr = "W:(%s, %s)" % (charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig))\n', ' else:\n', ' self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)\n', '\n', ' return self.strRepr\n', '\n', 'class _WordRegex(Word):\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' result = self.re_match(instring, loc)\n', ' if not result:\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', ' loc = result.end()\n', ' return loc, result.group()\n', '\n', '\n', 'class Char(_WordRegex):\n', ' """A short-cut class for defining ``Word(characters, exact=1)``,\n', ' when defining a match of any single character in a string of\n', ' characters.\n', ' """\n', ' def __init__(self, charset, asKeyword=False, excludeChars=None):\n', ' super(Char, self).__init__(charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars)\n', ' self.reString = "[%s]" % _escapeRegexRangeChars(\'\'.join(self.initChars))\n', ' if asKeyword:\n', ' self.reString = r"\\b%s\\b" % self.reString\n', ' self.re = 
re.compile(self.reString)\n', ' self.re_match = self.re.match\n', '\n', '\n', 'class Regex(Token):\n', ' r"""Token for matching strings that match a given regular\n', ' expression. Defined with string specifying the regular expression in\n', ' a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_.\n', ' If the given regex contains named groups (defined using ``(?P<name>...)``),\n', ' these will be preserved as named parse results.\n', '\n', ' If instead of the Python stdlib re module you wish to use a different RE module\n', ' (such as the `regex` module), you can replace it by either building your\n', ' Regex object with a compiled RE that was compiled using regex:\n', '\n', ' Example::\n', '\n', ' realnum = Regex(r"[+-]?\\d+\\.\\d*")\n', " date = Regex(r'(?P<year>\\d{4})-(?P<month>\\d\\d?)-(?P<day>\\d\\d?)')\n", ' # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression\n', ' roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")\n', '\n', ' # use regex module instead of stdlib re module to construct a Regex using\n', ' # a compiled regular expression\n', ' import regex\n', " parser = pp.Regex(regex.compile(r'[0-9]'))\n", '\n', ' """\n', ' def __init__(self, pattern, flags=0, asGroupList=False, asMatch=False):\n', ' """The parameters ``pattern`` and ``flags`` are passed\n', ' to the ``re.compile()`` function as-is. See the Python\n', ' `re module <https://docs.python.org/3/library/re.html>`_ module for an\n', ' explanation of the acceptable patterns and flags.\n', ' """\n', ' super(Regex, self).__init__()\n', '\n', ' if isinstance(pattern, basestring):\n', ' if not pattern:\n', ' warnings.warn("null string passed to Regex; use Empty() instead",\n', ' SyntaxWarning, stacklevel=2)\n', '\n', ' self.pattern = pattern\n', ' self.flags = flags\n', '\n', ' try:\n', ' self.re = re.compile(self.pattern, self.flags)\n', ' self.reString = self.pattern\n', ' except sre_constants.error:\n', ' warnings.warn("invalid pattern (%s) passed to Regex" % pattern,\n', ' SyntaxWarning, stacklevel=2)\n', ' raise\n', '\n', " elif hasattr(pattern, 'pattern') and hasattr(pattern, 'match'):\n", ' self.re = pattern\n', ' self.pattern = self.reString = pattern.pattern\n', ' self.flags = flags\n', '\n', ' else:\n', ' raise TypeError("Regex may only be constructed with a string or a compiled RE object")\n', '\n', ' self.re_match = self.re.match\n', '\n', ' self.name = _ustr(self)\n', ' self.errmsg = "Expected " + self.name\n', ' self.mayIndexError = False\n', ' self.mayReturnEmpty = self.re_match("") is not None\n', ' self.asGroupList = asGroupList\n', ' self.asMatch = asMatch\n', ' if self.asGroupList:\n', ' self.parseImpl = self.parseImplAsGroupList\n', ' if self.asMatch:\n', ' self.parseImpl = self.parseImplAsMatch\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' result = self.re_match(instring, loc)\n', ' if not result:\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', ' loc = result.end()\n', ' ret = ParseResults(result.group())\n', ' d = result.groupdict()\n', ' if d:\n', ' for k, v in d.items():\n', ' ret[k] = v\n', ' return loc, ret\n', '\n', ' def parseImplAsGroupList(self, instring, loc, doActions=True):\n', ' result = self.re_match(instring, loc)\n', ' if not result:\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', ' loc = result.end()\n', ' ret = result.groups()\n', ' return loc, ret\n', '\n', ' def parseImplAsMatch(self, 
instring, loc, doActions=True):\n', ' result = self.re_match(instring, loc)\n', ' if not result:\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', ' loc = result.end()\n', ' ret = result\n', ' return loc, ret\n', '\n', ' def __str__(self):\n', ' try:\n', ' return super(Regex, self).__str__()\n', ' except Exception:\n', ' pass\n', '\n', ' if self.strRepr is None:\n', ' self.strRepr = "Re:(%s)" % repr(self.pattern)\n', '\n', ' return self.strRepr\n', '\n', ' def sub(self, repl):\n', ' r"""\n', ' Return Regex with an attached parse action to transform the parsed\n', ' result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.\n', '\n', ' Example::\n', '\n', ' make_html = Regex(r"(\\w+):(.*?):").sub(r"<\\1>\\2</\\1>")\n', ' print(make_html.transformString("h1:main title:"))\n', ' # prints "<h1>main title</h1>"\n', ' """\n', ' if self.asGroupList:\n', ' warnings.warn("cannot use sub() with Regex(asGroupList=True)",\n', ' SyntaxWarning, stacklevel=2)\n', ' raise SyntaxError()\n', '\n', ' if self.asMatch and callable(repl):\n', ' warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)",\n', ' SyntaxWarning, stacklevel=2)\n', ' raise SyntaxError()\n', '\n', ' if self.asMatch:\n', ' def pa(tokens):\n', ' return tokens[0].expand(repl)\n', ' else:\n', ' def pa(tokens):\n', ' return self.re.sub(repl, tokens[0])\n', ' return self.addParseAction(pa)\n', '\n', 'class QuotedString(Token):\n', ' r"""\n', ' Token for matching strings that are delimited by quoting characters.\n', '\n', ' Defined with the following parameters:\n', '\n', ' - quoteChar - string of one or more characters defining the\n', ' quote delimiting string\n', ' - escChar - character to escape quotes, typically backslash\n', ' (default= ``None``)\n', ' - escQuote - special quote sequence to escape an embedded quote\n', ' string (such as SQL\'s ``""`` to escape an embedded ``"``)\n', ' (default= ``None``)\n', ' - multiline - boolean indicating whether quotes can span\n', ' multiple lines (default= ``False``)\n', ' - unquoteResults - boolean indicating whether the matched text\n', ' should be unquoted (default= ``True``)\n', ' - endQuoteChar - string of one or more characters defining the\n', ' end of the quote delimited string (default= ``None`` => same as\n', ' quoteChar)\n', ' - convertWhitespaceEscapes - convert escaped whitespace\n', " (``'\\t'``, ``'\\n'``, etc.) 
to actual whitespace\n", ' (default= ``True``)\n', '\n', ' Example::\n', '\n', ' qs = QuotedString(\'"\')\n', ' print(qs.searchString(\'lsjdf "This is the quote" sldjf\'))\n', " complex_qs = QuotedString('{{', endQuoteChar='}}')\n", ' print(complex_qs.searchString(\'lsjdf {{This is the "quote"}} sldjf\'))\n', ' sql_qs = QuotedString(\'"\', escQuote=\'""\')\n', ' print(sql_qs.searchString(\'lsjdf "This is the quote with ""embedded"" quotes" sldjf\'))\n', '\n', ' prints::\n', '\n', " [['This is the quote']]\n", ' [[\'This is the "quote"\']]\n', ' [[\'This is the quote with "embedded" quotes\']]\n', ' """\n', ' def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False,\n', ' unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):\n', ' super(QuotedString, self).__init__()\n', '\n', ' # remove white space from quote chars - wont work anyway\n', ' quoteChar = quoteChar.strip()\n', ' if not quoteChar:\n', ' warnings.warn("quoteChar cannot be the empty string", SyntaxWarning, stacklevel=2)\n', ' raise SyntaxError()\n', '\n', ' if endQuoteChar is None:\n', ' endQuoteChar = quoteChar\n', ' else:\n', ' endQuoteChar = endQuoteChar.strip()\n', ' if not endQuoteChar:\n', ' warnings.warn("endQuoteChar cannot be the empty string", SyntaxWarning, stacklevel=2)\n', ' raise SyntaxError()\n', '\n', ' self.quoteChar = quoteChar\n', ' self.quoteCharLen = len(quoteChar)\n', ' self.firstQuoteChar = quoteChar[0]\n', ' self.endQuoteChar = endQuoteChar\n', ' self.endQuoteCharLen = len(endQuoteChar)\n', ' self.escChar = escChar\n', ' self.escQuote = escQuote\n', ' self.unquoteResults = unquoteResults\n', ' self.convertWhitespaceEscapes = convertWhitespaceEscapes\n', '\n', ' if multiline:\n', ' self.flags = re.MULTILINE | re.DOTALL\n', " self.pattern = r'%s(?:[^%s%s]' % (re.escape(self.quoteChar),\n", ' _escapeRegexRangeChars(self.endQuoteChar[0]),\n', " (escChar is not None and _escapeRegexRangeChars(escChar) or ''))\n", ' else:\n', ' self.flags = 0\n', " self.pattern = r'%s(?:[^%s\\n\\r%s]' % (re.escape(self.quoteChar),\n", ' _escapeRegexRangeChars(self.endQuoteChar[0]),\n', " (escChar is not None and _escapeRegexRangeChars(escChar) or ''))\n", ' if len(self.endQuoteChar) > 1:\n', ' self.pattern += (\n', ' \'|(?:\' + \')|(?:\'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),\n', ' _escapeRegexRangeChars(self.endQuoteChar[i]))\n', " for i in range(len(self.endQuoteChar) - 1, 0, -1)) + ')')\n", '\n', ' if escQuote:\n', " self.pattern += (r'|(?:%s)' % re.escape(escQuote))\n", ' if escChar:\n', " self.pattern += (r'|(?:%s.)' % re.escape(escChar))\n", ' self.escCharReplacePattern = re.escape(self.escChar) + "(.)"\n', " self.pattern += (r')*%s' % re.escape(self.endQuoteChar))\n", '\n', ' try:\n', ' self.re = re.compile(self.pattern, self.flags)\n', ' self.reString = self.pattern\n', ' self.re_match = self.re.match\n', ' except sre_constants.error:\n', ' warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,\n', ' SyntaxWarning, stacklevel=2)\n', ' raise\n', '\n', ' self.name = _ustr(self)\n', ' self.errmsg = "Expected " + self.name\n', ' self.mayIndexError = False\n', ' self.mayReturnEmpty = True\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' result = instring[loc] == self.firstQuoteChar and self.re_match(instring, loc) or None\n', ' if not result:\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', ' loc = result.end()\n', ' ret = result.group()\n', '\n', ' if self.unquoteResults:\n', '\n', ' # strip off quotes\n', ' ret = 
ret[self.quoteCharLen: -self.endQuoteCharLen]\n', '\n', ' if isinstance(ret, basestring):\n', ' # replace escaped whitespace\n', " if '\\\\' in ret and self.convertWhitespaceEscapes:\n", ' ws_map = {\n', " r'\\t': '\\t',\n", " r'\\n': '\\n',\n", " r'\\f': '\\f',\n", " r'\\r': '\\r',\n", ' }\n', ' for wslit, wschar in ws_map.items():\n', ' ret = ret.replace(wslit, wschar)\n', '\n', ' # replace escaped characters\n', ' if self.escChar:\n', ' ret = re.sub(self.escCharReplacePattern, r"\\g<1>", ret)\n', '\n', ' # replace escaped quotes\n', ' if self.escQuote:\n', ' ret = ret.replace(self.escQuote, self.endQuoteChar)\n', '\n', ' return loc, ret\n', '\n', ' def __str__(self):\n', ' try:\n', ' return super(QuotedString, self).__str__()\n', ' except Exception:\n', ' pass\n', '\n', ' if self.strRepr is None:\n', ' self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)\n', '\n', ' return self.strRepr\n', '\n', '\n', 'class CharsNotIn(Token):\n', ' """Token for matching words composed of characters *not* in a given\n', ' set (will include whitespace in matched characters if not listed in\n', ' the provided exclusion set - see example). Defined with string\n', ' containing all disallowed characters, and an optional minimum,\n', ' maximum, and/or exact length. The default value for ``min`` is\n', ' 1 (a minimum value < 1 is not valid); the default values for\n', ' ``max`` and ``exact`` are 0, meaning no maximum or exact\n', ' length restriction.\n', '\n', ' Example::\n', '\n', " # define a comma-separated-value as anything that is not a ','\n", " csv_value = CharsNotIn(',')\n", ' print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))\n', '\n', ' prints::\n', '\n', " ['dkls', 'lsdkjf', 's12 34', '@!#', '213']\n", ' """\n', ' def __init__(self, notChars, min=1, max=0, exact=0):\n', ' super(CharsNotIn, self).__init__()\n', ' self.skipWhitespace = False\n', ' self.notChars = notChars\n', '\n', ' if min < 1:\n', ' raise ValueError("cannot specify a minimum length < 1; use "\n', ' "Optional(CharsNotIn()) if zero-length char group is permitted")\n', '\n', ' self.minLen = min\n', '\n', ' if max > 0:\n', ' self.maxLen = max\n', ' else:\n', ' self.maxLen = _MAX_INT\n', '\n', ' if exact > 0:\n', ' self.maxLen = exact\n', ' self.minLen = exact\n', '\n', ' self.name = _ustr(self)\n', ' self.errmsg = "Expected " + self.name\n', ' self.mayReturnEmpty = (self.minLen == 0)\n', ' self.mayIndexError = False\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' if instring[loc] in self.notChars:\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', ' start = loc\n', ' loc += 1\n', ' notchars = self.notChars\n', ' maxlen = min(start + self.maxLen, len(instring))\n', ' while loc < maxlen and instring[loc] not in notchars:\n', ' loc += 1\n', '\n', ' if loc - start < self.minLen:\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', ' return loc, instring[start:loc]\n', '\n', ' def __str__(self):\n', ' try:\n', ' return super(CharsNotIn, self).__str__()\n', ' except Exception:\n', ' pass\n', '\n', ' if self.strRepr is None:\n', ' if len(self.notChars) > 4:\n', ' self.strRepr = "!W:(%s...)" % self.notChars[:4]\n', ' else:\n', ' self.strRepr = "!W:(%s)" % self.notChars\n', '\n', ' return self.strRepr\n', '\n', 'class White(Token):\n', ' """Special matching class for matching whitespace. Normally,\n', ' whitespace is ignored by pyparsing grammars. 
This class is included\n', ' when some whitespace structures are significant. Define with\n', ' a string containing the whitespace characters to be matched; default\n', ' is ``" \\\\t\\\\r\\\\n"``. Also takes optional ``min``,\n', ' ``max``, and ``exact`` arguments, as defined for the\n', ' :class:`Word` class.\n', ' """\n', ' whiteStrs = {\n', " ' ' : '<SP>',\n", " '\\t': '<TAB>',\n", " '\\n': '<LF>',\n", " '\\r': '<CR>',\n", " '\\f': '<FF>',\n", " u'\\u00A0': '<NBSP>',\n", " u'\\u1680': '<OGHAM_SPACE_MARK>',\n", " u'\\u180E': '<MONGOLIAN_VOWEL_SEPARATOR>',\n", " u'\\u2000': '<EN_QUAD>',\n", " u'\\u2001': '<EM_QUAD>',\n", " u'\\u2002': '<EN_SPACE>',\n", " u'\\u2003': '<EM_SPACE>',\n", " u'\\u2004': '<THREE-PER-EM_SPACE>',\n", " u'\\u2005': '<FOUR-PER-EM_SPACE>',\n", " u'\\u2006': '<SIX-PER-EM_SPACE>',\n", " u'\\u2007': '<FIGURE_SPACE>',\n", " u'\\u2008': '<PUNCTUATION_SPACE>',\n", " u'\\u2009': '<THIN_SPACE>',\n", " u'\\u200A': '<HAIR_SPACE>',\n", " u'\\u200B': '<ZERO_WIDTH_SPACE>',\n", " u'\\u202F': '<NNBSP>',\n", " u'\\u205F': '<MMSP>',\n", " u'\\u3000': '<IDEOGRAPHIC_SPACE>',\n", ' }\n', ' def __init__(self, ws=" \\t\\r\\n", min=1, max=0, exact=0):\n', ' super(White, self).__init__()\n', ' self.matchWhite = ws\n', ' self.setWhitespaceChars("".join(c for c in self.whiteChars if c not in self.matchWhite))\n', ' # ~ self.leaveWhitespace()\n', ' self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))\n', ' self.mayReturnEmpty = True\n', ' self.errmsg = "Expected " + self.name\n', '\n', ' self.minLen = min\n', '\n', ' if max > 0:\n', ' self.maxLen = max\n', ' else:\n', ' self.maxLen = _MAX_INT\n', '\n', ' if exact > 0:\n', ' self.maxLen = exact\n', ' self.minLen = exact\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' if instring[loc] not in self.matchWhite:\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', ' start = loc\n', ' loc += 1\n', ' maxloc = start + self.maxLen\n', ' maxloc = min(maxloc, len(instring))\n', ' while loc < maxloc and instring[loc] in self.matchWhite:\n', ' loc += 1\n', '\n', ' if loc - start < self.minLen:\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', ' return loc, instring[start:loc]\n', '\n', '\n', 'class _PositionToken(Token):\n', ' def __init__(self):\n', ' super(_PositionToken, self).__init__()\n', ' self.name = self.__class__.__name__\n', ' self.mayReturnEmpty = True\n', ' self.mayIndexError = False\n', '\n', 'class GoToColumn(_PositionToken):\n', ' """Token to advance to a specific column of input text; useful for\n', ' tabular report scraping.\n', ' """\n', ' def __init__(self, colno):\n', ' super(GoToColumn, self).__init__()\n', ' self.col = colno\n', '\n', ' def preParse(self, instring, loc):\n', ' if col(loc, instring) != self.col:\n', ' instrlen = len(instring)\n', ' if self.ignoreExprs:\n', ' loc = self._skipIgnorables(instring, loc)\n', ' while loc < instrlen and instring[loc].isspace() and col(loc, instring) != self.col:\n', ' loc += 1\n', ' return loc\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' thiscol = col(loc, instring)\n', ' if thiscol > self.col:\n', ' raise ParseException(instring, loc, "Text not in expected column", self)\n', ' newloc = loc + self.col - thiscol\n', ' ret = instring[loc: newloc]\n', ' return newloc, ret\n', '\n', '\n', 'class LineStart(_PositionToken):\n', ' r"""Matches if current position is at the beginning of a line within\n', ' the parse string\n', '\n', ' Example::\n', '\n', " test = '''\\\n", ' AAA this line\n', ' AAA and this 
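The White token above makes whitespace significant instead of letting it be skipped; a small illustrative sketch (pyparsing 2.4 assumed, names are illustrative):

    from pyparsing import White, Word, alphas

    indented = White(' ', exact=4) + Word(alphas)
    print(indented.parseString('    body'))   # expected: ['    ', 'body']
    # indented.parseString('body') would raise ParseException, since the
    # four-space White token itself must match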
line\n', ' AAA but not this one\n', ' B AAA and definitely not this one\n', " '''\n", '\n', " for t in (LineStart() + 'AAA' + restOfLine).searchString(test):\n", ' print(t)\n', '\n', ' prints::\n', '\n', " ['AAA', ' this line']\n", " ['AAA', ' and this line']\n", '\n', ' """\n', ' def __init__(self):\n', ' super(LineStart, self).__init__()\n', ' self.errmsg = "Expected start of line"\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' if col(loc, instring) == 1:\n', ' return loc, []\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', 'class LineEnd(_PositionToken):\n', ' """Matches if current position is at the end of a line within the\n', ' parse string\n', ' """\n', ' def __init__(self):\n', ' super(LineEnd, self).__init__()\n', ' self.setWhitespaceChars(ParserElement.DEFAULT_WHITE_CHARS.replace("\\n", ""))\n', ' self.errmsg = "Expected end of line"\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' if loc < len(instring):\n', ' if instring[loc] == "\\n":\n', ' return loc + 1, "\\n"\n', ' else:\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', ' elif loc == len(instring):\n', ' return loc + 1, []\n', ' else:\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', 'class StringStart(_PositionToken):\n', ' """Matches if current position is at the beginning of the parse\n', ' string\n', ' """\n', ' def __init__(self):\n', ' super(StringStart, self).__init__()\n', ' self.errmsg = "Expected start of text"\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' if loc != 0:\n', ' # see if entire string up to here is just whitespace and ignoreables\n', ' if loc != self.preParse(instring, 0):\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', ' return loc, []\n', '\n', 'class StringEnd(_PositionToken):\n', ' """Matches if current position is at the end of the parse string\n', ' """\n', ' def __init__(self):\n', ' super(StringEnd, self).__init__()\n', ' self.errmsg = "Expected end of text"\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' if loc < len(instring):\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', ' elif loc == len(instring):\n', ' return loc + 1, []\n', ' elif loc > len(instring):\n', ' return loc, []\n', ' else:\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', 'class WordStart(_PositionToken):\n', ' """Matches if the current position is at the beginning of a Word,\n', ' and is not preceded by any character in a given set of\n', ' ``wordChars`` (default= ``printables``). To emulate the\n', ' ``\\b`` behavior of regular expressions, use\n', ' ``WordStart(alphanums)``. ``WordStart`` will also match at\n', ' the beginning of the string being parsed, or at the beginning of\n', ' a line.\n', ' """\n', ' def __init__(self, wordChars=printables):\n', ' super(WordStart, self).__init__()\n', ' self.wordChars = set(wordChars)\n', ' self.errmsg = "Not at the start of a word"\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' if loc != 0:\n', ' if (instring[loc - 1] in self.wordChars\n', ' or instring[loc] not in self.wordChars):\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', ' return loc, []\n', '\n', 'class WordEnd(_PositionToken):\n', ' """Matches if the current position is at the end of a Word, and is\n', ' not followed by any character in a given set of ``wordChars``\n', ' (default= ``printables``). 
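WordStart and WordEnd emulate a regex-style word boundary; a short illustrative sketch under the same assumptions:

    from pyparsing import WordStart, WordEnd, alphanums

    whole_the = WordStart(alphanums) + 'the' + WordEnd(alphanums)
    print(whole_the.searchString('the theme of the day'))
    # expected: [['the'], ['the']]  -- the match inside 'theme' is rejected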
To emulate the ``\\b`` behavior of\n', ' regular expressions, use ``WordEnd(alphanums)``. ``WordEnd``\n', ' will also match at the end of the string being parsed, or at the end\n', ' of a line.\n', ' """\n', ' def __init__(self, wordChars=printables):\n', ' super(WordEnd, self).__init__()\n', ' self.wordChars = set(wordChars)\n', ' self.skipWhitespace = False\n', ' self.errmsg = "Not at the end of a word"\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' instrlen = len(instring)\n', ' if instrlen > 0 and loc < instrlen:\n', ' if (instring[loc] in self.wordChars or\n', ' instring[loc - 1] not in self.wordChars):\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', ' return loc, []\n', '\n', '\n', 'class ParseExpression(ParserElement):\n', ' """Abstract subclass of ParserElement, for combining and\n', ' post-processing parsed tokens.\n', ' """\n', ' def __init__(self, exprs, savelist=False):\n', ' super(ParseExpression, self).__init__(savelist)\n', ' if isinstance(exprs, _generatorType):\n', ' exprs = list(exprs)\n', '\n', ' if isinstance(exprs, basestring):\n', ' self.exprs = [self._literalStringClass(exprs)]\n', ' elif isinstance(exprs, ParserElement):\n', ' self.exprs = [exprs]\n', ' elif isinstance(exprs, Iterable):\n', ' exprs = list(exprs)\n', ' # if sequence of strings provided, wrap with Literal\n', ' if any(isinstance(expr, basestring) for expr in exprs):\n', ' exprs = (self._literalStringClass(e) if isinstance(e, basestring) else e for e in exprs)\n', ' self.exprs = list(exprs)\n', ' else:\n', ' try:\n', ' self.exprs = list(exprs)\n', ' except TypeError:\n', ' self.exprs = [exprs]\n', ' self.callPreparse = False\n', '\n', ' def append(self, other):\n', ' self.exprs.append(other)\n', ' self.strRepr = None\n', ' return self\n', '\n', ' def leaveWhitespace(self):\n', ' """Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on\n', ' all contained expressions."""\n', ' self.skipWhitespace = False\n', ' self.exprs = [e.copy() for e in self.exprs]\n', ' for e in self.exprs:\n', ' e.leaveWhitespace()\n', ' return self\n', '\n', ' def ignore(self, other):\n', ' if isinstance(other, Suppress):\n', ' if other not in self.ignoreExprs:\n', ' super(ParseExpression, self).ignore(other)\n', ' for e in self.exprs:\n', ' e.ignore(self.ignoreExprs[-1])\n', ' else:\n', ' super(ParseExpression, self).ignore(other)\n', ' for e in self.exprs:\n', ' e.ignore(self.ignoreExprs[-1])\n', ' return self\n', '\n', ' def __str__(self):\n', ' try:\n', ' return super(ParseExpression, self).__str__()\n', ' except Exception:\n', ' pass\n', '\n', ' if self.strRepr is None:\n', ' self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.exprs))\n', ' return self.strRepr\n', '\n', ' def streamline(self):\n', ' super(ParseExpression, self).streamline()\n', '\n', ' for e in self.exprs:\n', ' e.streamline()\n', '\n', " # collapse nested And's of the form And(And(And(a, b), c), d) to And(a, b, c, d)\n", " # but only if there are no parse actions or resultsNames on the nested And's\n", " # (likewise for Or's and MatchFirst's)\n", ' if len(self.exprs) == 2:\n', ' other = self.exprs[0]\n', ' if (isinstance(other, self.__class__)\n', ' and not other.parseAction\n', ' and other.resultsName is None\n', ' and not other.debug):\n', ' self.exprs = other.exprs[:] + [self.exprs[1]]\n', ' self.strRepr = None\n', ' self.mayReturnEmpty |= other.mayReturnEmpty\n', ' self.mayIndexError |= other.mayIndexError\n', '\n', ' other = self.exprs[-1]\n', ' if 
(isinstance(other, self.__class__)\n', ' and not other.parseAction\n', ' and other.resultsName is None\n', ' and not other.debug):\n', ' self.exprs = self.exprs[:-1] + other.exprs[:]\n', ' self.strRepr = None\n', ' self.mayReturnEmpty |= other.mayReturnEmpty\n', ' self.mayIndexError |= other.mayIndexError\n', '\n', ' self.errmsg = "Expected " + _ustr(self)\n', '\n', ' return self\n', '\n', ' def validate(self, validateTrace=None):\n', ' tmp = (validateTrace if validateTrace is not None else [])[:] + [self]\n', ' for e in self.exprs:\n', ' e.validate(tmp)\n', ' self.checkRecursion([])\n', '\n', ' def copy(self):\n', ' ret = super(ParseExpression, self).copy()\n', ' ret.exprs = [e.copy() for e in self.exprs]\n', ' return ret\n', '\n', ' def _setResultsName(self, name, listAllMatches=False):\n', ' if __diag__.warn_ungrouped_named_tokens_in_collection:\n', ' for e in self.exprs:\n', ' if isinstance(e, ParserElement) and e.resultsName:\n', ' warnings.warn("{0}: setting results name {1!r} on {2} expression "\n', ' "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection",\n', ' name,\n', ' type(self).__name__,\n', ' e.resultsName),\n', ' stacklevel=3)\n', '\n', ' return super(ParseExpression, self)._setResultsName(name, listAllMatches)\n', '\n', '\n', 'class And(ParseExpression):\n', ' """\n', ' Requires all given :class:`ParseExpression` s to be found in the given order.\n', ' Expressions may be separated by whitespace.\n', " May be constructed using the ``'+'`` operator.\n", " May also be constructed using the ``'-'`` operator, which will\n", ' suppress backtracking.\n', '\n', ' Example::\n', '\n', ' integer = Word(nums)\n', ' name_expr = OneOrMore(Word(alphas))\n', '\n', ' expr = And([integer("id"), name_expr("name"), integer("age")])\n', ' # more easily written as:\n', ' expr = integer("id") + name_expr("name") + integer("age")\n', ' """\n', '\n', ' class _ErrorStop(Empty):\n', ' def __init__(self, *args, **kwargs):\n', ' super(And._ErrorStop, self).__init__(*args, **kwargs)\n', " self.name = '-'\n", ' self.leaveWhitespace()\n', '\n', ' def __init__(self, exprs, savelist=True):\n', ' exprs = list(exprs)\n', ' if exprs and Ellipsis in exprs:\n', ' tmp = []\n', ' for i, expr in enumerate(exprs):\n', ' if expr is Ellipsis:\n', ' if i < len(exprs) - 1:\n', ' skipto_arg = (Empty() + exprs[i + 1]).exprs[-1]\n', ' tmp.append(SkipTo(skipto_arg)("_skipped*"))\n', ' else:\n', ' raise Exception("cannot construct And with sequence ending in ...")\n', ' else:\n', ' tmp.append(expr)\n', ' exprs[:] = tmp\n', ' super(And, self).__init__(exprs, savelist)\n', ' self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)\n', ' self.setWhitespaceChars(self.exprs[0].whiteChars)\n', ' self.skipWhitespace = self.exprs[0].skipWhitespace\n', ' self.callPreparse = True\n', '\n', ' def streamline(self):\n', " # collapse any _PendingSkip's\n", ' if self.exprs:\n', ' if any(isinstance(e, ParseExpression) and e.exprs and isinstance(e.exprs[-1], _PendingSkip)\n', ' for e in self.exprs[:-1]):\n', ' for i, e in enumerate(self.exprs[:-1]):\n', ' if e is None:\n', ' continue\n', ' if (isinstance(e, ParseExpression)\n', ' and e.exprs and isinstance(e.exprs[-1], _PendingSkip)):\n', ' e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1]\n', ' self.exprs[i + 1] = None\n', ' self.exprs = [e for e in self.exprs if e is not None]\n', '\n', ' super(And, self).streamline()\n', ' self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)\n', ' return self\n', '\n', ' def parseImpl(self, 
instring, loc, doActions=True):\n', ' # pass False as last arg to _parse for first element, since we already\n', ' # pre-parsed the string as part of our And pre-parsing\n', ' loc, resultlist = self.exprs[0]._parse(instring, loc, doActions, callPreParse=False)\n', ' errorStop = False\n', ' for e in self.exprs[1:]:\n', ' if isinstance(e, And._ErrorStop):\n', ' errorStop = True\n', ' continue\n', ' if errorStop:\n', ' try:\n', ' loc, exprtokens = e._parse(instring, loc, doActions)\n', ' except ParseSyntaxException:\n', ' raise\n', ' except ParseBaseException as pe:\n', ' pe.__traceback__ = None\n', ' raise ParseSyntaxException._from_exception(pe)\n', ' except IndexError:\n', ' raise ParseSyntaxException(instring, len(instring), self.errmsg, self)\n', ' else:\n', ' loc, exprtokens = e._parse(instring, loc, doActions)\n', ' if exprtokens or exprtokens.haskeys():\n', ' resultlist += exprtokens\n', ' return loc, resultlist\n', '\n', ' def __iadd__(self, other):\n', ' if isinstance(other, basestring):\n', ' other = self._literalStringClass(other)\n', ' return self.append(other) # And([self, other])\n', '\n', ' def checkRecursion(self, parseElementList):\n', ' subRecCheckList = parseElementList[:] + [self]\n', ' for e in self.exprs:\n', ' e.checkRecursion(subRecCheckList)\n', ' if not e.mayReturnEmpty:\n', ' break\n', '\n', ' def __str__(self):\n', ' if hasattr(self, "name"):\n', ' return self.name\n', '\n', ' if self.strRepr is None:\n', ' self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"\n', '\n', ' return self.strRepr\n', '\n', '\n', 'class Or(ParseExpression):\n', ' """Requires that at least one :class:`ParseExpression` is found. If\n', ' two expressions match, the expression that matches the longest\n', " string will be used. May be constructed using the ``'^'``\n", ' operator.\n', '\n', ' Example::\n', '\n', " # construct Or using '^' operator\n", '\n', " number = Word(nums) ^ Combine(Word(nums) + '.' 
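The '-' operator mentioned in the And docstring inserts the _ErrorStop sentinel handled in parseImpl above, so a failure after that point becomes a non-backtracking ParseSyntaxException; an illustrative sketch:

    from pyparsing import Literal, Word, nums, ParseSyntaxException

    setting = Literal('port') - Word(nums)
    try:
        setting.parseString('port abc')
    except ParseSyntaxException as err:
        print(err)   # reported at the missing number; no backtracking into other alternatives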
+ Word(nums))\n", ' print(number.searchString("123 3.1416 789"))\n', '\n', ' prints::\n', '\n', " [['123'], ['3.1416'], ['789']]\n", ' """\n', ' def __init__(self, exprs, savelist=False):\n', ' super(Or, self).__init__(exprs, savelist)\n', ' if self.exprs:\n', ' self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)\n', ' else:\n', ' self.mayReturnEmpty = True\n', '\n', ' def streamline(self):\n', ' super(Or, self).streamline()\n', ' if __compat__.collect_all_And_tokens:\n', ' self.saveAsList = any(e.saveAsList for e in self.exprs)\n', ' return self\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' maxExcLoc = -1\n', ' maxException = None\n', ' matches = []\n', ' for e in self.exprs:\n', ' try:\n', ' loc2 = e.tryParse(instring, loc)\n', ' except ParseException as err:\n', ' err.__traceback__ = None\n', ' if err.loc > maxExcLoc:\n', ' maxException = err\n', ' maxExcLoc = err.loc\n', ' except IndexError:\n', ' if len(instring) > maxExcLoc:\n', ' maxException = ParseException(instring, len(instring), e.errmsg, self)\n', ' maxExcLoc = len(instring)\n', ' else:\n', ' # save match among all matches, to retry longest to shortest\n', ' matches.append((loc2, e))\n', '\n', ' if matches:\n', ' # re-evaluate all matches in descending order of length of match, in case attached actions\n', ' # might change whether or how much they match of the input.\n', ' matches.sort(key=itemgetter(0), reverse=True)\n', '\n', ' if not doActions:\n', ' # no further conditions or parse actions to change the selection of\n', ' # alternative, so the first match will be the best match\n', ' best_expr = matches[0][1]\n', ' return best_expr._parse(instring, loc, doActions)\n', '\n', ' longest = -1, None\n', ' for loc1, expr1 in matches:\n', ' if loc1 <= longest[0]:\n', ' # already have a longer match than this one will deliver, we are done\n', ' return longest\n', '\n', ' try:\n', ' loc2, toks = expr1._parse(instring, loc, doActions)\n', ' except ParseException as err:\n', ' err.__traceback__ = None\n', ' if err.loc > maxExcLoc:\n', ' maxException = err\n', ' maxExcLoc = err.loc\n', ' else:\n', ' if loc2 >= loc1:\n', ' return loc2, toks\n', " # didn't match as much as before\n", ' elif loc2 > longest[0]:\n', ' longest = loc2, toks\n', '\n', ' if longest != (-1, None):\n', ' return longest\n', '\n', ' if maxException is not None:\n', ' maxException.msg = self.errmsg\n', ' raise maxException\n', ' else:\n', ' raise ParseException(instring, loc, "no defined alternatives to match", self)\n', '\n', '\n', ' def __ixor__(self, other):\n', ' if isinstance(other, basestring):\n', ' other = self._literalStringClass(other)\n', ' return self.append(other) # Or([self, other])\n', '\n', ' def __str__(self):\n', ' if hasattr(self, "name"):\n', ' return self.name\n', '\n', ' if self.strRepr is None:\n', ' self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"\n', '\n', ' return self.strRepr\n', '\n', ' def checkRecursion(self, parseElementList):\n', ' subRecCheckList = parseElementList[:] + [self]\n', ' for e in self.exprs:\n', ' e.checkRecursion(subRecCheckList)\n', '\n', ' def _setResultsName(self, name, listAllMatches=False):\n', ' if (not __compat__.collect_all_And_tokens\n', ' and __diag__.warn_multiple_tokens_in_named_alternation):\n', ' if any(isinstance(e, And) for e in self.exprs):\n', ' warnings.warn("{0}: setting results name {1!r} on {2} expression "\n', ' "may only return a single token for an And alternative, "\n', ' "in future will return the full list of tokens".format(\n', ' 
"warn_multiple_tokens_in_named_alternation", name, type(self).__name__),\n', ' stacklevel=3)\n', '\n', ' return super(Or, self)._setResultsName(name, listAllMatches)\n', '\n', '\n', 'class MatchFirst(ParseExpression):\n', ' """Requires that at least one :class:`ParseExpression` is found. If\n', ' two expressions match, the first one listed is the one that will\n', " match. May be constructed using the ``'|'`` operator.\n", '\n', ' Example::\n', '\n', " # construct MatchFirst using '|' operator\n", '\n', ' # watch the order of expressions to match\n', " number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))\n", ' print(number.searchString("123 3.1416 789")) # Fail! -> [[\'123\'], [\'3\'], [\'1416\'], [\'789\']]\n', '\n', ' # put more selective expression first\n', " number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)\n", ' print(number.searchString("123 3.1416 789")) # Better -> [[\'123\'], [\'3.1416\'], [\'789\']]\n', ' """\n', ' def __init__(self, exprs, savelist=False):\n', ' super(MatchFirst, self).__init__(exprs, savelist)\n', ' if self.exprs:\n', ' self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)\n', ' else:\n', ' self.mayReturnEmpty = True\n', '\n', ' def streamline(self):\n', ' super(MatchFirst, self).streamline()\n', ' if __compat__.collect_all_And_tokens:\n', ' self.saveAsList = any(e.saveAsList for e in self.exprs)\n', ' return self\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' maxExcLoc = -1\n', ' maxException = None\n', ' for e in self.exprs:\n', ' try:\n', ' ret = e._parse(instring, loc, doActions)\n', ' return ret\n', ' except ParseException as err:\n', ' if err.loc > maxExcLoc:\n', ' maxException = err\n', ' maxExcLoc = err.loc\n', ' except IndexError:\n', ' if len(instring) > maxExcLoc:\n', ' maxException = ParseException(instring, len(instring), e.errmsg, self)\n', ' maxExcLoc = len(instring)\n', '\n', ' # only got here if no expression matched, raise exception for match that made it the furthest\n', ' else:\n', ' if maxException is not None:\n', ' maxException.msg = self.errmsg\n', ' raise maxException\n', ' else:\n', ' raise ParseException(instring, loc, "no defined alternatives to match", self)\n', '\n', ' def __ior__(self, other):\n', ' if isinstance(other, basestring):\n', ' other = self._literalStringClass(other)\n', ' return self.append(other) # MatchFirst([self, other])\n', '\n', ' def __str__(self):\n', ' if hasattr(self, "name"):\n', ' return self.name\n', '\n', ' if self.strRepr is None:\n', ' self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"\n', '\n', ' return self.strRepr\n', '\n', ' def checkRecursion(self, parseElementList):\n', ' subRecCheckList = parseElementList[:] + [self]\n', ' for e in self.exprs:\n', ' e.checkRecursion(subRecCheckList)\n', '\n', ' def _setResultsName(self, name, listAllMatches=False):\n', ' if (not __compat__.collect_all_And_tokens\n', ' and __diag__.warn_multiple_tokens_in_named_alternation):\n', ' if any(isinstance(e, And) for e in self.exprs):\n', ' warnings.warn("{0}: setting results name {1!r} on {2} expression "\n', ' "may only return a single token for an And alternative, "\n', ' "in future will return the full list of tokens".format(\n', ' "warn_multiple_tokens_in_named_alternation", name, type(self).__name__),\n', ' stacklevel=3)\n', '\n', ' return super(MatchFirst, self)._setResultsName(name, listAllMatches)\n', '\n', '\n', 'class Each(ParseExpression):\n', ' """Requires all given :class:`ParseExpression` s to be found, but in\n', ' any order. 
Expressions may be separated by whitespace.\n', '\n', " May be constructed using the ``'&'`` operator.\n", '\n', ' Example::\n', '\n', ' color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")\n', ' shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")\n', ' integer = Word(nums)\n', ' shape_attr = "shape:" + shape_type("shape")\n', ' posn_attr = "posn:" + Group(integer("x") + \',\' + integer("y"))("posn")\n', ' color_attr = "color:" + color("color")\n', ' size_attr = "size:" + integer("size")\n', '\n', " # use Each (using operator '&') to accept attributes in any order\n", ' # (shape and posn are required, color and size are optional)\n', ' shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)\n', '\n', " shape_spec.runTests('''\n", ' shape: SQUARE color: BLACK posn: 100, 120\n', ' shape: CIRCLE size: 50 color: BLUE posn: 50,80\n', ' color:GREEN size:20 shape:TRIANGLE posn:20,40\n', " '''\n", ' )\n', '\n', ' prints::\n', '\n', ' shape: SQUARE color: BLACK posn: 100, 120\n', " ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]\n", ' - color: BLACK\n', " - posn: ['100', ',', '120']\n", ' - x: 100\n', ' - y: 120\n', ' - shape: SQUARE\n', '\n', '\n', ' shape: CIRCLE size: 50 color: BLUE posn: 50,80\n', " ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]\n", ' - color: BLUE\n', " - posn: ['50', ',', '80']\n", ' - x: 50\n', ' - y: 80\n', ' - shape: CIRCLE\n', ' - size: 50\n', '\n', '\n', ' color: GREEN size: 20 shape: TRIANGLE posn: 20,40\n', " ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]\n", ' - color: GREEN\n', " - posn: ['20', ',', '40']\n", ' - x: 20\n', ' - y: 40\n', ' - shape: TRIANGLE\n', ' - size: 20\n', ' """\n', ' def __init__(self, exprs, savelist=True):\n', ' super(Each, self).__init__(exprs, savelist)\n', ' self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)\n', ' self.skipWhitespace = True\n', ' self.initExprGroups = True\n', ' self.saveAsList = True\n', '\n', ' def streamline(self):\n', ' super(Each, self).streamline()\n', ' self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)\n', ' return self\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' if self.initExprGroups:\n', ' self.opt1map = dict((id(e.expr), e) for e in self.exprs if isinstance(e, Optional))\n', ' opt1 = [e.expr for e in self.exprs if isinstance(e, Optional)]\n', ' opt2 = [e for e in self.exprs if e.mayReturnEmpty and not isinstance(e, (Optional, Regex))]\n', ' self.optionals = opt1 + opt2\n', ' self.multioptionals = [e.expr for e in self.exprs if isinstance(e, ZeroOrMore)]\n', ' self.multirequired = [e.expr for e in self.exprs if isinstance(e, OneOrMore)]\n', ' self.required = [e for e in self.exprs if not isinstance(e, (Optional, ZeroOrMore, OneOrMore))]\n', ' self.required += self.multirequired\n', ' self.initExprGroups = False\n', ' tmpLoc = loc\n', ' tmpReqd = self.required[:]\n', ' tmpOpt = self.optionals[:]\n', ' matchOrder = []\n', '\n', ' keepMatching = True\n', ' while keepMatching:\n', ' tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired\n', ' failed = []\n', ' for e in tmpExprs:\n', ' try:\n', ' tmpLoc = e.tryParse(instring, tmpLoc)\n', ' except ParseException:\n', ' failed.append(e)\n', ' else:\n', ' matchOrder.append(self.opt1map.get(id(e), e))\n', ' if e in tmpReqd:\n', ' tmpReqd.remove(e)\n', ' elif e in tmpOpt:\n', ' tmpOpt.remove(e)\n', ' if len(failed) == len(tmpExprs):\n', ' keepMatching = 
False\n', '\n', ' if tmpReqd:\n', ' missing = ", ".join(_ustr(e) for e in tmpReqd)\n', ' raise ParseException(instring, loc, "Missing one or more required elements (%s)" % missing)\n', '\n', ' # add any unmatched Optionals, in case they have default values defined\n', ' matchOrder += [e for e in self.exprs if isinstance(e, Optional) and e.expr in tmpOpt]\n', '\n', ' resultlist = []\n', ' for e in matchOrder:\n', ' loc, results = e._parse(instring, loc, doActions)\n', ' resultlist.append(results)\n', '\n', ' finalResults = sum(resultlist, ParseResults([]))\n', ' return loc, finalResults\n', '\n', ' def __str__(self):\n', ' if hasattr(self, "name"):\n', ' return self.name\n', '\n', ' if self.strRepr is None:\n', ' self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"\n', '\n', ' return self.strRepr\n', '\n', ' def checkRecursion(self, parseElementList):\n', ' subRecCheckList = parseElementList[:] + [self]\n', ' for e in self.exprs:\n', ' e.checkRecursion(subRecCheckList)\n', '\n', '\n', 'class ParseElementEnhance(ParserElement):\n', ' """Abstract subclass of :class:`ParserElement`, for combining and\n', ' post-processing parsed tokens.\n', ' """\n', ' def __init__(self, expr, savelist=False):\n', ' super(ParseElementEnhance, self).__init__(savelist)\n', ' if isinstance(expr, basestring):\n', ' if issubclass(self._literalStringClass, Token):\n', ' expr = self._literalStringClass(expr)\n', ' else:\n', ' expr = self._literalStringClass(Literal(expr))\n', ' self.expr = expr\n', ' self.strRepr = None\n', ' if expr is not None:\n', ' self.mayIndexError = expr.mayIndexError\n', ' self.mayReturnEmpty = expr.mayReturnEmpty\n', ' self.setWhitespaceChars(expr.whiteChars)\n', ' self.skipWhitespace = expr.skipWhitespace\n', ' self.saveAsList = expr.saveAsList\n', ' self.callPreparse = expr.callPreparse\n', ' self.ignoreExprs.extend(expr.ignoreExprs)\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' if self.expr is not None:\n', ' return self.expr._parse(instring, loc, doActions, callPreParse=False)\n', ' else:\n', ' raise ParseException("", loc, self.errmsg, self)\n', '\n', ' def leaveWhitespace(self):\n', ' self.skipWhitespace = False\n', ' self.expr = self.expr.copy()\n', ' if self.expr is not None:\n', ' self.expr.leaveWhitespace()\n', ' return self\n', '\n', ' def ignore(self, other):\n', ' if isinstance(other, Suppress):\n', ' if other not in self.ignoreExprs:\n', ' super(ParseElementEnhance, self).ignore(other)\n', ' if self.expr is not None:\n', ' self.expr.ignore(self.ignoreExprs[-1])\n', ' else:\n', ' super(ParseElementEnhance, self).ignore(other)\n', ' if self.expr is not None:\n', ' self.expr.ignore(self.ignoreExprs[-1])\n', ' return self\n', '\n', ' def streamline(self):\n', ' super(ParseElementEnhance, self).streamline()\n', ' if self.expr is not None:\n', ' self.expr.streamline()\n', ' return self\n', '\n', ' def checkRecursion(self, parseElementList):\n', ' if self in parseElementList:\n', ' raise RecursiveGrammarException(parseElementList + [self])\n', ' subRecCheckList = parseElementList[:] + [self]\n', ' if self.expr is not None:\n', ' self.expr.checkRecursion(subRecCheckList)\n', '\n', ' def validate(self, validateTrace=None):\n', ' if validateTrace is None:\n', ' validateTrace = []\n', ' tmp = validateTrace[:] + [self]\n', ' if self.expr is not None:\n', ' self.expr.validate(tmp)\n', ' self.checkRecursion([])\n', '\n', ' def __str__(self):\n', ' try:\n', ' return super(ParseElementEnhance, self).__str__()\n', ' except Exception:\n', ' pass\n', '\n', ' 
if self.strRepr is None and self.expr is not None:\n', ' self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.expr))\n', ' return self.strRepr\n', '\n', '\n', 'class FollowedBy(ParseElementEnhance):\n', ' """Lookahead matching of the given parse expression.\n', ' ``FollowedBy`` does *not* advance the parsing position within\n', ' the input string, it only verifies that the specified parse\n', ' expression matches at the current position. ``FollowedBy``\n', ' always returns a null token list. If any results names are defined\n', ' in the lookahead expression, those *will* be returned for access by\n', ' name.\n', '\n', ' Example::\n', '\n', " # use FollowedBy to match a label only if it is followed by a ':'\n", ' data_word = Word(alphas)\n', " label = data_word + FollowedBy(':')\n", " attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))\n", '\n', ' OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()\n', '\n', ' prints::\n', '\n', " [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]\n", ' """\n', ' def __init__(self, expr):\n', ' super(FollowedBy, self).__init__(expr)\n', ' self.mayReturnEmpty = True\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' # by using self._expr.parse and deleting the contents of the returned ParseResults list\n', ' # we keep any named results that were defined in the FollowedBy expression\n', ' _, ret = self.expr._parse(instring, loc, doActions=doActions)\n', ' del ret[:]\n', '\n', ' return loc, ret\n', '\n', '\n', 'class PrecededBy(ParseElementEnhance):\n', ' """Lookbehind matching of the given parse expression.\n', ' ``PrecededBy`` does not advance the parsing position within the\n', ' input string, it only verifies that the specified parse expression\n', ' matches prior to the current position. ``PrecededBy`` always\n', ' returns a null token list, but if a results name is defined on the\n', ' given expression, it is returned.\n', '\n', ' Parameters:\n', '\n', ' - expr - expression that must match prior to the current parse\n', ' location\n', ' - retreat - (default= ``None``) - (int) maximum number of characters\n', ' to lookbehind prior to the current parse location\n', '\n', ' If the lookbehind expression is a string, Literal, Keyword, or\n', ' a Word or CharsNotIn with a specified exact or maximum length, then\n', ' the retreat parameter is not required. 
Otherwise, retreat must be\n', ' specified to give a maximum number of characters to look back from\n', ' the current parse position for a lookbehind match.\n', '\n', ' Example::\n', '\n', ' # VB-style variable names with type prefixes\n', ' int_var = PrecededBy("#") + pyparsing_common.identifier\n', ' str_var = PrecededBy("$") + pyparsing_common.identifier\n', '\n', ' """\n', ' def __init__(self, expr, retreat=None):\n', ' super(PrecededBy, self).__init__(expr)\n', ' self.expr = self.expr().leaveWhitespace()\n', ' self.mayReturnEmpty = True\n', ' self.mayIndexError = False\n', ' self.exact = False\n', ' if isinstance(expr, str):\n', ' retreat = len(expr)\n', ' self.exact = True\n', ' elif isinstance(expr, (Literal, Keyword)):\n', ' retreat = expr.matchLen\n', ' self.exact = True\n', ' elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:\n', ' retreat = expr.maxLen\n', ' self.exact = True\n', ' elif isinstance(expr, _PositionToken):\n', ' retreat = 0\n', ' self.exact = True\n', ' self.retreat = retreat\n', ' self.errmsg = "not preceded by " + str(expr)\n', ' self.skipWhitespace = False\n', ' self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))\n', '\n', ' def parseImpl(self, instring, loc=0, doActions=True):\n', ' if self.exact:\n', ' if loc < self.retreat:\n', ' raise ParseException(instring, loc, self.errmsg)\n', ' start = loc - self.retreat\n', ' _, ret = self.expr._parse(instring, start)\n', ' else:\n', ' # retreat specified a maximum lookbehind window, iterate\n', ' test_expr = self.expr + StringEnd()\n', ' instring_slice = instring[max(0, loc - self.retreat):loc]\n', ' last_expr = ParseException(instring, loc, self.errmsg)\n', ' for offset in range(1, min(loc, self.retreat + 1)+1):\n', ' try:\n', " # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))\n", ' _, ret = test_expr._parse(instring_slice, len(instring_slice) - offset)\n', ' except ParseBaseException as pbe:\n', ' last_expr = pbe\n', ' else:\n', ' break\n', ' else:\n', ' raise last_expr\n', ' return loc, ret\n', '\n', '\n', 'class NotAny(ParseElementEnhance):\n', ' """Lookahead to disallow matching with the given parse expression.\n', ' ``NotAny`` does *not* advance the parsing position within the\n', ' input string, it only verifies that the specified parse expression\n', ' does *not* match at the current position. Also, ``NotAny`` does\n', ' *not* skip over leading whitespace. ``NotAny`` always returns\n', " a null token list. May be constructed using the '~' operator.\n", '\n', ' Example::\n', '\n', ' AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())\n', '\n', ' # take care not to mistake keywords for identifiers\n', ' ident = ~(AND | OR | NOT) + Word(alphas)\n', ' boolean_term = Optional(NOT) + ident\n', '\n', ' # very crude boolean expression - to support parenthesis groups and\n', ' # operation hierarchy, use infixNotation\n', ' boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term)\n', '\n', ' # integers that are followed by "." 
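A runnable illustrative sketch of PrecededBy as a lookbehind (pyparsing 2.4 assumed; names and expected output are illustrative):

    from pyparsing import PrecededBy, Word, nums

    dollar_amount = PrecededBy('$') + Word(nums)
    print(dollar_amount.searchString('price $123, qty 456'))   # expected: [['123']]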
are actually floats\n', ' integer = Word(nums) + ~Char(".")\n', ' """\n', ' def __init__(self, expr):\n', ' super(NotAny, self).__init__(expr)\n', ' # ~ self.leaveWhitespace()\n', " self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs\n", ' self.mayReturnEmpty = True\n', ' self.errmsg = "Found unwanted token, " + _ustr(self.expr)\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' if self.expr.canParseNext(instring, loc):\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', ' return loc, []\n', '\n', ' def __str__(self):\n', ' if hasattr(self, "name"):\n', ' return self.name\n', '\n', ' if self.strRepr is None:\n', ' self.strRepr = "~{" + _ustr(self.expr) + "}"\n', '\n', ' return self.strRepr\n', '\n', 'class _MultipleMatch(ParseElementEnhance):\n', ' def __init__(self, expr, stopOn=None):\n', ' super(_MultipleMatch, self).__init__(expr)\n', ' self.saveAsList = True\n', ' ender = stopOn\n', ' if isinstance(ender, basestring):\n', ' ender = self._literalStringClass(ender)\n', ' self.stopOn(ender)\n', '\n', ' def stopOn(self, ender):\n', ' if isinstance(ender, basestring):\n', ' ender = self._literalStringClass(ender)\n', ' self.not_ender = ~ender if ender is not None else None\n', ' return self\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' self_expr_parse = self.expr._parse\n', ' self_skip_ignorables = self._skipIgnorables\n', ' check_ender = self.not_ender is not None\n', ' if check_ender:\n', ' try_not_ender = self.not_ender.tryParse\n', '\n', ' # must be at least one (but first see if we are the stopOn sentinel;\n', ' # if so, fail)\n', ' if check_ender:\n', ' try_not_ender(instring, loc)\n', ' loc, tokens = self_expr_parse(instring, loc, doActions, callPreParse=False)\n', ' try:\n', ' hasIgnoreExprs = (not not self.ignoreExprs)\n', ' while 1:\n', ' if check_ender:\n', ' try_not_ender(instring, loc)\n', ' if hasIgnoreExprs:\n', ' preloc = self_skip_ignorables(instring, loc)\n', ' else:\n', ' preloc = loc\n', ' loc, tmptokens = self_expr_parse(instring, preloc, doActions)\n', ' if tmptokens or tmptokens.haskeys():\n', ' tokens += tmptokens\n', ' except (ParseException, IndexError):\n', ' pass\n', '\n', ' return loc, tokens\n', '\n', ' def _setResultsName(self, name, listAllMatches=False):\n', ' if __diag__.warn_ungrouped_named_tokens_in_collection:\n', " for e in [self.expr] + getattr(self.expr, 'exprs', []):\n", ' if isinstance(e, ParserElement) and e.resultsName:\n', ' warnings.warn("{0}: setting results name {1!r} on {2} expression "\n', ' "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection",\n', ' name,\n', ' type(self).__name__,\n', ' e.resultsName),\n', ' stacklevel=3)\n', '\n', ' return super(_MultipleMatch, self)._setResultsName(name, listAllMatches)\n', '\n', '\n', 'class OneOrMore(_MultipleMatch):\n', ' """Repetition of one or more of the given expression.\n', '\n', ' Parameters:\n', ' - expr - expression that must match one or more times\n', ' - stopOn - (default= ``None``) - expression for a terminating sentinel\n', ' (only required if the sentinel would ordinarily match the repetition\n', ' expression)\n', '\n', ' Example::\n', '\n', ' data_word = Word(alphas)\n', " label = data_word + FollowedBy(':')\n", " attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))\n", '\n', ' text = "shape: SQUARE posn: upper left color: BLACK"\n', " OneOrMore(attr_expr).parseString(text).pprint() # Fail! 
read 'color' as data instead of next label -> [['shape', 'SQUARE color']]\n", '\n', ' # use stopOn attribute for OneOrMore to avoid reading label string as part of the data\n', " attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))\n", " OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]\n", '\n', ' # could also be written as\n', ' (attr_expr * (1,)).parseString(text).pprint()\n', ' """\n', '\n', ' def __str__(self):\n', ' if hasattr(self, "name"):\n', ' return self.name\n', '\n', ' if self.strRepr is None:\n', ' self.strRepr = "{" + _ustr(self.expr) + "}..."\n', '\n', ' return self.strRepr\n', '\n', 'class ZeroOrMore(_MultipleMatch):\n', ' """Optional repetition of zero or more of the given expression.\n', '\n', ' Parameters:\n', ' - expr - expression that must match zero or more times\n', ' - stopOn - (default= ``None``) - expression for a terminating sentinel\n', ' (only required if the sentinel would ordinarily match the repetition\n', ' expression)\n', '\n', ' Example: similar to :class:`OneOrMore`\n', ' """\n', ' def __init__(self, expr, stopOn=None):\n', ' super(ZeroOrMore, self).__init__(expr, stopOn=stopOn)\n', ' self.mayReturnEmpty = True\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' try:\n', ' return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)\n', ' except (ParseException, IndexError):\n', ' return loc, []\n', '\n', ' def __str__(self):\n', ' if hasattr(self, "name"):\n', ' return self.name\n', '\n', ' if self.strRepr is None:\n', ' self.strRepr = "[" + _ustr(self.expr) + "]..."\n', '\n', ' return self.strRepr\n', '\n', '\n', 'class _NullToken(object):\n', ' def __bool__(self):\n', ' return False\n', ' __nonzero__ = __bool__\n', ' def __str__(self):\n', ' return ""\n', '\n', 'class Optional(ParseElementEnhance):\n', ' """Optional matching of the given expression.\n', '\n', ' Parameters:\n', ' - expr - expression that must match zero or more times\n', ' - default (optional) - value to be returned if the optional expression is not found.\n', '\n', ' Example::\n', '\n', ' # US postal code can be a 5-digit zip, plus optional 4-digit qualifier\n', " zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))\n", " zip.runTests('''\n", ' # traditional ZIP code\n', ' 12345\n', '\n', ' # ZIP+4 form\n', ' 12101-0001\n', '\n', ' # invalid ZIP\n', ' 98765-\n', " ''')\n", '\n', ' prints::\n', '\n', ' # traditional ZIP code\n', ' 12345\n', " ['12345']\n", '\n', ' # ZIP+4 form\n', ' 12101-0001\n', " ['12101-0001']\n", '\n', ' # invalid ZIP\n', ' 98765-\n', ' ^\n', ' FAIL: Expected end of text (at char 5), (line:1, col:6)\n', ' """\n', ' __optionalNotMatched = _NullToken()\n', '\n', ' def __init__(self, expr, default=__optionalNotMatched):\n', ' super(Optional, self).__init__(expr, savelist=False)\n', ' self.saveAsList = self.expr.saveAsList\n', ' self.defaultValue = default\n', ' self.mayReturnEmpty = True\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' try:\n', ' loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False)\n', ' except (ParseException, IndexError):\n', ' if self.defaultValue is not self.__optionalNotMatched:\n', ' if self.expr.resultsName:\n', ' tokens = ParseResults([self.defaultValue])\n', ' tokens[self.expr.resultsName] = self.defaultValue\n', ' else:\n', ' tokens = [self.defaultValue]\n', ' else:\n', ' tokens = []\n', ' return loc, tokens\n', '\n', ' def 
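The default parameter handled in Optional.parseImpl above substitutes a value when the optional piece is absent; an illustrative sketch:

    from pyparsing import Optional, Word, alphas, nums

    name_and_count = Word(alphas) + Optional(Word(nums), default='0')
    print(name_and_count.parseString('widgets 12'))   # expected: ['widgets', '12']
    print(name_and_count.parseString('widgets'))      # expected: ['widgets', '0']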
__str__(self):\n', ' if hasattr(self, "name"):\n', ' return self.name\n', '\n', ' if self.strRepr is None:\n', ' self.strRepr = "[" + _ustr(self.expr) + "]"\n', '\n', ' return self.strRepr\n', '\n', 'class SkipTo(ParseElementEnhance):\n', ' """Token for skipping over all undefined text until the matched\n', ' expression is found.\n', '\n', ' Parameters:\n', ' - expr - target expression marking the end of the data to be skipped\n', ' - include - (default= ``False``) if True, the target expression is also parsed\n', ' (the skipped text and target expression are returned as a 2-element list).\n', ' - ignore - (default= ``None``) used to define grammars (typically quoted strings and\n', ' comments) that might contain false matches to the target expression\n', ' - failOn - (default= ``None``) define expressions that are not allowed to be\n', ' included in the skipped test; if found before the target expression is found,\n', ' the SkipTo is not a match\n', '\n', ' Example::\n', '\n', " report = '''\n", ' Outstanding Issues Report - 1 Jan 2000\n', '\n', ' # | Severity | Description | Days Open\n', ' -----+----------+-------------------------------------------+-----------\n', ' 101 | Critical | Intermittent system crash | 6\n', " 94 | Cosmetic | Spelling error on Login ('log|n') | 14\n", ' 79 | Minor | System slow when running too many reports | 47\n', " '''\n", ' integer = Word(nums)\n', " SEP = Suppress('|')\n", ' # use SkipTo to simply match everything up until the next SEP\n', " # - ignore quoted strings, so that a '|' character inside a quoted string does not match\n", ' # - parse action will call token.strip() for each matched token, i.e., the description body\n', ' string_data = SkipTo(SEP, ignore=quotedString)\n', ' string_data.setParseAction(tokenMap(str.strip))\n', ' ticket_expr = (integer("issue_num") + SEP\n', ' + string_data("sev") + SEP\n', ' + string_data("desc") + SEP\n', ' + integer("days_open"))\n', '\n', ' for tkt in ticket_expr.searchString(report):\n', ' print tkt.dump()\n', '\n', ' prints::\n', '\n', " ['101', 'Critical', 'Intermittent system crash', '6']\n", ' - days_open: 6\n', ' - desc: Intermittent system crash\n', ' - issue_num: 101\n', ' - sev: Critical\n', ' [\'94\', \'Cosmetic\', "Spelling error on Login (\'log|n\')", \'14\']\n', ' - days_open: 14\n', " - desc: Spelling error on Login ('log|n')\n", ' - issue_num: 94\n', ' - sev: Cosmetic\n', " ['79', 'Minor', 'System slow when running too many reports', '47']\n", ' - days_open: 47\n', ' - desc: System slow when running too many reports\n', ' - issue_num: 79\n', ' - sev: Minor\n', ' """\n', ' def __init__(self, other, include=False, ignore=None, failOn=None):\n', ' super(SkipTo, self).__init__(other)\n', ' self.ignoreExpr = ignore\n', ' self.mayReturnEmpty = True\n', ' self.mayIndexError = False\n', ' self.includeMatch = include\n', ' self.saveAsList = False\n', ' if isinstance(failOn, basestring):\n', ' self.failOn = self._literalStringClass(failOn)\n', ' else:\n', ' self.failOn = failOn\n', ' self.errmsg = "No match found for " + _ustr(self.expr)\n', '\n', ' def parseImpl(self, instring, loc, doActions=True):\n', ' startloc = loc\n', ' instrlen = len(instring)\n', ' expr = self.expr\n', ' expr_parse = self.expr._parse\n', ' self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None\n', ' self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None\n', '\n', ' tmploc = loc\n', ' while tmploc <= instrlen:\n', ' if self_failOn_canParseNext is not None:\n', ' 
# break if failOn expression matches\n', ' if self_failOn_canParseNext(instring, tmploc):\n', ' break\n', '\n', ' if self_ignoreExpr_tryParse is not None:\n', ' # advance past ignore expressions\n', ' while 1:\n', ' try:\n', ' tmploc = self_ignoreExpr_tryParse(instring, tmploc)\n', ' except ParseBaseException:\n', ' break\n', '\n', ' try:\n', ' expr_parse(instring, tmploc, doActions=False, callPreParse=False)\n', ' except (ParseException, IndexError):\n', ' # no match, advance loc in string\n', ' tmploc += 1\n', ' else:\n', ' # matched skipto expr, done\n', ' break\n', '\n', ' else:\n', ' # ran off the end of the input string without matching skipto expr, fail\n', ' raise ParseException(instring, loc, self.errmsg, self)\n', '\n', ' # build up return values\n', ' loc = tmploc\n', ' skiptext = instring[startloc:loc]\n', ' skipresult = ParseResults(skiptext)\n', '\n', ' if self.includeMatch:\n', ' loc, mat = expr_parse(instring, loc, doActions, callPreParse=False)\n', ' skipresult += mat\n', '\n', ' return loc, skipresult\n', '\n', 'class Forward(ParseElementEnhance):\n', ' """Forward declaration of an expression to be defined later -\n', ' used for recursive grammars, such as algebraic infix notation.\n', ' When the expression is known, it is assigned to the ``Forward``\n', " variable using the '<<' operator.\n", '\n', ' Note: take care when assigning to ``Forward`` not to overlook\n', ' precedence of operators.\n', '\n', " Specifically, '|' has a lower precedence than '<<', so that::\n", '\n', ' fwdExpr << a | b | c\n', '\n', ' will actually be evaluated as::\n', '\n', ' (fwdExpr << a) | b | c\n', '\n', ' thereby leaving b and c out as parseable alternatives. It is recommended that you\n', ' explicitly group the values inserted into the ``Forward``::\n', '\n', ' fwdExpr << (a | b | c)\n', '\n', " Converting to use the '<<=' operator instead will avoid this problem.\n", '\n', ' See :class:`ParseResults.pprint` for an example of a recursive\n', ' parser created using ``Forward``.\n', ' """\n', ' def __init__(self, other=None):\n', ' super(Forward, self).__init__(other, savelist=False)\n', '\n', ' def __lshift__(self, other):\n', ' if isinstance(other, basestring):\n', ' other = self._literalStringClass(other)\n', ' self.expr = other\n', ' self.strRepr = None\n', ' self.mayIndexError = self.expr.mayIndexError\n', ' self.mayReturnEmpty = self.expr.mayReturnEmpty\n', ' self.setWhitespaceChars(self.expr.whiteChars)\n', ' self.skipWhitespace = self.expr.skipWhitespace\n', ' self.saveAsList = self.expr.saveAsList\n', ' self.ignoreExprs.extend(self.expr.ignoreExprs)\n', ' return self\n', '\n', ' def __ilshift__(self, other):\n', ' return self << other\n', '\n', ' def leaveWhitespace(self):\n', ' self.skipWhitespace = False\n', ' return self\n', '\n', ' def streamline(self):\n', ' if not self.streamlined:\n', ' self.streamlined = True\n', ' if self.expr is not None:\n', ' self.expr.streamline()\n', ' return self\n', '\n', ' def validate(self, validateTrace=None):\n', ' if validateTrace is None:\n', ' validateTrace = []\n', '\n', ' if self not in validateTrace:\n', ' tmp = validateTrace[:] + [self]\n', ' if self.expr is not None:\n', ' self.expr.validate(tmp)\n', ' self.checkRecursion([])\n', '\n', ' def __str__(self):\n', ' if hasattr(self, "name"):\n', ' return self.name\n', ' if self.strRepr is not None:\n', ' return self.strRepr\n', '\n', ' # Avoid infinite recursion by setting a temporary strRepr\n', ' self.strRepr = ": ..."\n', '\n', ' # Use the string representation of main expression.\n', " 
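A minimal recursive grammar built with Forward, using the '<<=' form recommended in its docstring above (illustrative sketch, pyparsing 2.4 assumed):

    from pyparsing import Forward, Group, Suppress, Word, ZeroOrMore, alphas

    expr = Forward()
    atom = Word(alphas)
    expr <<= atom | Group(Suppress('(') + ZeroOrMore(expr) + Suppress(')'))
    print(expr.parseString('(a (b c) d)'))   # expected: [['a', ['b', 'c'], 'd']]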
retString = '...'\n", ' try:\n', ' if self.expr is not None:\n', ' retString = _ustr(self.expr)[:1000]\n', ' else:\n', ' retString = "None"\n', ' finally:\n', ' self.strRepr = self.__class__.__name__ + ": " + retString\n', ' return self.strRepr\n', '\n', ' def copy(self):\n', ' if self.expr is not None:\n', ' return super(Forward, self).copy()\n', ' else:\n', ' ret = Forward()\n', ' ret <<= self\n', ' return ret\n', '\n', ' def _setResultsName(self, name, listAllMatches=False):\n', ' if __diag__.warn_name_set_on_empty_Forward:\n', ' if self.expr is None:\n', ' warnings.warn("{0}: setting results name {0!r} on {1} expression "\n', ' "that has no contained expression".format("warn_name_set_on_empty_Forward",\n', ' name,\n', ' type(self).__name__),\n', ' stacklevel=3)\n', '\n', ' return super(Forward, self)._setResultsName(name, listAllMatches)\n', '\n', 'class TokenConverter(ParseElementEnhance):\n', ' """\n', ' Abstract subclass of :class:`ParseExpression`, for converting parsed results.\n', ' """\n', ' def __init__(self, expr, savelist=False):\n', ' super(TokenConverter, self).__init__(expr) # , savelist)\n', ' self.saveAsList = False\n', '\n', 'class Combine(TokenConverter):\n', ' """Converter to concatenate all matching tokens to a single string.\n', ' By default, the matching patterns must also be contiguous in the\n', ' input string; this can be disabled by specifying\n', " ``'adjacent=False'`` in the constructor.\n", '\n', ' Example::\n', '\n', " real = Word(nums) + '.' + Word(nums)\n", " print(real.parseString('3.1416')) # -> ['3', '.', '1416']\n", ' # will also erroneously match the following\n', " print(real.parseString('3. 1416')) # -> ['3', '.', '1416']\n", '\n', " real = Combine(Word(nums) + '.' + Word(nums))\n", " print(real.parseString('3.1416')) # -> ['3.1416']\n", ' # no match when there are internal spaces\n', " print(real.parseString('3. 
1416')) # -> Exception: Expected W:(0123...)\n", ' """\n', ' def __init__(self, expr, joinString="", adjacent=True):\n', ' super(Combine, self).__init__(expr)\n', ' # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself\n', ' if adjacent:\n', ' self.leaveWhitespace()\n', ' self.adjacent = adjacent\n', ' self.skipWhitespace = True\n', ' self.joinString = joinString\n', ' self.callPreparse = True\n', '\n', ' def ignore(self, other):\n', ' if self.adjacent:\n', ' ParserElement.ignore(self, other)\n', ' else:\n', ' super(Combine, self).ignore(other)\n', ' return self\n', '\n', ' def postParse(self, instring, loc, tokenlist):\n', ' retToks = tokenlist.copy()\n', ' del retToks[:]\n', ' retToks += ParseResults(["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults)\n', '\n', ' if self.resultsName and retToks.haskeys():\n', ' return [retToks]\n', ' else:\n', ' return retToks\n', '\n', 'class Group(TokenConverter):\n', ' """Converter to return the matched tokens as a list - useful for\n', ' returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.\n', '\n', ' Example::\n', '\n', ' ident = Word(alphas)\n', ' num = Word(nums)\n', ' term = ident | num\n', ' func = ident + Optional(delimitedList(term))\n', ' print(func.parseString("fn a, b, 100")) # -> [\'fn\', \'a\', \'b\', \'100\']\n', '\n', ' func = ident + Group(Optional(delimitedList(term)))\n', ' print(func.parseString("fn a, b, 100")) # -> [\'fn\', [\'a\', \'b\', \'100\']]\n', ' """\n', ' def __init__(self, expr):\n', ' super(Group, self).__init__(expr)\n', ' self.saveAsList = True\n', '\n', ' def postParse(self, instring, loc, tokenlist):\n', ' return [tokenlist]\n', '\n', 'class Dict(TokenConverter):\n', ' """Converter to return a repetitive expression as a list, but also\n', ' as a dictionary. Each element can also be referenced using the first\n', ' token in the expression as its key. 
[The remainder of the cache dump is a second entry holding the cached source lines of the pyparsing module. The portion captured here spans the Dict, Suppress and OnlyOnce classes and the traceParseAction, delimitedList, countedArray, matchPreviousLiteral, matchPreviousExpr, oneOf, dictOf, originalTextFor, ungroup, locatedExpr and srange helpers; the line-by-line repr is elided, and the capture breaks off mid-file inside the srange docstring.]
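Since the dump above only shows raw cache state, a minimal usage sketch of how such entries are created and read back may help; the path "/tmp/example.py" is a hypothetical stand-in for any readable Python source file, not something taken from the dump.

    import linecache

    path = "/tmp/example.py"                 # hypothetical file, assumed to exist on disk

    # First access reads the file from disk and stores its lines in linecache.cache.
    line_10 = linecache.getline(path, 10)    # returns '' if the file or line is missing

    # Later calls are served from the in-memory cache, like the dump shown above.
    all_lines = linecache.getlines(path)     # returns [] on failure

    # If the file may have changed on disk, validate (and possibly discard) stale entries.
    linecache.checkcache(path)

    # Drop every cached entry, e.g. to reclaim memory.
    linecache.clearcache()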