💾 Archived View for tris.fyi › pydoc › linecache captured on 2022-01-08 at 13:41:08. Gemini links have been rewritten to link to archived content.
-=-=-=-=-=-=-
Cache lines from Python source files. This is intended to read lines from imported modules; hence, if a filename is not found, it will look down the module search path for a file by that name.
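For example (a minimal sketch, assuming the interpreter ships its .py sources on sys.path), a bare standard-library filename resolves even though no such file exists in the working directory, because the lookup falls back to the module search path:
```
import linecache

# No "linecache.py" exists in the working directory, so the lookup falls
# back to scanning sys.path and finds the standard-library copy.
line = linecache.getline("linecache.py", 1)
print(line, end="")  # typically the opening line of the docstring quoted above
```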
checkcache(filename=None) Discard cache entries that are out of date. (This is not checked upon each call!)
clearcache() Clear the cache entirely.
getline(filename, lineno, module_globals=None) Get a line for a Python source file from the cache. Update the cache if it doesn't contain an entry for this file already.
getlines(filename, module_globals=None) Get the lines for a Python source file from the cache. Update the cache if it doesn't contain an entry for this file already.
lazycache(filename, module_globals) Seed the cache for filename with module_globals. The module loader will be asked for the source only when getlines is called, not immediately. If there is an entry in the cache already, it is not altered. Returns True if a lazy load is registered in the cache, otherwise False. To register such a load, a module loader with a get_source method must be found, the filename must be cacheable, and the filename must not already be cached.
updatecache(filename, module_globals=None) Update a cache entry and return its list of lines. If something's wrong, print a message, discard the cache entry, and return an empty list.
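A small, hedged sketch of the cache lifecycle, combining the calls above (it assumes os and collections are ordinary source-backed modules, as they are in a standard CPython install):
```
import collections
import linecache
import os

path = os.__file__                         # source path of an already-imported module

lines = linecache.getlines(path)           # reads the file once and fills the cache
print(len(lines), "lines cached")
print(linecache.getline(path, 1), end="")  # line numbers are 1-based; served from the cache

# lazycache: register the source of collections lazily -- its loader's
# get_source() is consulted only when getline()/getlines() is actually called.
registered = linecache.lazycache(collections.__file__, vars(collections))
print("lazy entry registered:", registered)  # typically True in a fresh interpreter

linecache.checkcache(path)   # drop the entry if the file changed on disk since caching
linecache.updatecache(path)  # force a re-read; returns [] (and prints a message) on error
linecache.clearcache()       # empty the cache entirely
```
The captured value of the module-level cache dictionary follows; each entry maps a filename to a (size, mtime, lines, fullname) tuple.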
cache = {'/nix/store/w0pyxlpxd2x7hp5nd3a90kaf7ywsprld-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/server.py': (2412, 1.0, ['#!/usr/bin/env python3\n', '\n', 'import asyncio\n', 'import logging\n', 'import signal\n', 'import traceback\n', 'from typing import TYPE_CHECKING\n', '\n', 'from .response import Response, Status\n', 'from .tls import make_sni_context\n', '\n', 'if TYPE_CHECKING:\n', ' from .config import Config\n', '\n', '\n', 'class Server():\n', ' def __init__(\n', ' self,\n', ' config: "Config",\n', ' ):\n', ' self.log = logging.getLogger("amethyst.server")\n', ' self.access_log = logging.getLogger("amethyst.access")\n', '\n', ' self.server = None\n', ' self.config = config\n', '\n', ' self.ssl_context = make_sni_context(config)\n', ' self.server = self.get_server()\n', '\n', ' def get_server(self):\n', ' loop = asyncio.get_event_loop()\n', '\n', ' return asyncio.start_server(\n', ' self.handle_connection, port=self.config.port,\n', ' ssl=self.ssl_context, loop=loop,\n', ' )\n', '\n', ' async def handle_connection(self, reader, writer):\n', ' from .request import Connection\n', '\n', ' peer_addr = writer.get_extra_info("peername")\n', ' peer_cert = writer.get_extra_info("peercert")\n', '\n', ' self.log.debug(f"Received connection from {peer_addr}")\n', '\n', ' url = "-"\n', ' try:\n', ' url = (await reader.readuntil(b"\\r\\n")).rstrip(b"\\r\\n").decode()\n', '\n', ' if len(url) > 1024:\n', ' response = Response(Status.BAD_REQUEST, "URL too long!")\n', ' else:\n', ' response = await self.config.handler(\n', ' url, Connection(self, peer_addr, peer_cert)\n', ' )\n', '\n', ' except UnicodeDecodeError:\n', ' response = Response(Status.BAD_REQUEST, "URL must be UTF-8")\n', '\n', ' except Exception:\n', ' self.log.error(f"While generating response; {traceback.format_exc()}")\n', '\n', ' response = Response(\n', ' Status.TEMPORARY_FAILURE,\n', ' "Exception thrown during request processing; see server logs for details."\n', ' )\n', '\n', ' self.access_log.info(\n', ' f"{url} {response.status_code.value}[{response.status_code.name}]"\n', ' f" {response.meta}"\n', ' )\n', '\n', ' try:\n', ' line = f"{response.status_code.value} {response.meta}\\r\\n".encode()\n', ' writer.write(line)\n', '\n', ' if response.status_code.is_success() and response.content is not None:\n', ' writer.write(response.content)\n', '\n', ' except Exception:\n', ' self.log.error(f"While writing response; {traceback.format_exc()}")\n', '\n', ' finally:\n', ' writer.close()\n'], '/nix/store/w0pyxlpxd2x7hp5nd3a90kaf7ywsprld-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/server.py'), '/nix/store/w0pyxlpxd2x7hp5nd3a90kaf7ywsprld-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/handler.py': (2535, 1.0, ['from .resource import Resource\n', 'from .response import Status, Response\n', 'from .request import Connection, Context\n', 'from .util import get_path_components\n', 'from urllib.parse import urlparse\n', 'from typing import Dict, Callable, Awaitable\n', '\n', 'import logging\n', 'import re\n', '\n', 'Handler = Callable[[str, Connection], Awaitable[Response]]\n', 'PORT_RE = re.compile(r":([0-9]{1,5})$")\n', '\n', '\n', 'class GenericHandler():\n', ' def __init__(self, url_map: Dict[str, Dict[str, Resource]]):\n', ' self.url_map = url_map\n', ' self.log = logging.getLogger("amethyst.handler.GenericHandler")\n', '\n', ' async def __call__(self, url: str, conn: Connection) -> Response:\n', ' result = urlparse(url)\n', '\n', ' if not result.scheme:\n', ' return Response(\n', ' Status.BAD_REQUEST,\n', ' 
f"Requested URL must have a scheme."\n', ' )\n', '\n', ' if result.scheme != "gemini":\n', ' # This is exclusively a Gemini server.\n', ' return Response(\n', ' Status.PROXY_REQUEST_REFUSED,\n', ' f"This server does not proxy non-Gemini URLs."\n', ' )\n', '\n', ' host = result.netloc\n', '\n', ' if (port_match := PORT_RE.search(host)):\n', ' if int(port_match.group(1)) != conn.server.config.port:\n', ' return Response(\n', ' Status.PROXY_REQUEST_REFUSED,\n', ' f"{host} is not served here."\n', ' )\n', '\n', ' host = PORT_RE.sub("", host) \n', '\n', ' if host not in self.url_map:\n', ' self.log.warn(f"Received request for host {host} not in URL map")\n', '\n', ' return Response(\n', ' Status.PROXY_REQUEST_REFUSED,\n', ' f"{host} is not served here.",\n', ' )\n', '\n', ' req_path = result.path\n', ' try:\n', ' req_path = get_path_components(req_path)\n', ' except ValueError:\n', ' return Response(Status.BAD_REQUEST, "Invalid URL")\n', '\n', ' paths = [\n', ' (get_path_components(i), v) for i, v in self.url_map[host].items()\n', ' ]\n', '\n', ' for path, resource in sorted(paths, key=lambda k: len(k[0]), reverse=True):\n', ' if len(req_path) < len(path) or req_path[:len(path)] != path:\n', ' continue\n', '\n', ' truncated_path = "/".join(req_path[len(path):])\n', ' if result.path.endswith("/"):\n', ' truncated_path += "/"\n', '\n', ' return await resource(Context(\n', ' result.netloc, result.path, truncated_path,\n', ' result.query, conn\n', ' ))\n', '\n', ' return Response(\n', ' Status.NOT_FOUND, f"{req_path} was not found on this server."\n', ' )\n'], '/nix/store/w0pyxlpxd2x7hp5nd3a90kaf7ywsprld-amethyst-0.0.1/lib/python3.9/site-packages/amethyst/handler.py'), '/nix/store/ljdrdryiv5dqlq0pl5snlgil6nrjlzmy-python3.9-amethyst_extensions-0.0.1/lib/python3.9/site-packages/amethyst_ext/pydoc.py': (6841, 1.0, ['from amethyst.response import Response, Status\n', '\n', 'import importlib\n', 'import inspect\n', 'import pkgutil\n', 'import re\n', 'import sys\n', 'import textwrap\n', '\n', 'SITE_PACKAGES_RE = re.compile(r"lib/python[^/]+/site-packages")\n', 'PYTHON3_RE = re.compile(r"python3[^-]*")\n', '\n', '\n', 'class PydocResource():\n', ' @staticmethod\n', ' def classify(thing):\n', ' if inspect.ismodule(thing):\n', ' return "module"\n', ' elif inspect.isclass(thing):\n', ' return "class"\n', ' elif (inspect.isfunction(thing) or inspect.ismethod(thing) or\n', ' inspect.ismethoddescriptor(thing) or inspect.isroutine(thing)):\n', ' return "function"\n', ' else:\n', ' return "other"\n', '\n', ' def doc_class(self, cls, name=None):\n', ' lines = []\n', '\n', ' if name is None:\n', ' name = cls.__name__\n', ' else:\n', ' name = f"{name}.{cls.__name__}"\n', '\n', ' lines.append(f"### {name}")\n', ' if (clsdoc := getattr(cls, "__doc__")):\n', ' lines.append(f"```\\n{clsdoc}\\n```\\n")\n', '\n', ' members = {}\n', ' members = {"class": [], "function": [], "other": []}\n', '\n', ' for name, member in inspect.getmembers(cls):\n', ' if name.startswith("_"):\n', ' continue\n', '\n', ' if (classification := self.classify(member)) in {"class", "function", "other"}:\n', ' members[classification].append((name, member))\n', '\n', ' members["class"].sort()\n', ' for _, scls in members["class"]:\n', ' lines.append(self.doc_class(scls, name))\n', '\n', ' members["function"].sort()\n', ' for name, func in members["function"]:\n', ' lines.append(self.doc_func(func))\n', '\n', ' members["other"].sort()\n', ' for name, other in members["other"]:\n', ' lines.append(self.doc_other(name, other))\n', '\n', ' return 
"\\n".join(lines)\n', '\n', ' def doc_func(self, func):\n', ' lines = []\n', '\n', ' lines.append("```")\n', ' try:\n', ' lines.append(f"{func.__name__}{inspect.signature(func)}")\n', ' except ValueError:\n', ' lines.append(f"{func.__name__}(...)")\n', '\n', ' if (funcdoc := getattr(func, "__doc__")):\n', ' lines.append(f"\\n{textwrap.indent(funcdoc, \' \')}\\n```\\n")\n', ' else:\n', ' lines.append("```\\n")\n', '\n', ' return "\\n".join(lines)\n', '\n', ' def doc_other(self, name, other):\n', ' doc = getattr(other, "__doc__", "")\n', ' if doc and doc != type(other).__doc__:\n', ' doc = textwrap.indent(doc, " ")\n', ' doc += "\\n```\\n"\n', ' else:\n', ' doc = "```"\n', '\n', ' return f"```\\n{name} = {other!r}\\n{doc}"\n', '\n', ' def doc_mod(self, modname):\n', ' lines = []\n', '\n', ' try:\n', ' module = importlib.import_module(modname)\n', ' except ImportError:\n', ' return None\n', '\n', ' ispkg = (getattr(module, "__package__", "") == modname)\n', '\n', ' lines.append("=> _ Back to module index")\n', ' lines.append("=> _/search Go to module by name")\n', ' if "." in modname:\n', ' components = modname.split(".")\n', ' for i in range(len(components) - 1, 0, -1):\n', ' lines.append("=> " + ".".join(components[:i]))\n', '\n', ' if ispkg:\n', ' lines.append(f"# {modname} (package)")\n', ' else:\n', ' lines.append(f"# {modname}")\n', '\n', ' if (moddoc := getattr(module, "__doc__")):\n', ' lines.append(f"```\\n{moddoc}\\n```")\n', ' else:\n', ' lines.append("This module has no docstring.")\n', '\n', ' members = {"module": [], "class": [], "function": [], "other": []}\n', ' for name, member in inspect.getmembers(module):\n', ' if name.startswith("_"):\n', ' continue\n', '\n', ' members[self.classify(member)].append((name, member))\n', '\n', ' if members["class"]:\n', ' members["class"].sort()\n', ' lines.append("## Classes")\n', ' for name, cls in members["class"]:\n', ' lines.append(self.doc_class(cls))\n', '\n', ' if members["function"]:\n', ' members["function"].sort()\n', ' lines.append("## Functions")\n', ' for name, func in members["function"]:\n', ' lines.append(f"### {name}")\n', ' lines.append(self.doc_func(func))\n', '\n', ' if members["other"]:\n', ' lines.append("## Other members")\n', ' members["other"].sort()\n', ' for name, other in members["other"]:\n', ' lines.append(self.doc_other(name, other))\n', '\n', ' if members["module"]:\n', ' members["module"].sort()\n', ' lines.append("## Modules")\n', ' for name, mod in members["module"]:\n', ' lines.append(f"=> {mod.__name__} {name}")\n', '\n', ' return "\\n".join(lines)\n', '\n', ' def index(self):\n', ' lines = []\n', '\n', ' lines.append("=> _/search Go to module by name")\n', '\n', ' lines.append("# Built-in modules")\n', ' names = [name for name in sys.builtin_module_names if name != "__main__"]\n', ' for name in sorted(names):\n', ' lines.append(f"=> {name}")\n', '\n', ' lines.append("# Python modules")\n', ' for dirname in sorted(sys.path):\n', ' display = dirname\n', ' if display.startswith("/nix/store/"):\n', ' display = f"(nix)/{display[44:]}"\n', '\n', ' display = SITE_PACKAGES_RE.sub("l/p/s-p", display)\n', ' display = PYTHON3_RE.sub("p3", display)\n', '\n', ' modpkgs = []\n', ' for importer, name, ispkg in pkgutil.iter_modules([dirname]):\n', ' if any((0xD800 <= ord(ch) <= 0xDFFF) for ch in name):\n', ' # Ignore modules that contain surrogate characters\n', ' # (pydoc does this)\n', ' continue\n', '\n', ' if name == "setup":\n', ' # never import "setup.py"\n', ' continue\n', '\n', ' modpkgs.append((name, 
ispkg))\n', '\n', ' if modpkgs:\n', ' lines.append(f"## {display}")\n', ' for name, ispkg in sorted(modpkgs):\n', ' if ispkg:\n', ' lines.append(f"=> {name} {name} (package)")\n', ' else:\n', ' lines.append(f"=> {name}")\n', '\n', ' return "\\n".join(lines)\n', '\n', '\n', ' async def __call__(self, ctx):\n', ' path = ctx.path\n', ' if not path:\n', ' return Response(Status.REDIRECT_PERMANENT, ctx.orig_path + "/")\n', '\n', ' path = path.strip("/")\n', ' if not path or path == "_":\n', ' text = self.index()\n', '\n', ' elif path == "_/search":\n', ' if ctx.query:\n', ' try:\n', ' importlib.import_module(ctx.query)\n', ' return Response(Status.REDIRECT_TEMPORARY, "../" + ctx.query)\n', ' except ImportError:\n', ' return Response(Status.INPUT, f"Sorry, I don\'t know about {ctx.query}. Module name?")\n', '\n', ' return Response(Status.INPUT, "Module name?")\n', ' else:\n', ' text = self.doc_mod(path)\n', '\n', ' if text is not None:\n', ' return Response(\n', ' Status.SUCCESS, "text/gemini", text.encode()\n', ' )\n', '\n', ' return Response(Status.NOT_FOUND, "text/gemini")\n'], '/nix/store/ljdrdryiv5dqlq0pl5snlgil6nrjlzmy-python3.9-amethyst_extensions-0.0.1/lib/python3.9/site-packages/amethyst_ext/pydoc.py'), '/nix/store/wl02plhc6zf84m6x9984l42wnnnbly5m-python3-3.9.6/lib/python3.9/textwrap.py': (19407, 1.0, ['"""Text wrapping and filling.\n', '"""\n', '\n', '# Copyright (C) 1999-2001 Gregory P. Ward.\n', '# Copyright (C) 2002, 2003 Python Software Foundation.\n', '# Written by Greg Ward <gward@python.net>\n', '\n', 'import re\n', '\n', "__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent', 'indent', 'shorten']\n", '\n', '# Hardcode the recognized whitespace characters to the US-ASCII\n', '# whitespace characters. The main reason for doing this is that\n', '# some Unicode spaces (like \\u00a0) are non-breaking whitespaces.\n', "_whitespace = '\\t\\n\\x0b\\x0c\\r '\n", '\n', 'class TextWrapper:\n', ' """\n', ' Object for wrapping/filling text. The public interface consists of\n', ' the wrap() and fill() methods; the other methods are just there for\n', ' subclasses to override in order to tweak the default behaviour.\n', ' If you want to completely replace the main wrapping algorithm,\n', " you'll probably have to override _wrap_chunks().\n", '\n', ' Several instance attributes control various aspects of wrapping:\n', ' width (default: 70)\n', ' the maximum width of wrapped lines (unless break_long_words\n', ' is false)\n', ' initial_indent (default: "")\n', ' string that will be prepended to the first line of wrapped\n', " output. Counts towards the line's width.\n", ' subsequent_indent (default: "")\n', ' string that will be prepended to all lines save the first\n', " of wrapped output; also counts towards each line's width.\n", ' expand_tabs (default: true)\n', ' Expand tabs in input text to spaces before further processing.\n', " Each tab will become 0 .. 'tabsize' spaces, depending on its position\n", ' in its line. If false, each tab is treated as a single character.\n', ' tabsize (default: 8)\n', " Expand tabs in input text to 0 .. 'tabsize' spaces, unless\n", " 'expand_tabs' is false.\n", ' replace_whitespace (default: true)\n', ' Replace all whitespace characters in the input text by spaces\n', ' after tab expansion. Note that if expand_tabs is false and\n', ' replace_whitespace is true, every tab will be converted to a\n', ' single space!\n', ' fix_sentence_endings (default: false)\n', ' Ensure that sentence-ending punctuation is always followed\n', ' by two spaces. 
Off by default because the algorithm is\n', ' (unavoidably) imperfect.\n', ' break_long_words (default: true)\n', " Break words longer than 'width'. If false, those words will not\n", " be broken, and some lines might be longer than 'width'.\n", ' break_on_hyphens (default: true)\n', ' Allow breaking hyphenated words. If true, wrapping will occur\n', ' preferably on whitespaces and right after hyphens part of\n', ' compound words.\n', ' drop_whitespace (default: true)\n', ' Drop leading and trailing whitespace from lines.\n', ' max_lines (default: None)\n', ' Truncate wrapped lines.\n', " placeholder (default: ' [...]')\n", ' Append to the last line of truncated text.\n', ' """\n', '\n', ' unicode_whitespace_trans = {}\n', " uspace = ord(' ')\n", ' for x in _whitespace:\n', ' unicode_whitespace_trans[ord(x)] = uspace\n', '\n', ' # This funky little regex is just the trick for splitting\n', ' # text up into word-wrappable chunks. E.g.\n', ' # "Hello there -- you goof-ball, use the -b option!"\n', ' # splits into\n', ' # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!\n', ' # (after stripping out empty strings).\n', ' word_punct = r\'[\\w!"\\\'&.,?]\'\n', " letter = r'[^\\d\\W]'\n", " whitespace = r'[%s]' % re.escape(_whitespace)\n", " nowhitespace = '[^' + whitespace[1:]\n", " wordsep_re = re.compile(r'''\n", ' ( # any whitespace\n', ' %(ws)s+\n', ' | # em-dash between words\n', ' (?<=%(wp)s) -{2,} (?=\\w)\n', ' | # word, possibly hyphenated\n', ' %(nws)s+? (?:\n', ' # hyphenated word\n', ' -(?: (?<=%(lt)s{2}-) | (?<=%(lt)s-%(lt)s-))\n', ' (?= %(lt)s -? %(lt)s)\n', ' | # end of word\n', ' (?=%(ws)s|\\Z)\n', ' | # em-dash\n', ' (?<=%(wp)s) (?=-{2,}\\w)\n', ' )\n', " )''' % {'wp': word_punct, 'lt': letter,\n", " 'ws': whitespace, 'nws': nowhitespace},\n", ' re.VERBOSE)\n', ' del word_punct, letter, nowhitespace\n', '\n', ' # This less funky little regex just split on recognized spaces. 
E.g.\n', ' # "Hello there -- you goof-ball, use the -b option!"\n', ' # splits into\n', ' # Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/\n', " wordsep_simple_re = re.compile(r'(%s+)' % whitespace)\n", ' del whitespace\n', '\n', ' # XXX this is not locale- or charset-aware -- string.lowercase\n', ' # is US-ASCII only (and therefore English-only)\n', " sentence_end_re = re.compile(r'[a-z]' # lowercase letter\n", " r'[\\.\\!\\?]' # sentence-ending punct.\n", ' r\'[\\"\\\']?\' # optional end-of-quote\n', " r'\\Z') # end of chunk\n", '\n', ' def __init__(self,\n', ' width=70,\n', ' initial_indent="",\n', ' subsequent_indent="",\n', ' expand_tabs=True,\n', ' replace_whitespace=True,\n', ' fix_sentence_endings=False,\n', ' break_long_words=True,\n', ' drop_whitespace=True,\n', ' break_on_hyphens=True,\n', ' tabsize=8,\n', ' *,\n', ' max_lines=None,\n', " placeholder=' [...]'):\n", ' self.width = width\n', ' self.initial_indent = initial_indent\n', ' self.subsequent_indent = subsequent_indent\n', ' self.expand_tabs = expand_tabs\n', ' self.replace_whitespace = replace_whitespace\n', ' self.fix_sentence_endings = fix_sentence_endings\n', ' self.break_long_words = break_long_words\n', ' self.drop_whitespace = drop_whitespace\n', ' self.break_on_hyphens = break_on_hyphens\n', ' self.tabsize = tabsize\n', ' self.max_lines = max_lines\n', ' self.placeholder = placeholder\n', '\n', '\n', ' # -- Private methods -----------------------------------------------\n', ' # (possibly useful for subclasses to override)\n', '\n', ' def _munge_whitespace(self, text):\n', ' """_munge_whitespace(text : string) -> string\n', '\n', ' Munge whitespace in text: expand tabs and convert all other\n', ' whitespace characters to spaces. Eg. " foo\\\\tbar\\\\n\\\\nbaz"\n', ' becomes " foo bar baz".\n', ' """\n', ' if self.expand_tabs:\n', ' text = text.expandtabs(self.tabsize)\n', ' if self.replace_whitespace:\n', ' text = text.translate(self.unicode_whitespace_trans)\n', ' return text\n', '\n', '\n', ' def _split(self, text):\n', ' """_split(text : string) -> [string]\n', '\n', ' Split the text to wrap into indivisible chunks. Chunks are\n', ' not quite the same as words; see _wrap_chunks() for full\n', ' details. As an example, the text\n', ' Look, goof-ball -- use the -b option!\n', ' breaks into the following chunks:\n', " 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',\n", " 'use', ' ', 'the', ' ', '-b', ' ', 'option!'\n", ' if break_on_hyphens is True, or in:\n', " 'Look,', ' ', 'goof-ball', ' ', '--', ' ',\n", " 'use', ' ', 'the', ' ', '-b', ' ', option!'\n", ' otherwise.\n', ' """\n', ' if self.break_on_hyphens is True:\n', ' chunks = self.wordsep_re.split(text)\n', ' else:\n', ' chunks = self.wordsep_simple_re.split(text)\n', ' chunks = [c for c in chunks if c]\n', ' return chunks\n', '\n', ' def _fix_sentence_endings(self, chunks):\n', ' """_fix_sentence_endings(chunks : [string])\n', '\n', " Correct for sentence endings buried in 'chunks'. Eg. when the\n", ' original text contains "... 
foo.\\\\nBar ...", munge_whitespace()\n', ' and split() will convert that to [..., "foo.", " ", "Bar", ...]\n', ' which has one too few spaces; this method simply changes the one\n', ' space to two.\n', ' """\n', ' i = 0\n', ' patsearch = self.sentence_end_re.search\n', ' while i < len(chunks)-1:\n', ' if chunks[i+1] == " " and patsearch(chunks[i]):\n', ' chunks[i+1] = " "\n', ' i += 2\n', ' else:\n', ' i += 1\n', '\n', ' def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):\n', ' """_handle_long_word(chunks : [string],\n', ' cur_line : [string],\n', ' cur_len : int, width : int)\n', '\n', ' Handle a chunk of text (most likely a word, not whitespace) that\n', ' is too long to fit in any line.\n', ' """\n', ' # Figure out when indent is larger than the specified width, and make\n', ' # sure at least one character is stripped off on every pass\n', ' if width < 1:\n', ' space_left = 1\n', ' else:\n', ' space_left = width - cur_len\n', '\n', " # If we're allowed to break long words, then do so: put as much\n", ' # of the next chunk onto the current line as will fit.\n', ' if self.break_long_words:\n', ' cur_line.append(reversed_chunks[-1][:space_left])\n', ' reversed_chunks[-1] = reversed_chunks[-1][space_left:]\n', '\n', ' # Otherwise, we have to preserve the long word intact. Only add\n', " # it to the current line if there's nothing already there --\n", ' # that minimizes how much we violate the width constraint.\n', ' elif not cur_line:\n', ' cur_line.append(reversed_chunks.pop())\n', '\n', " # If we're not allowed to break long words, and there's already\n", ' # text on the current line, do nothing. Next time through the\n', " # main loop of _wrap_chunks(), we'll wind up here again, but\n", ' # cur_len will be zero, so the next line will be entirely\n', " # devoted to the long word that we can't handle right now.\n", '\n', ' def _wrap_chunks(self, chunks):\n', ' """_wrap_chunks(chunks : [string]) -> [string]\n', '\n', ' Wrap a sequence of text chunks and return a list of lines of\n', " length 'self.width' or less. (If 'break_long_words' is false,\n", ' some lines may be longer than this.) Chunks correspond roughly\n', ' to words and the whitespace between them: each chunk is\n', " indivisible (modulo 'break_long_words'), but a line break can\n", ' come between any two chunks. Chunks should not have internal\n', ' whitespace; ie. 
a chunk is either all whitespace or a "word".\n', ' Whitespace chunks will be removed from the beginning and end of\n', ' lines, but apart from that whitespace is preserved.\n', ' """\n', ' lines = []\n', ' if self.width <= 0:\n', ' raise ValueError("invalid width %r (must be > 0)" % self.width)\n', ' if self.max_lines is not None:\n', ' if self.max_lines > 1:\n', ' indent = self.subsequent_indent\n', ' else:\n', ' indent = self.initial_indent\n', ' if len(indent) + len(self.placeholder.lstrip()) > self.width:\n', ' raise ValueError("placeholder too large for max width")\n', '\n', ' # Arrange in reverse order so items can be efficiently popped\n', ' # from a stack of chucks.\n', ' chunks.reverse()\n', '\n', ' while chunks:\n', '\n', ' # Start the list of chunks that will make up the current line.\n', ' # cur_len is just the length of all the chunks in cur_line.\n', ' cur_line = []\n', ' cur_len = 0\n', '\n', ' # Figure out which static string will prefix this line.\n', ' if lines:\n', ' indent = self.subsequent_indent\n', ' else:\n', ' indent = self.initial_indent\n', '\n', ' # Maximum width for this line.\n', ' width = self.width - len(indent)\n', '\n', ' # First chunk on line is whitespace -- drop it, unless this\n', ' # is the very beginning of the text (ie. no lines started yet).\n', " if self.drop_whitespace and chunks[-1].strip() == '' and lines:\n", ' del chunks[-1]\n', '\n', ' while chunks:\n', ' l = len(chunks[-1])\n', '\n', ' # Can at least squeeze this chunk onto the current line.\n', ' if cur_len + l <= width:\n', ' cur_line.append(chunks.pop())\n', ' cur_len += l\n', '\n', ' # Nope, this line is full.\n', ' else:\n', ' break\n', '\n', ' # The current line is full, and the next chunk is too big to\n', ' # fit on *any* line (not just this one).\n', ' if chunks and len(chunks[-1]) > width:\n', ' self._handle_long_word(chunks, cur_line, cur_len, width)\n', ' cur_len = sum(map(len, cur_line))\n', '\n', ' # If the last chunk on this line is all whitespace, drop it.\n', " if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':\n", ' cur_len -= len(cur_line[-1])\n', ' del cur_line[-1]\n', '\n', ' if cur_line:\n', ' if (self.max_lines is None or\n', ' len(lines) + 1 < self.max_lines or\n', ' (not chunks or\n', ' self.drop_whitespace and\n', ' len(chunks) == 1 and\n', ' not chunks[0].strip()) and cur_len <= width):\n', ' # Convert current line back to a string and store it in\n', ' # list of all lines (return value).\n', " lines.append(indent + ''.join(cur_line))\n", ' else:\n', ' while cur_line:\n', ' if (cur_line[-1].strip() and\n', ' cur_len + len(self.placeholder) <= width):\n', ' cur_line.append(self.placeholder)\n', " lines.append(indent + ''.join(cur_line))\n", ' break\n', ' cur_len -= len(cur_line[-1])\n', ' del cur_line[-1]\n', ' else:\n', ' if lines:\n', ' prev_line = lines[-1].rstrip()\n', ' if (len(prev_line) + len(self.placeholder) <=\n', ' self.width):\n', ' lines[-1] = prev_line + self.placeholder\n', ' break\n', ' lines.append(indent + self.placeholder.lstrip())\n', ' break\n', '\n', ' return lines\n', '\n', ' def _split_chunks(self, text):\n', ' text = self._munge_whitespace(text)\n', ' return self._split(text)\n', '\n', ' # -- Public interface ----------------------------------------------\n', '\n', ' def wrap(self, text):\n', ' """wrap(text : string) -> [string]\n', '\n', " Reformat the single paragraph in 'text' so it fits in lines of\n", " no more than 'self.width' columns, and return a list of wrapped\n", " lines. 
Tabs in 'text' are expanded with string.expandtabs(),\n", ' and all other whitespace characters (including newline) are\n', ' converted to space.\n', ' """\n', ' chunks = self._split_chunks(text)\n', ' if self.fix_sentence_endings:\n', ' self._fix_sentence_endings(chunks)\n', ' return self._wrap_chunks(chunks)\n', '\n', ' def fill(self, text):\n', ' """fill(text : string) -> string\n', '\n', " Reformat the single paragraph in 'text' to fit in lines of no\n", " more than 'self.width' columns, and return a new string\n", ' containing the entire wrapped paragraph.\n', ' """\n', ' return "\\n".join(self.wrap(text))\n', '\n', '\n', '# -- Convenience interface ---------------------------------------------\n', '\n', 'def wrap(text, width=70, **kwargs):\n', ' """Wrap a single paragraph of text, returning a list of wrapped lines.\n', '\n', " Reformat the single paragraph in 'text' so it fits in lines of no\n", " more than 'width' columns, and return a list of wrapped lines. By\n", " default, tabs in 'text' are expanded with string.expandtabs(), and\n", ' all other whitespace characters (including newline) are converted to\n', ' space. See TextWrapper class for available keyword args to customize\n', ' wrapping behaviour.\n', ' """\n', ' w = TextWrapper(width=width, **kwargs)\n', ' return w.wrap(text)\n', '\n', 'def fill(text, width=70, **kwargs):\n', ' """Fill a single paragraph of text, returning a new string.\n', '\n', " Reformat the single paragraph in 'text' to fit in lines of no more\n", " than 'width' columns, and return a new string containing the entire\n", ' wrapped paragraph. As with wrap(), tabs are expanded and other\n', ' whitespace characters converted to space. See TextWrapper class for\n', ' available keyword args to customize wrapping behaviour.\n', ' """\n', ' w = TextWrapper(width=width, **kwargs)\n', ' return w.fill(text)\n', '\n', 'def shorten(text, width, **kwargs):\n', ' """Collapse and truncate the given text to fit in the given width.\n', '\n', ' The text first has its whitespace collapsed. If it then fits in\n', ' the *width*, it is returned as is. Otherwise, as many words\n', ' as possible are joined and then the placeholder is appended::\n', '\n', ' >>> textwrap.shorten("Hello world!", width=12)\n', " 'Hello world!'\n", ' >>> textwrap.shorten("Hello world!", width=11)\n', " 'Hello [...]'\n", ' """\n', ' w = TextWrapper(width=width, max_lines=1, **kwargs)\n', " return w.fill(' '.join(text.strip().split()))\n", '\n', '\n', '# -- Loosely related functionality -------------------------------------\n', '\n', "_whitespace_only_re = re.compile('^[ \\t]+