Current File : /opt/alt/python312/lib64/python3.12/tokenize.py
"""Tokenization help for Python programs.

tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens.  It decodes the bytes according to PEP-0263 for
determining source file encoding.

It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF).  It generates 5-tuples with these
members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators.  Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
               'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
               'Michael Foord')
from builtins import open as _builtin_open
from codecs import lookup, BOM_UTF8
import collections
import functools
from io import TextIOWrapper
import itertools as _itertools
import re
import sys
from token import *
from token import EXACT_TOKEN_TYPES
import _tokenize

cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
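
# A quick sketch of the cookie pattern above against a typical PEP 263
# declaration (the literal line is just an example):
#
#     m = cookie_re.match('# -*- coding: utf-8 -*-')
#     m.group(1)   # 'utf-8'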

import token
__all__ = token.__all__ + ["tokenize", "generate_tokens", "detect_encoding",
                           "untokenize", "TokenInfo"]
del token

class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    def __repr__(self):
        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=annotated_type))

    @property
    def exact_type(self):
        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
            return EXACT_TOKEN_TYPES[self.string]
        else:
            return self.type
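
# A small sketch of the exact_type property above: OP tokens are refined via
# EXACT_TOKEN_TYPES (the token values here are hypothetical):
#
#     t = TokenInfo(OP, '+', (1, 0), (1, 1), '+')
#     t.type         # OP
#     t.exact_type   # PLUS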

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'

# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'

Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
Binnumber = r'0[bB](?:_?[01])+'
Octnumber = r'0[oO](?:_?[0-7])+'
Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
                   r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
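
# A minimal sketch of the composed Number pattern (each literal below should
# match in full; the examples are arbitrary):
#
#     _num = re.compile(Number)
#     _num.fullmatch('0x_FF')        # hexadecimal int with underscore
#     _num.fullmatch('1_000.5e-3')   # float with exponent
#     _num.fullmatch('3.14j')        # imaginary literal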

# Return the empty string, plus all of the valid string prefixes.
def _all_string_prefixes():
    # The valid string prefixes. The list contains only the lower-case
    #  versions and no permutations (it includes 'fr' but not 'rf');
    #  the various case and order permutations are generated below.
    _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
    # if we add binary f-strings, add: ['fb', 'fbr']
    result = {''}
    for prefix in _valid_string_prefixes:
        for t in _itertools.permutations(prefix):
            # create a list with upper and lower versions of each
            #  character
            for u in _itertools.product(*[(c, c.upper()) for c in t]):
                result.add(''.join(u))
    return result
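
# A small sketch of what the helper above yields: every case permutation of
# the listed prefixes, plus the empty string, e.g.
#
#     prefixes = _all_string_prefixes()
#     '' in prefixes, 'Rb' in prefixes, 'fR' in prefixes   # all True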

@functools.lru_cache
def _compile(expr):
    return re.compile(expr, re.UNICODE)

# Note that since _all_string_prefixes includes the empty string,
#  StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Sorting in reverse order puts the long operators before their prefixes.
# Otherwise if = came before ==, == would get recognized as two instances
# of =.
Special = group(*map(re.escape, sorted(EXACT_TOKEN_TYPES, reverse=True)))
Funny = group(r'\r?\n', Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

# For a given string prefix plus quotes, endpats maps it to a regex
#  to match the remainder of that string. _prefix can be empty, for
#  a normal single or triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
    endpats[_prefix + "'"] = Single
    endpats[_prefix + '"'] = Double
    endpats[_prefix + "'''"] = Single3
    endpats[_prefix + '"""'] = Double3
del _prefix

# A set of all of the single and triple quoted string prefixes,
#  including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
    for u in (t + '"', t + "'"):
        single_quoted.add(u)
    for u in (t + '"""', t + "'''"):
        triple_quoted.add(u)
del t, u

tabsize = 8

class TokenError(Exception): pass


class StopTokenizing(Exception): pass

class Untokenizer:

    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.prev_type = None
        self.prev_line = ""
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        self.add_backslash_continuation(start)
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def add_backslash_continuation(self, start):
        """Add backslash continuation characters if the row has increased
        without encountering a newline token.

        This also inserts the correct amount of whitespace before the backslash.
        """
        row = start[0]
        row_offset = row - self.prev_row
        if row_offset == 0:
            return

        newline = '\r\n' if self.prev_line.endswith('\r\n') else '\n'
        line = self.prev_line.rstrip('\\\r\n')
        ws = ''.join(_itertools.takewhile(str.isspace, reversed(line)))
        self.tokens.append(ws + f"\\{newline}" * row_offset)
        self.prev_col = 0

    def escape_brackets(self, token):
        characters = []
        consume_until_next_bracket = False
        for character in token:
            if character == "}":
                if consume_until_next_bracket:
                    consume_until_next_bracket = False
                else:
                    characters.append(character)
            if character == "{":
                n_backslashes = sum(
                    1 for char in _itertools.takewhile(
                        "\\".__eq__,
                        characters[-2::-1]
                    )
                )
                if n_backslashes % 2 == 0 or characters[-1] != "N":
                    characters.append(character)
                else:
                    consume_until_next_bracket = True
            characters.append(character)
        return "".join(characters)

    def untokenize(self, iterable):
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            if len(t) == 2:
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            elif tok_type == FSTRING_MIDDLE:
                if '{' in token or '}' in token:
                    token = self.escape_brackets(token)
                    last_line = token.splitlines()[-1]
                    end_line, end_col = end
                    extra_chars = last_line.count("{{") + last_line.count("}}")
                    end = (end_line, end_col + extra_chars)

            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
            self.prev_type = tok_type
            self.prev_line = line
        return "".join(self.tokens)

    def compat(self, token, iterable):
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False
        in_fstring = 0

        for tok in _itertools.chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == FSTRING_START:
                in_fstring += 1
            elif toknum == FSTRING_END:
                in_fstring -= 1
            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            elif toknum == FSTRING_MIDDLE:
                tokval = self.escape_brackets(tokval)

            # Insert a space between two consecutive brackets if we are in an f-string
            if tokval in {"{", "}"} and self.tokens and self.tokens[-1] == tokval and in_fstring:
                tokval = ' ' + tokval

            # Insert a space between two consecutive f-strings
            if toknum in (STRING, FSTRING_START) and self.prev_type in (STRING, FSTRING_END):
                self.tokens.append(" ")

            toks_append(tokval)
            self.prev_type = toknum


def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    The result is guaranteed to tokenize back to match the input so
    that the conversion is lossless and round-trips are assured.
    The guarantee applies only to the token type and token string as
    the spacing between tokens (column positions) may change.
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out
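
# A minimal round-trip sketch of the guarantee described above (the source
# string is just an example; full 5-tuples preserve spacing here):
#
#     import io
#     src = "x = 1 + 2\n"
#     toks = list(generate_tokens(io.StringIO(src).readline))
#     untokenize(toks) == src   # True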


def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
        return "iso-8859-1"
    return orig_enc

def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263.  If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''

    def find_cookie(line):
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                        encoding)
            raise SyntaxError(msg)

        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]
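
# A minimal sketch of detect_encoding() on an in-memory source carrying a
# PEP 263 cookie (the byte string is just an example):
#
#     import io
#     line = b'# -*- coding: latin-1 -*-\n'
#     enc, read = detect_encoding(io.BytesIO(line).readline)
#     # enc == 'iso-8859-1' (the normalized name), read == [line]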


def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = _builtin_open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    except:
        buffer.close()
        raise
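
# A minimal sketch of the open() helper above ('example.py' is a placeholder
# path); it returns a text stream already decoded with the detected encoding:
#
#     with open('example.py') as f:   # this module's open(), not builtins.open
#         source = f.read()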

def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    physical line.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    encoding, consumed = detect_encoding(readline)
    rl_gen = _itertools.chain(consumed, iter(readline, b""))
    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    yield from _generate_tokens_from_c_tokenizer(rl_gen.__next__, encoding, extra_tokens=True)

def generate_tokens(readline):
    """Tokenize a source reading Python code as unicode strings.

    This has the same API as tokenize(), except that it expects the *readline*
    callable to return str objects instead of bytes.
    """
    return _generate_tokens_from_c_tokenizer(readline, extra_tokens=True)
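
# A minimal sketch of generate_tokens(), which reads str lines rather than
# bytes (the source text is just an example):
#
#     import io
#     code = "def f():\n    return 42\n"
#     for tok in generate_tokens(io.StringIO(code).readline):
#         print(tok)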

def main():
    import argparse

    # Helper error handling routines
    def perror(message):
        sys.stderr.write(message)
        sys.stderr.write('\n')

    def error(message, filename=None, location=None):
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with _builtin_open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = "<stdin>"
            tokens = _generate_tokens_from_c_tokenizer(
                sys.stdin.readline, extra_tokens=True)


        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise

def _transform_msg(msg):
    """Transform error messages from the C tokenizer into the Python tokenize

    The C tokenizer is more picky than the Python one, so we need to massage
    the error messages a bit for backwards compatibility.
    """
    if "unterminated triple-quoted string literal" in msg:
        return "EOF in multi-line string"
    return msg

def _generate_tokens_from_c_tokenizer(source, encoding=None, extra_tokens=False):
    """Tokenize a source reading Python code as unicode strings using the internal C tokenizer"""
    if encoding is None:
        it = _tokenize.TokenizerIter(source, extra_tokens=extra_tokens)
    else:
        it = _tokenize.TokenizerIter(source, encoding=encoding, extra_tokens=extra_tokens)
    try:
        for info in it:
            yield TokenInfo._make(info)
    except SyntaxError as e:
        if type(e) != SyntaxError:
            raise e from None
        msg = _transform_msg(e.msg)
        raise TokenError(msg, (e.lineno, e.offset)) from None


if __name__ == "__main__":
    main()